title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
[CLN] [BLD] Fix many compiler warnings
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index ff6570e2106b2..7f4a2eeafeea2 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -29,7 +29,7 @@ dtypes = [('Float64', 'float64', 'float64_t'), ctypedef struct {{name}}VectorData: {{arg}} *data - size_t n, m + Py_ssize_t n, m {{endif}} @@ -147,7 +147,7 @@ cdef class StringVector: cdef resize(self): cdef: char **orig_data - size_t i, m + Py_ssize_t i, m m = self.data.m self.data.m = max(self.data.m * 4, _INIT_VEC_CAP) @@ -172,7 +172,7 @@ cdef class StringVector: def to_array(self): cdef: ndarray ao - size_t n + Py_ssize_t n object val ao = np.empty(self.data.n, dtype=np.object) @@ -198,7 +198,7 @@ cdef class ObjectVector: cdef: PyObject **data - size_t n, m + Py_ssize_t n, m ndarray ao bint external_view_exists @@ -281,7 +281,7 @@ cdef class {{name}}HashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof({{dtype}}_t) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, {{dtype}}_t val): @@ -522,13 +522,13 @@ cdef class StringHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(char *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): cdef: khiter_t k - char *v + const char *v v = util.get_c_string(val) k = kh_get_str(self.table, v) @@ -541,7 +541,7 @@ cdef class StringHashTable(HashTable): cdef: khiter_t k int ret = 0 - char *v + const char *v v = util.get_c_string(val) @@ -560,10 +560,10 @@ cdef class StringHashTable(HashTable): int64_t *resbuf = <int64_t*> labels.data khiter_t k kh_str_t *table = self.table - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + 
vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] v = util.get_c_string(val) @@ -589,10 +589,10 @@ cdef class StringHashTable(HashTable): object val ObjectVector uniques khiter_t k - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) uindexer = np.empty(n, dtype=np.int64) for i in range(n): val = values[i] @@ -627,7 +627,7 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v + const char *v khiter_t k int64_t[:] locs = np.empty(n, dtype=np.int64) @@ -660,12 +660,12 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v - char **vecs + const char *v + const char **vecs khiter_t k # these by-definition *must* be strings - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -693,8 +693,8 @@ cdef class StringHashTable(HashTable): Py_ssize_t idx, count = count_prior int ret = 0 object val - char *v - char **vecs + const char *v + const char **vecs khiter_t k bint use_na_value @@ -705,7 +705,7 @@ cdef class StringHashTable(HashTable): # pre-filter out missing # and assign pointers - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -769,7 +769,7 @@ cdef class PyObjectHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(PyObject *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 9e56802b92bf0..663ec66a35db2 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -329,10 
+329,11 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, * Returns -1 on error, 0 on success, and 1 (with no error set) * if obj doesn't have the needed date or datetime attributes. */ -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, npy_datetimestruct *out) { // Assumes that obj is a valid datetime object PyObject *tmp; + PyObject *obj = (PyObject*)dtobj; /* Initialize the output to all zeros */ memset(out, 0, sizeof(npy_datetimestruct)); diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index 4347d0c8c47d4..04009c6581ac0 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -31,7 +31,7 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, npy_datetimestruct *out); npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 25eede6c286dc..a18d12616a802 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -262,7 +262,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { ("\n\nmake_stream_space: nbytes = %zu. 
grow_buffer(self->stream...)\n", nbytes)) self->stream = (char *)grow_buffer((void *)self->stream, self->stream_len, - (size_t*)&self->stream_cap, nbytes * 2, + (int64_t*)&self->stream_cap, nbytes * 2, sizeof(char), &status); TRACE( ("make_stream_space: self->stream=%p, self->stream_len = %zu, " @@ -289,7 +289,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->words_cap; self->words = (char **)grow_buffer((void *)self->words, self->words_len, - (size_t*)&self->words_cap, nbytes, + (int64_t*)&self->words_cap, nbytes, sizeof(char *), &status); TRACE( ("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, " @@ -320,7 +320,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->lines_cap; self->line_start = (int64_t *)grow_buffer((void *)self->line_start, self->lines + 1, - (size_t*)&self->lines_cap, nbytes, + (int64_t*)&self->lines_cap, nbytes, sizeof(int64_t), &status); TRACE(( "make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n", diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 4bab32e93ab1e..8c7b92ddeaa81 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -427,7 +427,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, #if (PY_VERSION_HEX >= 0x03030000) if (PyUnicode_IS_COMPACT_ASCII(obj)) { Py_ssize_t len; - char *data = PyUnicode_AsUTF8AndSize(obj, &len); + char *data = (char*)PyUnicode_AsUTF8AndSize(obj, &len); *_outLen = len; return data; } diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index cd3ce5c1a8f09..4054154cd285b 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -927,7 +927,8 @@ def extract_freq(ndarray[object] values): # ----------------------------------------------------------------------- # period helpers - +@cython.wraparound(False) +@cython.boundscheck(False) cdef 
ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, int freq, object tz): cdef: diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index 305c4f8f908e0..efdb1570ed878 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -70,7 +70,7 @@ cdef extern from "../src/numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) - char *get_c_string(object) except NULL + const char *get_c_string(object) except NULL object char_to_string(char*) ctypedef fused numeric: diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx index 04bb330e595dd..427414b80dfe4 100644 --- a/pandas/io/msgpack/_unpacker.pyx +++ b/pandas/io/msgpack/_unpacker.pyx @@ -139,7 +139,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None, ret = unpack_construct(&ctx, buf, buf_len, &off) if ret == 1: obj = unpack_data(&ctx) - if off < buf_len: + if <Py_ssize_t> off < buf_len: raise ExtraData(obj, PyBytes_FromStringAndSize( buf + off, buf_len - off)) return obj @@ -367,9 +367,11 @@ cdef class Unpacker(object): self.buf_tail = tail + _buf_len cdef read_from_file(self): + # Assume self.max_buffer_size - (self.buf_tail - self.buf_head) >= 0 next_bytes = self.file_like_read( min(self.read_size, - self.max_buffer_size - (self.buf_tail - self.buf_head))) + <Py_ssize_t>(self.max_buffer_size - + (self.buf_tail - self.buf_head)))) if next_bytes: self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) @@ -417,7 +419,9 @@ cdef class Unpacker(object): def read_bytes(self, Py_ssize_t nbytes): """Read a specified number of raw bytes from the stream""" cdef size_t nread - nread = min(self.buf_tail - self.buf_head, nbytes) + + # Assume that self.buf_tail - self.buf_head >= 0 + nread = min(<Py_ssize_t>(self.buf_tail - self.buf_head), nbytes) ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) self.buf_head 
+= nread if len(ret) < nbytes and self.file_like is not None: diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index e2a1107969990..3d94dc127a1d2 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -104,7 +104,8 @@ cdef ndarray[uint8_t, ndim=1] rle_decompress( raise ValueError("unknown control byte: {byte}" .format(byte=control_byte)) - if len(result) != result_length: + # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t + if <Py_ssize_t>len(result) != <Py_ssize_t>result_length: raise ValueError("RLE: {got} != {expect}".format(got=len(result), expect=result_length)) @@ -186,12 +187,14 @@ cdef ndarray[uint8_t, ndim=1] rdc_decompress( else: raise ValueError("unknown RDC command") - if len(outbuff) != result_length: + # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t + if <Py_ssize_t>len(outbuff) != <Py_ssize_t>result_length: raise ValueError("RDC: {got} != {expect}\n" .format(got=len(outbuff), expect=result_length)) return np.asarray(outbuff) + cdef enum ColumnTypes: column_type_decimal = 1 column_type_string = 2 @@ -204,6 +207,7 @@ cdef int page_mix_types_1 = const.page_mix_types[1] cdef int page_data_type = const.page_data_type cdef int subheader_pointers_offset = const.subheader_pointers_offset + cdef class Parser(object): cdef: diff --git a/setup.py b/setup.py index 85c5970af018f..d265733738425 100755 --- a/setup.py +++ b/setup.py @@ -491,7 +491,6 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): if suffix == '.pyx': lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src') for f in lib_depends] - lib_depends.append('pandas/_libs/util.pxd') else: lib_depends = [] @@ -507,7 +506,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'] -tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] +tseries_depends = np_datetime_headers ext_data = {
Checked in OSX in 2.7 and 3.7, on Ubuntu on 2.7 and 3.5, this fixes just about all the warnings that are fixable at my pay-grade. The ones that are left: - Ubiquitous `NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION` --> I expect there is a compiler flag that can be set in setup.py to silence this (and only this) warning, haven't figured it out. - In Py3.7: ``` pandas/_libs/src/ujson/python/objToJSON.c:430:15: warning: initializing 'char *' with an expression of type 'const char *' discards qualifiers [-Wincompatible-pointer-types-discards-qualifiers] char *data = PyUnicode_AsUTF8AndSize(obj, &len); ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ pandas/_libs/src/ujson/python/objToJSON.c:436:14: warning: 'PyUnicode_EncodeUTF8' is deprecated [-Wdeprecated-declarations] newObj = PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(obj), ^ /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m/unicodeobject.h:1324:7: note: 'PyUnicode_EncodeUTF8' has been explicitly marked deprecated here ) Py_DEPRECATED(3.3); ^ /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m/pyport.h:493:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ 3 warnings generated. ``` - On Linux: ``` In file included from pandas/_libs/src/datetime/np_datetime.h:21:0, from pandas/_libs/src/datetime/np_datetime_strings.c:33: /usr/include/python3.5m/datetime.h:191:25: warning: ‘PyDateTimeAPI’ defined but not used [-Wunused-variable] static PyDateTime_CAPI *PyDateTimeAPI = NULL; ``` - Also on Linux a bunch of "this may be unitialized" warnings.
https://api.github.com/repos/pandas-dev/pandas/pulls/22013
2018-07-21T19:00:32Z
2018-07-24T00:11:21Z
2018-07-24T00:11:21Z
2018-07-24T00:19:38Z
Default to_* methods to compression='infer'
diff --git a/doc/source/io.rst b/doc/source/io.rst index 9fe578524c8e0..c2c8c1c17700f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -298,7 +298,7 @@ compression : {``'infer'``, ``'gzip'``, ``'bz2'``, ``'zip'``, ``'xz'``, ``None`` Set to ``None`` for no decompression. .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression. - + .. versionchanged:: 0.24.0 'infer' option added and set to default. thousands : str, default ``None`` Thousands separator. decimal : str, default ``'.'`` diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 42e286f487a7d..213a4e91176c5 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -177,7 +177,8 @@ Other Enhancements - :func:`read_html` copies cell data across ``colspan`` and ``rowspan``, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) -- :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) +- :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`). + The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`). 
- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) - :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 834cc3d188b39..ebd35cb1a6a1a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1715,7 +1715,7 @@ def to_panel(self): def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression=None, quoting=None, + mode='w', encoding=None, compression='infer', quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'): @@ -1750,10 +1750,14 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, encoding : string, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' If 'infer' and `path_or_buf` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip' or '.xz' (otherwise no compression). + + .. 
versionchanged:: 0.24.0 + 'infer' option added and set to default line_terminator : string, default ``'\n'`` The newline character or character sequence to use in the output file diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7a12ce0e1385e..f62605c342702 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1933,7 +1933,7 @@ def _repr_latex_(self): def to_json(self, path_or_buf=None, orient=None, date_format=None, double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False, compression=None, + default_handler=None, lines=False, compression='infer', index=True): """ Convert the object to a JSON string. @@ -1999,13 +1999,14 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, like. .. versionadded:: 0.19.0 - - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' A string representing the compression to use in the output file, only used when the first argument is a filename. .. versionadded:: 0.21.0 - + .. versionchanged:: 0.24.0 + 'infer' option added and set to default index : boolean, default True Whether to include the index values in the JSON string. 
Not including the index (``index=False``) is only supported when diff --git a/pandas/core/series.py b/pandas/core/series.py index 8f9fe5ee516e6..21dea15772cc0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3767,7 +3767,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, def to_csv(self, path=None, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, - mode='w', encoding=None, compression=None, date_format=None, + mode='w', encoding=None, compression='infer', date_format=None, decimal='.'): """ Write Series to a comma-separated values (csv) file @@ -3795,10 +3795,13 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', encoding : string, optional a string representing the encoding to use if the contents are non-ascii, for python versions prior to 3 - compression : string, optional + compression : None or string, default 'infer' A string representing the compression to use in the output file. - Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only - used when the first argument is a filename. + Allowed values are None, 'gzip', 'bz2', 'zip', 'xz', and 'infer'. + This input is only used when the first argument is a filename. + + .. versionchanged:: 0.24.0 + 'infer' option added and set to default date_format: string, default None Format string for datetime objects. decimal: string, default '.' 
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 0796888554a46..6fabd2573a7b4 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -21,8 +21,13 @@ from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCPeriodIndex, ABCDatetimeIndex, ABCIndexClass) -from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user, - _stringify_path) +from pandas.io.common import ( + _expand_user, + _get_handle, + _infer_compression, + _stringify_path, + UnicodeWriter, +) class CSVFormatter(object): @@ -30,7 +35,7 @@ class CSVFormatter(object): def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, cols=None, header=True, index=True, index_label=None, mode='w', nanRep=None, encoding=None, - compression=None, quoting=None, line_terminator='\n', + compression='infer', quoting=None, line_terminator='\n', chunksize=None, tupleize_cols=False, quotechar='"', date_format=None, doublequote=True, escapechar=None, decimal='.'): @@ -50,8 +55,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.index = index self.index_label = index_label self.mode = mode + if encoding is None: + encoding = 'ascii' if compat.PY2 else 'utf-8' self.encoding = encoding - self.compression = compression + self.compression = _infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL @@ -124,16 +131,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.nlevels = 0 def save(self): - # create the writer & save - if self.encoding is None: - if compat.PY2: - encoding = 'ascii' - else: - encoding = 'utf-8' - else: - encoding = self.encoding - - # GH 21227 internal compression is not used when file-like passed. + """ + Create the writer & save + """ + # GH21227 internal compression is not used when file-like passed. 
if self.compression and hasattr(self.path_or_buf, 'write'): msg = ("compression has no effect when passing file-like " "object as input.") @@ -147,7 +148,7 @@ def save(self): if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression - # file handle. GH 21241, 21118 + # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, 'write'): @@ -155,7 +156,7 @@ def save(self): close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=encoding, + encoding=self.encoding, compression=self.compression) close = True @@ -165,23 +166,23 @@ def save(self): doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) - if encoding == 'ascii': + if self.encoding == 'ascii': self.writer = csvlib.writer(f, **writer_kwargs) else: - writer_kwargs['encoding'] = encoding + writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) self._save() finally: if is_zip: - # GH 17778 handles zip compression separately. + # GH17778 handles zip compression separately. 
buf = f.getvalue() if hasattr(self.path_or_buf, 'write'): self.path_or_buf.write(buf) else: f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=encoding, + encoding=self.encoding, compression=self.compression) f.write(buf) close = True diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 629e00ebfa7d0..c5f8872f93d94 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -28,7 +28,7 @@ # interface to/from def to_json(path_or_buf, obj, orient=None, date_format='epoch', double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False, compression=None, + default_handler=None, lines=False, compression='infer', index=True): if not index and orient not in ['split', 'table']: diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 5c9739be73393..ceaac9818354a 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -1,19 +1,20 @@ """ - Tests for the pandas.io.common functionalities +Tests for the pandas.io.common functionalities """ import mmap -import pytest import os -from os.path import isabs + +import pytest import pandas as pd -import pandas.util.testing as tm +import pandas.io.common as icom import pandas.util._test_decorators as td - -from pandas.io import common -from pandas.compat import is_platform_windows, StringIO, FileNotFoundError - -from pandas import read_csv, concat +import pandas.util.testing as tm +from pandas.compat import ( + is_platform_windows, + StringIO, + FileNotFoundError, +) class CustomFSPath(object): @@ -55,24 +56,24 @@ class TestCommonIOCapabilities(object): def test_expand_user(self): filename = '~/sometest' - expanded_name = common._expand_user(filename) + expanded_name = icom._expand_user(filename) assert expanded_name != filename - assert isabs(expanded_name) + assert os.path.isabs(expanded_name) assert os.path.expanduser(filename) == expanded_name def test_expand_user_normal_path(self): filename = 
'/somefolder/sometest' - expanded_name = common._expand_user(filename) + expanded_name = icom._expand_user(filename) assert expanded_name == filename assert os.path.expanduser(filename) == expanded_name @td.skip_if_no('pathlib') def test_stringify_path_pathlib(self): - rel_path = common._stringify_path(Path('.')) + rel_path = icom._stringify_path(Path('.')) assert rel_path == '.' - redundant_path = common._stringify_path(Path('foo//bar')) + redundant_path = icom._stringify_path(Path('foo//bar')) assert redundant_path == os.path.join('foo', 'bar') @td.skip_if_no('py.path') @@ -80,11 +81,11 @@ def test_stringify_path_localpath(self): path = os.path.join('foo', 'bar') abs_path = os.path.abspath(path) lpath = LocalPath(path) - assert common._stringify_path(lpath) == abs_path + assert icom._stringify_path(lpath) == abs_path def test_stringify_path_fspath(self): p = CustomFSPath('foo/bar.csv') - result = common._stringify_path(p) + result = icom._stringify_path(p) assert result == 'foo/bar.csv' @pytest.mark.parametrize('extension,expected', [ @@ -97,36 +98,36 @@ def test_stringify_path_fspath(self): @pytest.mark.parametrize('path_type', path_types) def test_infer_compression_from_path(self, extension, expected, path_type): path = path_type('foo/bar.csv' + extension) - compression = common._infer_compression(path, compression='infer') + compression = icom._infer_compression(path, compression='infer') assert compression == expected def test_get_filepath_or_buffer_with_path(self): filename = '~/sometest' - filepath_or_buffer, _, _, should_close = common.get_filepath_or_buffer( + filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( filename) assert filepath_or_buffer != filename - assert isabs(filepath_or_buffer) + assert os.path.isabs(filepath_or_buffer) assert os.path.expanduser(filename) == filepath_or_buffer assert not should_close def test_get_filepath_or_buffer_with_buffer(self): input_buffer = StringIO() - filepath_or_buffer, _, _, should_close = 
common.get_filepath_or_buffer( + filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( input_buffer) assert filepath_or_buffer == input_buffer assert not should_close def test_iterator(self): - reader = read_csv(StringIO(self.data1), chunksize=1) - result = concat(reader, ignore_index=True) - expected = read_csv(StringIO(self.data1)) + reader = pd.read_csv(StringIO(self.data1), chunksize=1) + result = pd.concat(reader, ignore_index=True) + expected = pd.read_csv(StringIO(self.data1)) tm.assert_frame_equal(result, expected) # GH12153 - it = read_csv(StringIO(self.data1), chunksize=1) + it = pd.read_csv(StringIO(self.data1), chunksize=1) first = next(it) tm.assert_frame_equal(first, expected.iloc[[0]]) - tm.assert_frame_equal(concat(it), expected.iloc[1:]) + tm.assert_frame_equal(pd.concat(it), expected.iloc[1:]) @pytest.mark.parametrize('reader, module, error_class, fn_ext', [ (pd.read_csv, 'os', FileNotFoundError, 'csv'), @@ -246,18 +247,18 @@ def test_constructor_bad_file(self, mmap_file): msg = "[Errno 22]" err = mmap.error - tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file) + tm.assert_raises_regex(err, msg, icom.MMapWrapper, non_file) target = open(mmap_file, 'r') target.close() msg = "I/O operation on closed file" tm.assert_raises_regex( - ValueError, msg, common.MMapWrapper, target) + ValueError, msg, icom.MMapWrapper, target) def test_get_attr(self, mmap_file): with open(mmap_file, 'r') as target: - wrapper = common.MMapWrapper(target) + wrapper = icom.MMapWrapper(target) attrs = dir(wrapper.mmap) attrs = [attr for attr in attrs @@ -271,7 +272,7 @@ def test_get_attr(self, mmap_file): def test_next(self, mmap_file): with open(mmap_file, 'r') as target: - wrapper = common.MMapWrapper(target) + wrapper = icom.MMapWrapper(target) lines = target.readlines() for line in lines: @@ -285,4 +286,4 @@ def test_unknown_engine(self): df = tm.makeDataFrame() df.to_csv(path) with tm.assert_raises_regex(ValueError, 'Unknown engine'): - 
read_csv(path, engine='pyt') + pd.read_csv(path, engine='pyt') diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py new file mode 100644 index 0000000000000..76788ced44e84 --- /dev/null +++ b/pandas/tests/io/test_compression.py @@ -0,0 +1,99 @@ +import os + +import pytest + +import pandas as pd +import pandas.io.common as icom +import pandas.util.testing as tm + + +@pytest.mark.parametrize('obj', [ + pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) +def test_compression_size(obj, method, compression_only): + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize('obj', [ + pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_csv', 'to_json']) +def test_compression_size_fh(obj, method, compression_only): + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=compression_only) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + compressed_size = os.path.getsize(path) + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=None) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize('write_method, write_kwargs, read_method', [ + ('to_csv', {'index': False}, pd.read_csv), + ('to_json', {}, pd.read_json), + 
('to_pickle', {}, pd.read_pickle), +]) +def test_dataframe_compression_defaults_to_infer( + write_method, write_kwargs, read_method, compression_only): + # GH22004 + input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=['X', 'Y', 'Z']) + extension = icom._compression_to_extension[compression_only] + with tm.ensure_clean('compressed' + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only) + tm.assert_frame_equal(output, input) + + +@pytest.mark.parametrize('write_method,write_kwargs,read_method,read_kwargs', [ + ('to_csv', {'index': False, 'header': True}, + pd.read_csv, {'squeeze': True}), + ('to_json', {}, pd.read_json, {'typ': 'series'}), + ('to_pickle', {}, pd.read_pickle, {}), +]) +def test_series_compression_defaults_to_infer( + write_method, write_kwargs, read_method, read_kwargs, + compression_only): + # GH22004 + input = pd.Series([0, 5, -2, 10], name='X') + extension = icom._compression_to_extension[compression_only] + with tm.ensure_clean('compressed' + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only, **read_kwargs) + tm.assert_series_equal(output, input, check_names=False) + + +def test_compression_warning(compression_only): + # Assert that passing a file object to to_csv while explicitly specifying a + # compression protocol triggers a RuntimeWarning, as per GH21227. + # Note that pytest has an issue that causes assert_produces_warning to fail + # in Python 2 if the warning has occurred in previous tests + # (see https://git.io/fNEBm & https://git.io/fNEBC). Hence, should this + # test fail in just Python 2 builds, it likely indicates that other tests + # are producing RuntimeWarnings, thereby triggering the pytest bug. 
+ df = pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']) + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=compression_only) + with tm.assert_produces_warning(RuntimeWarning, + check_stacklevel=False): + with f: + df.to_csv(f, compression=compression_only) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index e1c9202189972..868525e818b62 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,17 +1,16 @@ # -*- coding: utf-8 -*- -import pytest -import os import collections from functools import partial import numpy as np +import pytest -from pandas import Series, DataFrame, Timestamp -import pandas.core.common as com -from pandas.core import ops -from pandas.io.common import _get_handle -import pandas.util.testing as tm +from pandas import Series, Timestamp +from pandas.core import ( + common as com, + ops, +) def test_get_callable_name(): @@ -20,7 +19,7 @@ def test_get_callable_name(): def fn(x): return x - lambda_ = lambda x: x + lambda_ = lambda x: x # noqa: E731 part1 = partial(fn) part2 = partial(part1) @@ -111,57 +110,3 @@ def test_standardize_mapping(): dd = collections.defaultdict(list) assert isinstance(com.standardize_mapping(dd), partial) - - -@pytest.mark.parametrize('obj', [ - DataFrame(100 * [[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']), - Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) -@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) -def test_compression_size(obj, method, compression_only): - - with tm.ensure_clean() as filename: - getattr(obj, method)(filename, compression=compression_only) - compressed = os.path.getsize(filename) - getattr(obj, method)(filename, compression=None) - uncompressed = os.path.getsize(filename) - assert uncompressed > compressed - - -@pytest.mark.parametrize('obj', [ - DataFrame(100 * 
[[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']), - Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) -@pytest.mark.parametrize('method', ['to_csv', 'to_json']) -def test_compression_size_fh(obj, method, compression_only): - - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=compression_only) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - compressed = os.path.getsize(filename) - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=None) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - uncompressed = os.path.getsize(filename) - assert uncompressed > compressed - - -# GH 21227 -def test_compression_warning(compression_only): - df = DataFrame(100 * [[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']) - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=compression_only) - with tm.assert_produces_warning(RuntimeWarning, - check_stacklevel=False): - with f: - df.to_csv(f, compression=compression_only)
- [x] closes https://github.com/pandas-dev/pandas/issues/22004 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR does the following: - Update default compression for `to_csv`, `to_json`, and `to_pickle` methods to infer. - Adds `test_compression_defaults_to_infer` to test that compression='infer' is default for the relevant to_* methods. - Fixes a bug in CSVFormatter where setting `compression='infer'` with a file object would produce a RuntimeWarning. - Adds documentation to `test_compression_warning` which can fail due to a pytest bug. - Cleans up how the encoding argument in CSVFormatter is processed. - Moves compression tests from `pandas/tests/test_common.py` to `pandas/tests/io/test_common.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/22011
2018-07-21T15:14:44Z
2018-08-01T21:23:34Z
2018-08-01T21:23:34Z
2018-08-06T21:35:15Z
CLN: remove F821 flake8 error in test case
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 76a50a9ecf5e7..30a670ead3aa0 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -9,7 +9,7 @@ import numpy as np from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, Categorical, compat, concat, option_context) -from pandas.compat import u, PY2 +from pandas.compat import u from pandas import _np_version_under1p14 from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype @@ -356,9 +356,10 @@ def test_select_dtypes_datetime_with_tz(self): expected = df3.reindex(columns=[]) assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "dtype", [str, "str", np.string_, "S1", - "unicode", np.unicode_, "U1"] + ([unicode] if PY2 else [])) + @pytest.mark.parametrize("dtype", [ + str, "str", np.string_, "S1", "unicode", np.unicode_, "U1", + compat.text_type + ]) @pytest.mark.parametrize("arg", ["include", "exclude"]) def test_select_dtypes_str_raises(self, dtype, arg): df = DataFrame({"a": list("abc"),
Kill flake8 error: F821 undefined name 'unicode' `compat.text_type == unicode if PY2`
https://api.github.com/repos/pandas-dev/pandas/pulls/22009
2018-07-21T09:57:46Z
2018-07-22T12:38:00Z
2018-07-22T12:38:00Z
2018-07-22T12:38:12Z
CLN: Remove Unneeded BlockManager methods
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 55f2e06a1a976..97cc7f96cb24f 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -32,6 +32,8 @@ cdef class BlockPlacement: def __init__(self, val): cdef slice slc + self._as_slice = None + self._as_array = None self._has_slice = False self._has_array = False @@ -144,6 +146,7 @@ cdef class BlockPlacement: other_int = <Py_ssize_t>other if other_int == 0: + # BlockPlacement is treated as immutable return self start, stop, step, l = slice_get_indices_ex(s) @@ -155,33 +158,21 @@ cdef class BlockPlacement: raise ValueError("iadd causes length change") if stop < 0: - self._as_slice = slice(start, None, step) + val = slice(start, None, step) else: - self._as_slice = slice(start, stop, step) + val = slice(start, stop, step) - self._has_array = False - self._as_array = None + return BlockPlacement(val) else: newarr = self.as_array + other if (newarr < 0).any(): raise ValueError("iadd causes length change") - self._as_array = newarr - self._has_array = True - self._has_slice = False - self._as_slice = None - - return self - - cdef BlockPlacement copy(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - return BlockPlacement(s) - else: - return BlockPlacement(self._as_array) + val = newarr + return BlockPlacement(val) def add(self, other): - return self.copy().iadd(other) + return self.iadd(other) def sub(self, other): return self.add(-other) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 8ad569003a43a..e7b7cb463a27b 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -82,7 +82,6 @@ class BlockManager(PandasObject): get_slice(slice_like, axis) get(label) iget(loc) - get_scalar(label_tup) take(indexer, axis) reindex_axis(new_labels, axis) @@ -993,21 +992,6 @@ def iget(self, i, fastpath=True): ndim=1)], self.axes[1]) - def get_scalar(self, tup): - """ - Retrieve single item - """ - 
full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] - blk = self.blocks[self._blknos[full_loc[0]]] - values = blk.values - - # FIXME: this may return non-upcasted types? - if values.ndim == 1: - return values[full_loc[1]] - - full_loc[0] = self._blklocs[full_loc[0]] - return values[tuple(full_loc)] - def delete(self, item): """ Delete selected item (items if non-unique) in-place. @@ -1382,9 +1366,9 @@ def take(self, indexer, axis=1, verify=True, convert=True): axis=axis, allow_dups=True) def merge(self, other, lsuffix='', rsuffix=''): - if not self._is_indexed_like(other): - raise AssertionError('Must have same axes to merge managers') - + # We assume at this point that the axes of self and other match. + # This is only called from Panel.join, which reindexes prior + # to calling to ensure this assumption holds. l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, right=other.items, rsuffix=rsuffix) new_items = _concat_indexes([l, r]) @@ -1402,19 +1386,6 @@ def merge(self, other, lsuffix='', rsuffix=''): return self.__class__(_consolidate(new_blocks), new_axes) - def _is_indexed_like(self, other): - """ - Check all axes except items - """ - if self.ndim != other.ndim: - raise AssertionError( - 'Number of dimensions must agree got {ndim} and ' - '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) - for ax, oax in zip(self.axes[1:], other.axes[1:]): - if not ax.equals(oax): - return False - return True - def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 39418fb72bf4a..0b06775326ab1 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -329,17 +329,6 @@ def test_is_mixed_dtype(self): assert create_mgr('a,b:f8; c,d: f4').is_mixed_type assert create_mgr('a,b:f8; c,d: object').is_mixed_type - def test_is_indexed_like(self): - mgr1 = 
create_mgr('a,b: f8') - mgr2 = create_mgr('a:i8; b:bool') - mgr3 = create_mgr('a,b,c: f8') - assert mgr1._is_indexed_like(mgr1) - assert mgr1._is_indexed_like(mgr2) - assert mgr1._is_indexed_like(mgr3) - - assert not mgr1._is_indexed_like(mgr1.get_slice( - slice(-1), axis=1)) - def test_duplicate_ref_loc_failure(self): tmp_mgr = create_mgr('a:bool; a: f8') @@ -396,15 +385,6 @@ def test_categorical_block_pickle(self): smgr2 = tm.round_trip_pickle(smgr) assert_series_equal(Series(smgr), Series(smgr2)) - def test_get_scalar(self, mgr): - for item in mgr.items: - for i, index in enumerate(mgr.axes[1]): - res = mgr.get_scalar((item, index)) - exp = mgr.get(item, fastpath=False)[i] - assert res == exp - exp = mgr.get(item).internal_values()[i] - assert res == exp - def test_get(self): cols = Index(list('abc')) values = np.random.rand(3, 3)
BlockManager.get_scalar is never used outside of tests, BlockManager._is_indexed_like is only ever called from `merge`, which is only called once in Panel, before which the indexed_like check is already done. Also BlockPlacements.iadd was more complicated than it needed to be, so tore out bits of that.
https://api.github.com/repos/pandas-dev/pandas/pulls/22002
2018-07-20T19:09:19Z
2018-07-26T12:41:33Z
2018-07-26T12:41:33Z
2018-07-26T16:20:56Z
CLN: De-privatize core.common funcs, remove unused
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 137fd5aafe5bd..8cb384f50d371 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -390,7 +390,7 @@ Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`) -- +- Several private functions were removed from the (non-public) module ``pandas.core.common`` (:issue:`22001`) - - diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 78c9113ce60de..49705cb6d9ad2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -262,7 +262,7 @@ def match(to_match, values, na_sentinel=-1): ------- match : ndarray of integers """ - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) @@ -412,7 +412,7 @@ def isin(comps, values): # handle categoricals return comps._values.isin(values) - comps = com._values_from_object(comps) + comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 0d73b2c60d76d..4584e4694cdc5 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -43,6 +43,7 @@ import pandas.core.algorithms as algorithms +from pandas.io.formats import console from pandas.io.formats.terminal import get_terminal_size from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.config import get_option @@ -1887,7 +1888,7 @@ def _repr_categories_info(self): length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width - if com.in_ipython_frontend(): + if 
console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 78e6d1f222160..29f97b344f267 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -120,7 +120,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) result = meth(self, np.asarray(other)) - result = com._values_from_object(result) + result = com.values_from_object(result) # Make sure to pass an array to result[...]; indexing with # Series breaks with older version of numpy diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2c8853dec4f69..5ecc79e030f56 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -984,7 +984,7 @@ def __array__(self, dtype=None): examples='', )) def to_tuples(self, na_tuple=True): - tuples = com._asarray_tuplesafe(zip(self.left, self.right)) + tuples = com.asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 tuples = np.where(~self.isna(), tuples, np.nan) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index cb5afa34add2a..9c98f73312dbf 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -167,7 +167,7 @@ def _generate_range(cls, start, end, periods, freq, fields): freq = Period._maybe_convert_freq(freq) field_count = len(fields) - if com._count_not_none(start, end) > 0: + if com.count_not_none(start, end) > 0: if field_count > 0: raise ValueError('Can either instantiate from fields ' 'or endpoints, but not both') @@ -392,7 +392,7 @@ def _maybe_convert_timedelta(self, other): # Constructor Helpers def _get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/arrays/timedeltas.py 
b/pandas/core/arrays/timedeltas.py index efa7c0b0e44d4..cc93644677463 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -81,7 +81,7 @@ def wrapper(self, other): else: other = type(self)(other).values result = meth(self, other) - result = com._values_from_object(result) + result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): @@ -150,7 +150,7 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, @classmethod def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # **kwargs are for compat with TimedeltaIndex, which includes `name` - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') diff --git a/pandas/core/base.py b/pandas/core/base.py index 1226662824eb5..5382315bad32b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -581,7 +581,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): results.append(colg.aggregate(a)) # make sure we find a good name - name = com._get_callable_name(a) or a + name = com.get_callable_name(a) or a keys.append(name) except (TypeError, DataError): pass @@ -856,7 +856,7 @@ def tolist(self): numpy.ndarray.tolist """ if is_datetimelike(self._values): - return [com._maybe_box_datetimelike(x) for x in self._values] + return [com.maybe_box_datetimelike(x) for x in self._values] elif is_extension_array_dtype(self._values): return list(self._values) else: diff --git a/pandas/core/common.py b/pandas/core/common.py index 0ca776b6bfa77..0350b338f2bee 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1,5 +1,7 @@ """ Misc tools for implementing data structures + +Note: pandas.core.common is *not* part of the public API. 
""" from datetime import datetime, timedelta @@ -11,8 +13,7 @@ from pandas._libs import lib, tslibs from pandas import compat -from pandas.compat import long, zip, iteritems, PY36, OrderedDict -from pandas.core.config import get_option +from pandas.compat import iteritems, PY36, OrderedDict from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string @@ -52,7 +53,7 @@ def flatten(l): yield el -def _consensus_name_attr(objs): +def consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: try: @@ -63,7 +64,8 @@ def _consensus_name_attr(objs): return name -def _get_info_slice(obj, indexer): +# TODO: only used once in frame.py; belongs elsewhere? +def get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' @@ -73,7 +75,7 @@ def _get_info_slice(obj, indexer): return tuple(slices) -def _maybe_box(indexer, values, obj, key): +def maybe_box(indexer, values, obj, key): # if we have multiples coming back, box em if isinstance(values, np.ndarray): @@ -83,7 +85,7 @@ def _maybe_box(indexer, values, obj, key): return values -def _maybe_box_datetimelike(value): +def maybe_box_datetimelike(value): # turn a datetime like into a Timestamp/timedelta as needed if isinstance(value, (np.datetime64, datetime)): @@ -94,13 +96,13 @@ def _maybe_box_datetimelike(value): return value -_values_from_object = lib.values_from_object +values_from_object = lib.values_from_object def is_bool_indexer(key): if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)): if key.dtype == np.object_: - key = np.asarray(_values_from_object(key)) + key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): @@ -120,19 +122,6 @@ def is_bool_indexer(key): return False -def _mut_exclusive(**kwargs): - item1, item2 = kwargs.items() - label1, 
val1 = item1 - label2, val2 = item2 - if val1 is not None and val2 is not None: - msg = 'mutually exclusive arguments: {label1!r} and {label2!r}' - raise TypeError(msg.format(label1=label1, label2=label2)) - elif val1 is not None: - return val1 - else: - return val2 - - def _not_none(*args): """Returns a generator consisting of the arguments that are not None""" return (arg for arg in args if arg is not None) @@ -170,12 +159,12 @@ def _all_not_none(*args): return True -def _count_not_none(*args): +def count_not_none(*args): """Returns the count of arguments that are not None""" return sum(x is not None for x in args) -def _try_sort(iterable): +def try_sort(iterable): listed = list(iterable) try: return sorted(listed) @@ -183,117 +172,17 @@ def _try_sort(iterable): return listed -def _dict_keys_to_ordered_list(mapping): +def dict_keys_to_ordered_list(mapping): # when pandas drops support for Python < 3.6, this function # can be replaced by a simple list(mapping.keys()) if PY36 or isinstance(mapping, OrderedDict): keys = list(mapping.keys()) else: - keys = _try_sort(mapping) + keys = try_sort(mapping) return keys -def iterpairs(seq): - """ - Parameters - ---------- - seq : sequence - - Returns - ------- - iterator returning overlapping pairs of elements - - Examples - -------- - >>> list(iterpairs([1, 2, 3, 4])) - [(1, 2), (2, 3), (3, 4)] - """ - # input may not be sliceable - seq_it = iter(seq) - seq_it_next = iter(seq) - next(seq_it_next) - - return zip(seq_it, seq_it_next) - - -def split_ranges(mask): - """ Generates tuples of ranges which cover all True value in mask - - >>> list(split_ranges([1,0,0,1,0])) - [(0, 1), (3, 4)] - """ - ranges = [(0, len(mask))] - - for pos, val in enumerate(mask): - if not val: # this pos should be omitted, split off the prefix range - r = ranges.pop() - if pos > r[0]: # yield non-zero range - yield (r[0], pos) - if pos + 1 < len(mask): # save the rest for processing - ranges.append((pos + 1, len(mask))) - if ranges: - yield 
ranges[-1] - - -def _long_prod(vals): - result = long(1) - for x in vals: - result *= x - return result - - -class groupby(dict): - """ - A simple groupby different from the one in itertools. - - Does not require the sequence elements to be sorted by keys, - however it is slower. - """ - - def __init__(self, seq, key=lambda x: x): - for value in seq: - k = key(value) - self.setdefault(k, []).append(value) - - try: - __iter__ = dict.iteritems - except AttributeError: # pragma: no cover - # Python 3 - def __iter__(self): - return iter(dict.items(self)) - - -def map_indices_py(arr): - """ - Returns a dictionary with (element, index) pairs for each element in the - given array/list - """ - return {x: i for i, x in enumerate(arr)} - - -def union(*seqs): - result = set([]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result |= seq - return type(seqs[0])(list(result)) - - -def difference(a, b): - return type(a)(list(set(a) - set(b))) - - -def intersection(*seqs): - result = set(seqs[0]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result &= seq - return type(seqs[0])(list(result)) - - -def _asarray_tuplesafe(values, dtype=None): +def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) @@ -317,7 +206,7 @@ def _asarray_tuplesafe(values, dtype=None): return result -def _index_labels_to_array(labels, dtype=None): +def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. 
@@ -339,12 +228,12 @@ def _index_labels_to_array(labels, dtype=None): except TypeError: # non-iterable labels = [labels] - labels = _asarray_tuplesafe(labels, dtype=dtype) + labels = asarray_tuplesafe(labels, dtype=dtype) return labels -def _maybe_make_list(obj): +def maybe_make_list(obj): if obj is not None and not isinstance(obj, (tuple, list)): return [obj] return obj @@ -363,19 +252,20 @@ def is_true_slices(l): return [isinstance(k, slice) and not is_null_slice(k) for k in l] +# TODO: used only once in indexing; belongs elsewhere? def is_full_slice(obj, l): """ we have a full length slice """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None) -def _get_callable_name(obj): +def get_callable_name(obj): # typical case has name if hasattr(obj, '__name__'): return getattr(obj, '__name__') # some objects don't; could recurse if isinstance(obj, partial): - return _get_callable_name(obj.func) + return get_callable_name(obj.func) # fall back to class name if hasattr(obj, '__call__'): return obj.__class__.__name__ @@ -386,7 +276,7 @@ def _get_callable_name(obj): return None -def _apply_if_callable(maybe_callable, obj, **kwargs): +def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is @@ -404,7 +294,7 @@ def _apply_if_callable(maybe_callable, obj, **kwargs): return maybe_callable -def _dict_compat(d): +def dict_compat(d): """ Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict @@ -417,7 +307,7 @@ def _dict_compat(d): dict """ - return dict((_maybe_box_datetimelike(key), value) + return dict((maybe_box_datetimelike(key), value) for key, value in iteritems(d)) @@ -464,78 +354,7 @@ class Sentinel(object): return Sentinel() -# ---------------------------------------------------------------------- -# Detect our environment - -def in_interactive_session(): - """ check if we're running in an interactive shell - - 
returns True if running under python/ipython interactive shell - """ - - def check_main(): - import __main__ as main - return (not hasattr(main, '__file__') or - get_option('mode.sim_interactive')) - - try: - return __IPYTHON__ or check_main() # noqa - except: - return check_main() - - -def in_qtconsole(): - """ - check if we're inside an IPython qtconsole - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'qtconsole' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipnb(): - """ - check if we're inside an IPython Notebook - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'notebook' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipython_frontend(): - """ - check if we're inside an an IPython zmq frontend - """ - try: - ip = get_ipython() # noqa - return 'zmq' in str(type(ip)).lower() - except: - pass - - return False - - -def _random_state(state=None): +def random_state(state=None): """ Helper function for processing random_state arguments. @@ -564,7 +383,8 @@ def _random_state(state=None): "RandomState, or None") -def _get_distinct_objs(objs): +# TODO: only used once in indexes.api; belongs elsewhere? +def get_distinct_objs(objs): """ Return a list with distinct elements of "objs" (different ids). Preserves order. 
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 781101f5804e6..ac552e7b80de3 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -123,8 +123,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, def _where_standard(cond, a, b): - return np.where(com._values_from_object(cond), com._values_from_object(a), - com._values_from_object(b)) + return np.where(com.values_from_object(cond), com.values_from_object(a), + com.values_from_object(b)) def _where_numexpr(cond, a, b): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 26eefa75b2675..2bd1b0c5b3507 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -190,7 +190,7 @@ def stringify(value): v = _coerce_scalar_to_timedelta_type(v, unit='s').value return TermValue(int(v), v, kind) elif meta == u('category'): - metadata = com._values_from_object(self.metadata) + metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 873170eb9813b..078e176ff2b99 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -466,7 +466,7 @@ def _init_dict(self, data, index, columns, dtype=None): arrays.loc[missing] = [v] * missing.sum() else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) arrays = [data[k] for k in keys] @@ -617,11 +617,11 @@ def _repr_fits_horizontal_(self, ignore_width=False): # used by repr_html under IPython notebook or scripts ignore terminal # dims - if ignore_width or not com.in_interactive_session(): + if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or - com.in_ipython_frontend()): + console.in_ipython_frontend()): # check 
at least the column row for excessive width max_rows = 1 else: @@ -689,7 +689,7 @@ def _repr_html_(self): # XXX: In IPython 3.x and above, the Qt console will not attempt to # display HTML, so this check can be removed when support for # IPython 2.x is no longer needed. - if com.in_qtconsole(): + if console.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None @@ -1100,13 +1100,13 @@ def to_dict(self, orient='dict', into=dict): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), - com._maybe_box_datetimelike) + com.maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): - return into_c((k, com._maybe_box_datetimelike(v)) + return into_c((k, com.maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): - return [into_c((k, com._maybe_box_datetimelike(v)) + return [into_c((k, com.maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): @@ -2614,7 +2614,7 @@ def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) - return com._maybe_box_datetimelike(series._values[index]) + return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine @@ -2746,7 +2746,7 @@ def _ixs(self, i, axis=0): return result def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: @@ -3183,7 +3183,7 @@ def is_dtype_instance_mapper(idx, dtype): exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these - return self.loc[com._get_info_slice(self, dtype_indexer)] + return self.loc[com.get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] @@ 
-3198,7 +3198,7 @@ def _box_col_values(self, values, items): return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) @@ -3403,12 +3403,12 @@ def assign(self, **kwargs): # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): - data[k] = com._apply_if_callable(v, data) + data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): - results[k] = com._apply_if_callable(v, data) + results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) @@ -3489,7 +3489,7 @@ def reindexer(value): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: - value = com._asarray_tuplesafe(value) + value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): @@ -7827,7 +7827,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - values = com._values_from_object(s) + values = com.values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -7915,7 +7915,7 @@ def _homogenize(data, index, dtype=None): oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = com._dict_compat(v) + v = com.dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fa4572dd7b979..fd3d2a5802413 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1090,7 +1090,7 @@ def rename(self, *args, **kwargs): raise TypeError('rename() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) 
- if com._count_not_none(*axes.values()) == 0: + if com.count_not_none(*axes.values()) == 0: raise TypeError('must pass an index to rename') # renamer function if passed a dict @@ -1265,7 +1265,7 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS) def __neg__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1277,7 +1277,7 @@ def __neg__(self): return self.__array_wrap__(arr) def __pos__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if (is_bool_dtype(values) or is_period_arraylike(values)): arr = values elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1290,7 +1290,7 @@ def __pos__(self): def __invert__(self): try: - arr = operator.inv(com._values_from_object(self)) + arr = operator.inv(com.values_from_object(self)) return self.__array_wrap__(arr) except Exception: @@ -1587,7 +1587,7 @@ def _drop_labels_or_levels(self, keys, axis=0): .format(type=type(self))) # Validate keys - keys = com._maybe_make_list(keys) + keys = com.maybe_make_list(keys) invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)] @@ -1753,7 +1753,7 @@ def __round__(self, decimals=0): # Array Interface def __array__(self, dtype=None): - return com._values_from_object(self) + return com.values_from_object(self) def __array_wrap__(self, result, context=None): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) @@ -3188,7 +3188,7 @@ def xs(self, key, axis=0, level=None, drop_level=True): # that means that their are list/ndarrays inside the Series! 
# so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: - return com._maybe_box_datetimelike(new_values) + return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, @@ -3328,7 +3328,7 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): # Case for non-unique axis else: - labels = ensure_object(com._index_labels_to_array(labels)) + labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') @@ -3893,7 +3893,7 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" - return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and + return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type) def _reindex_multi(self, axes, copy, fill_value): @@ -4067,7 +4067,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = com._count_not_none(items, like, regex) + nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') @@ -4313,7 +4313,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, axis_length = self.shape[axis] # Process random_state argument - rs = com._random_state(random_state) + rs = com.random_state(random_state) # Check weights for compliance if weights is not None: @@ -7745,7 +7745,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, inplace = validate_bool_kwarg(inplace, 'inplace') # align the cond to same shape as myself - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join='right', broadcast_axis=1) else: @@ 
-7815,7 +7815,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = com._values_from_object(self) + new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other @@ -8012,7 +8012,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, else: errors = 'ignore' - other = com._apply_if_callable(other, self) + other = com.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level, errors=errors, try_cast=try_cast) @@ -8034,7 +8034,7 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors = 'ignore' inplace = validate_bool_kwarg(inplace, 'inplace') - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): @@ -8982,7 +8982,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, **kwargs)) - 1) rs = rs.reindex_like(data) if freq is None: - mask = isna(com._values_from_object(data)) + mask = isna(com.values_from_object(data)) np.putmask(rs.values, mask, np.nan) return rs @@ -9913,7 +9913,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): else: axis = self._get_axis_number(axis) - y = com._values_from_object(self).copy() + y = com.values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 169416d6f8211..fdededc325b03 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -819,7 +819,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(com._get_callable_name(f)) + columns.append(com.get_callable_name(f)) arg = lzip(columns, arg) results = {} diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 
3070fa0e63c88..36cdfbd3b3479 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -288,7 +288,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = com._asarray_tuplesafe(self.grouper) + self.grouper = com.asarray_tuplesafe(self.grouper) # a passed Categorical elif is_categorical_dtype(self.grouper): @@ -533,7 +533,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [com._asarray_tuplesafe(keys)] + keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index f2c55a56b119d..38ac144ac6c95 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -175,7 +175,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = com._get_callable_name(f) + f_name = com.get_callable_name(f) if (f_name not in base.plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: @@ -209,7 +209,7 @@ def indices(self): return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] - keys = [com._values_from_object(ping.group_index) + keys = [com.values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys) diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index b409d695a73e8..3f3448d104165 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -55,7 +55,7 @@ def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True): def _get_combined_index(indexes, intersect=False, sort=False): # TODO: handle index names! 
- indexes = com._get_distinct_objs(indexes) + indexes = com.get_distinct_objs(indexes) if len(indexes) == 0: index = Index([]) elif len(indexes) == 1: @@ -130,7 +130,7 @@ def _sanitize_and_check(indexes): if list in kinds: if len(kinds) > 1: - indexes = [Index(com._try_sort(x)) + indexes = [Index(com.try_sort(x)) if not isinstance(x, Index) else x for x in indexes] kinds.remove(list) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cf4b4fe6bc084..20926ea5163af 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -381,9 +381,9 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') else: - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) - # _asarray_tuplesafe does not always copy underlying data, + # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens if copy: subarr = subarr.copy() @@ -449,7 +449,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return MultiIndex.from_tuples( data, names=name or kwargs.get('names')) # other iterable of some kind - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) """ @@ -1706,7 +1706,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) return keyarr _index_shared_docs['_convert_index_indexer'] = """ @@ -2001,7 +2001,7 @@ def __getitem__(self, key): if com.is_bool_indexer(key): key = np.asarray(key) - key = com._values_from_object(key) + key = com.values_from_object(key) result = getitem(key) if not is_scalar(result): return promote(result) @@ -2367,8 
+2367,8 @@ def equals(self, other): return other.equals(self) try: - return array_equivalent(com._values_from_object(self), - com._values_from_object(other)) + return array_equivalent(com.values_from_object(self), + com.values_from_object(other)) except Exception: return False @@ -3072,8 +3072,8 @@ def get_value(self, series, key): elif is_integer(key): return s[key] - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: @@ -3106,8 +3106,8 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(com._values_from_object(arr), - com._values_from_object(key), value) + self._engine.set_value(com.values_from_object(arr), + com.values_from_object(key), value) def _get_level_values(self, level): """ @@ -4432,7 +4432,7 @@ def drop(self, labels, errors='raise'): If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None - labels = com._index_labels_to_array(labels, dtype=arr_dtype) + labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -4725,7 +4725,7 @@ def _validate_for_numeric_binop(self, other, op): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") - other = com._values_from_object(other) + other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index a03e478f81caf..d76a7ef00f625 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -440,7 +440,7 @@ def get_value(self, series, key): know what you're doing """ try: - k = 
com._values_from_object(key) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] @@ -629,7 +629,7 @@ def _convert_list_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6ed752d3a213d..933e7406b5af3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -377,7 +377,7 @@ def __new__(cls, data=None, @classmethod def _generate_range(cls, start, end, periods, name, freq, tz=None, normalize=False, ambiguous='raise', closed=None): - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') @@ -1276,8 +1276,8 @@ def get_value(self, series, key): return series.take(locs) try: - return com._maybe_box(self, Index.get_value(self, series, key), - series, key) + return com.maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1296,9 +1296,9 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(com._values_from_object(series), + values = self._engine.get_value(com.values_from_object(series), key, tz=self.tz) - return com._maybe_box(self, values, series, key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e92f980caf3dc..246bd3d541b72 100644 --- 
a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1120,14 +1120,14 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - start = com._maybe_box_datetimelike(start) - end = com._maybe_box_datetimelike(end) + start = com.maybe_box_datetimelike(start) + end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com._any_none(periods, start, end): freq = 1 if is_number(endpoint) else 'D' - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and ' 'freq, exactly three must be specified') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0d4ceb2783bad..7d24a901382bb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -950,8 +950,8 @@ def get_value(self, series, key): from pandas.core.indexing import maybe_droplevels # Label-based - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? 
@@ -1691,7 +1691,7 @@ def drop(self, labels, level=None, errors='raise'): try: if not isinstance(labels, (np.ndarray, Index)): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -1730,7 +1730,7 @@ def drop(self, labels, level=None, errors='raise'): return self.delete(inds) def _drop_from_level(self, labels, level): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(labels) @@ -2628,7 +2628,7 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - other_vals = com._values_from_object(ensure_index(other)) + other_vals = com.values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 1fe0c8fa289e6..ea392d0b93377 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -249,9 +249,9 @@ def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. 
- keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if is_integer_dtype(keyarr): - return com._asarray_tuplesafe(keyarr, dtype=np.uint64) + return com.asarray_tuplesafe(keyarr, dtype=np.uint64) return keyarr @Appender(_index_shared_docs['_convert_index_indexer']) @@ -354,9 +354,9 @@ def get_value(self, series, key): if not is_scalar(key): raise InvalidIndexError - k = com._values_from_object(key) + k = com.values_from_object(key) loc = self.get_loc(k) - new_values = com._values_from_object(series)[loc] + new_values = com.values_from_object(series)[loc] return new_values diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index ab1b3001e23e0..b315e3ec20830 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -524,11 +524,11 @@ def get_value(self, series, key): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - s = com._values_from_object(series) + s = com.values_from_object(series) try: - return com._maybe_box(self, - super(PeriodIndex, self).get_value(s, key), - series, key) + return com.maybe_box(self, + super(PeriodIndex, self).get_value(s, key), + series, key) except (KeyError, IndexError): try: asdt, parsed, reso = parse_time_string(key, self.freq) @@ -551,16 +551,16 @@ def get_value(self, series, key): return series[key] elif grp == freqn: key = Period(asdt, freq=self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), - series, key) + return com.maybe_box(self, self._engine.get_value(s, key), + series, key) else: raise KeyError(key) except TypeError: pass key = Period(key, self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), - series, key) + return com.maybe_box(self, self._engine.get_value(s, key), + series, key) @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): @@ -865,7 +865,7 @@ def 
period_range(start=None, end=None, periods=None, freq='D', name=None): PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]', freq='M') """ - if com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index af34ec8b22824..006758f276f87 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -491,8 +491,8 @@ def get_value(self, series, key): return self.get_value_maybe_box(series, key) try: - return com._maybe_box(self, Index.get_value(self, series, key), - series, key) + return com.maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -508,8 +508,8 @@ def get_value(self, series, key): def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) - values = self._engine.get_value(com._values_from_object(series), key) - return com._maybe_box(self, values, series, key) + values = self._engine.get_value(com.values_from_object(series), key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e0b6048b2ad64..13c019dea469a 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -112,7 +112,7 @@ def __iter__(self): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) @@ -126,7 +126,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) return self._getitem_axis(key, 
axis=axis) def _get_label(self, label, axis=None): @@ -186,10 +186,10 @@ def _get_setitem_indexer(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) @@ -1474,7 +1474,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird - keyarr = com._asarray_tuplesafe(key) + keyarr = com.asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so @@ -1494,7 +1494,7 @@ class _LocationIndexer(_NDFrameIndexer): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: if self._is_scalar_access(key): @@ -1506,7 +1506,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - maybe_callable = com._apply_if_callable(key, self.obj) + maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key): @@ -2282,11 +2282,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = self._tuplify(key) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index de31c6ac11c3f..32fd70bcf654d 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -205,7 +205,7 @@ def _get_values(values, skipna, 
fill_value=None, fill_value_typ=None, if necessary copy and mask using the specified fill_value copy = True will force the copy """ - values = com._values_from_object(values) + values = com.values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -440,7 +440,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1): @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - values = com._values_from_object(values) + values = com.values_from_object(values) dtype = values.dtype mask = isna(values) if is_any_int_dtype(values): @@ -549,7 +549,7 @@ def nanskew(values, axis=None, skipna=True): """ - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') @@ -607,7 +607,7 @@ def nankurt(values, axis=None, skipna=True): central moment. """ - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index a8c1b954a61b7..c65d2dcdc478c 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -89,7 +89,7 @@ def _maybe_match_name(a, b): See also -------- - pandas.core.common._consensus_name_attr + pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') @@ -1111,7 +1111,7 @@ def na_op(x, y): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) - result[mask] = op(x[mask], com._values_from_object(y[mask])) + result[mask] = op(x[mask], com.values_from_object(y[mask])) else: assert isinstance(x, np.ndarray) result = np.empty(len(x), dtype=x.dtype) @@ -1407,7 +1407,7 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return a full value series here - res_values = com._values_from_object(res) + res_values = com.values_from_object(res) return 
self._constructor(res_values, index=self.index, name=res_name, dtype='bool') diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 16ade3fae90a1..4ebac55eea137 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -204,7 +204,7 @@ def _init_dict(self, data, axes, dtype=None): for k, v in compat.iteritems(data) if k in haxis) else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) haxis = Index(keys) for k, v in compat.iteritems(data): @@ -282,7 +282,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -596,7 +596,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 1d6105cb68bf1..1c602a0af1ec1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -385,7 +385,7 @@ def get_result(self): # stack blocks if self.axis == 0: - name = com._consensus_name_attr(self.objs) + name = com.consensus_name_attr(self.objs) mgr = self.objs[0]._data.concat([x._data for x in self.objs], self.new_axes) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index caaeb1bad2358..3989c70c9d13f 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -500,9 +500,9 @@ def __init__(self, left, right, how='inner', on=None, self.how = how self.axis = axis - self.on = com._maybe_make_list(on) - self.left_on = com._maybe_make_list(left_on) - self.right_on = com._maybe_make_list(right_on) + self.on = 
com.maybe_make_list(on) + self.left_on = com.maybe_make_list(left_on) + self.right_on = com.maybe_make_list(right_on) self.copy = copy self.suffixes = suffixes @@ -1552,8 +1552,8 @@ def _factorize_keys(lk, rk, sort=True): rk = ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer - lk = ensure_int64(com._values_from_object(lk)) - rk = ensure_int64(com._values_from_object(rk)) + lk = ensure_int64(com.values_from_object(lk)) + rk = ensure_int64(com.values_from_object(rk)) else: klass = libhashtable.Factorizer lk = ensure_object(lk) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index da5246d389817..0d1caa3d57d73 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -470,8 +470,8 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, crosstab : DataFrame """ - index = com._maybe_make_list(index) - columns = com._maybe_make_list(columns) + index = com.maybe_make_list(index) + columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 2fe82e5d6bc57..e83bcf800e949 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -60,7 +60,7 @@ def cartesian_product(X): # if any factor is empty, the cartesian product is empty b = np.zeros_like(cumprodX) - return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), + return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)] diff --git a/pandas/core/series.py b/pandas/core/series.py index 3571e908fc6a7..03fc9701de1fc 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -766,7 +766,7 @@ def _slice(self, slobj, axis=0, kind=None): return self._get_values(slobj) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + 
key = com.apply_if_callable(key, self) try: result = self.index.get_value(self, key) @@ -884,7 +884,7 @@ def _get_values(self, indexer): return self._values[indexer] def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) def setitem(key, value): try: @@ -990,7 +990,7 @@ def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: - key = com._asarray_tuplesafe(key) + key = com.asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): @@ -1042,7 +1042,7 @@ def get_value(self, label, takeable=False): def _get_value(self, label, takeable=False): if takeable is True: - return com._maybe_box_datetimelike(self._values[label]) + return com.maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) _get_value.__doc__ = get_value.__doc__ @@ -1418,7 +1418,7 @@ def count(self, level=None): nobs : int or Series (if level specified) """ if level is None: - return notna(com._values_from_object(self)).sum() + return notna(com.values_from_object(self)).sum() if isinstance(level, compat.string_types): level = self.index._get_level_number(level) @@ -1722,7 +1722,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = nanops.nanargmin(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1792,7 +1792,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1835,7 +1835,7 @@ def round(self, decimals=0, *args, **kwargs): """ nv.validate_round(args, kwargs) - result = 
com._values_from_object(self).round(decimals) + result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -2003,7 +2003,7 @@ def diff(self, periods=1): 5 NaN dtype: float64 """ - result = algorithms.diff(com._values_from_object(self), periods) + result = algorithms.diff(com.values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): @@ -4190,7 +4190,7 @@ def _try_cast(arr, take_fast_path): if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: - subarr = com._asarray_tuplesafe(data, dtype=dtype) + subarr = com.asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f7071061d07ab..5cb9f4744cc58 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -143,7 +143,7 @@ def _init_dict(self, data, index, columns, dtype=None): columns = ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = Index(keys) if index is None: diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 96ee5b7954f45..1a92a27bfb390 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -375,7 +375,7 @@ def __getitem__(self, key): # Could not hash item, must be array-like? 
pass - key = com._values_from_object(key) + key = com.values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index a3e091d43f261..6349af4d2e0ac 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -55,7 +55,7 @@ def _get_array_list(arr, others): """ from pandas.core.series import Series - if len(others) and isinstance(com._values_from_object(others)[0], + if len(others) and isinstance(com.values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) else: @@ -702,7 +702,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = libops.vec_binop(com._values_from_object(arr), repeats, rep) + result = libops.vec_binop(com.values_from_object(arr), repeats, rep) return result diff --git a/pandas/core/window.py b/pandas/core/window.py index 6b6f27bcb3863..f3b4aaa74ec6b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -625,7 +625,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return com._asarray_tuplesafe(window).astype(float) + return com.asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig @@ -2467,7 +2467,7 @@ def dataframe_from_int_dict(data, frame_template): def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com._count_not_none(comass, span, halflife, alpha) + valid_count = com.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index 36eac8dd57fbd..45d50ea3fa073 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -49,7 +49,6 @@ def get_console_size(): Returns (None,None) in non-interactive 
session. """ from pandas import get_option - from pandas.core import common as com display_width = get_option('display.width') # deprecated. @@ -65,8 +64,8 @@ def get_console_size(): # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. - if com.in_interactive_session(): - if com.in_ipython_frontend(): + if in_interactive_session(): + if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas.core.config import get_default_val @@ -82,3 +81,75 @@ def get_console_size(): # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height) + + +# ---------------------------------------------------------------------- +# Detect our environment + +def in_interactive_session(): + """ check if we're running in an interactive shell + + returns True if running under python/ipython interactive shell + """ + from pandas import get_option + + def check_main(): + import __main__ as main + return (not hasattr(main, '__file__') or + get_option('mode.sim_interactive')) + + try: + return __IPYTHON__ or check_main() # noqa + except: + return check_main() + + +def in_qtconsole(): + """ + check if we're inside an IPython qtconsole + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. + """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'qtconsole' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipnb(): + """ + check if we're inside an IPython Notebook + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. 
+ """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'notebook' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipython_frontend(): + """ + check if we're inside an an IPython zmq frontend + """ + try: + ip = get_ipython() # noqa + return 'zmq' in str(type(ip)).lower() + except: + pass + + return False diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f2d6fe01e0573..c57b1c3e211f6 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3935,7 +3935,7 @@ def read(self, where=None, columns=None, **kwargs): tuple_index = long_index.values unique_tuples = unique(tuple_index) - unique_tuples = com._asarray_tuplesafe(unique_tuples) + unique_tuples = com.asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = ensure_platform_int(indexer) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index beebf84b8a033..3bb0b98851234 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -324,7 +324,7 @@ def try_parse(values): if isinstance(values, Index): values = values.values if not isinstance(values, np.ndarray): - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) if is_integer_dtype(values) or is_float_dtype(values): return values diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 06020bdfd5d1d..7ce4c23f81ad6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -233,7 +233,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): # TODO: unused? 
# if self.sort_columns: - # columns = com._try_sort(data.columns) + # columns = com.try_sort(data.columns) # else: # columns = data.columns @@ -2428,7 +2428,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout=layout) _axes = _flatten(axes) - for i, col in enumerate(com._try_sort(data.columns)): + for i, col in enumerate(com.try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 426b29a8840f4..c72e092c73aa2 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -49,7 +49,7 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', def random_color(column): """ Returns a random color represented as a list of length 3""" # GH17525 use common._random_state to avoid resetting the seed - rs = com._random_state(column) + rs = com.random_state(column) return rs.rand(3).tolist() colors = lmap(random_color, lrange(num_colors)) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 07ba0b681418e..118b05d16ab09 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar import pandas as pd -from pandas.core import common as com from pandas.errors import PerformanceWarning from pandas import DataFrame, Series, Panel, date_range from pandas.util.testing import makeCustomDataframe as mkdf @@ -94,7 +93,7 @@ def _is_py3_complex_incompat(result, expected): np.isnan(result)) -_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms) +_good_arith_ops = set(_arith_ops_syms).difference(_special_case_arith_ops_syms) @td.skip_if_no_ne diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 66577d738dd28..8b2b74802556d 100644 --- a/pandas/tests/groupby/test_groupby.py 
+++ b/pandas/tests/groupby/test_groupby.py @@ -1260,17 +1260,17 @@ def test_groupby_sort_multi(): 'd': np.random.randn(3)}) tups = lmap(tuple, df[['a', 'b', 'c']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['a', 'b', 'c'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]]) tups = lmap(tuple, df[['c', 'a', 'b']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['c', 'a', 'b'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups) tups = lmap(tuple, df[['b', 'c', 'a']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['b', 'c', 'a'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]]) @@ -1282,7 +1282,7 @@ def test_groupby_sort_multi(): def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = f(df.groupby(tups)[field]) for k, v in compat.iteritems(expected): assert (result[k] == v) diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index 8c9d0459eff55..208d498180692 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -253,7 +253,7 @@ def get_kwargs_from_breaks(self, breaks, closed='right'): return {'data': tuples} elif is_categorical_dtype(breaks): return {'data': breaks._constructor(tuples)} - return {'data': com._asarray_tuplesafe(tuples)} + return {'data': com.asarray_tuplesafe(tuples)} def test_constructor_errors(self): # non-tuple diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 0dc5970c22803..e179286e839db 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ 
b/pandas/tests/indexes/interval/test_interval.py @@ -947,7 +947,7 @@ def test_to_tuples(self, tuples): # GH 18756 idx = IntervalIndex.from_tuples(tuples) result = idx.to_tuples() - expected = Index(com._asarray_tuplesafe(tuples)) + expected = Index(com.asarray_tuplesafe(tuples)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tuples', [ @@ -963,7 +963,7 @@ def test_to_tuples_na(self, tuples, na_tuple): result = idx.to_tuples(na_tuple=na_tuple) # check the non-NA portion - expected_notna = Index(com._asarray_tuplesafe(tuples[:-1])) + expected_notna = Index(com.asarray_tuplesafe(tuples[:-1])) result_notna = result[:-1] tm.assert_index_equal(result_notna, expected_notna) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..62b37a35249d0 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -217,8 +217,8 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) - expected_level_array = com._asarray_tuplesafe(expected_level, - dtype=object) + expected_level_array = com.asarray_tuplesafe(expected_level, + dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) def test_complex_sorting(self): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 61f838eeeeb30..e1c9202189972 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -8,24 +8,14 @@ import numpy as np from pandas import Series, DataFrame, Timestamp -from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops from pandas.io.common import _get_handle import pandas.util.testing as tm -def test_mut_exclusive(): - msg = "mutually exclusive arguments: '[ab]' and '[ab]'" - with tm.assert_raises_regex(TypeError, msg): - com._mut_exclusive(a=1, b=2) - assert com._mut_exclusive(a=1, b=None) == 1 - assert com._mut_exclusive(major=None, 
major_axis=None) is None - assert com._mut_exclusive(a=None, b=2) == 2 - - def test_get_callable_name(): - getname = com._get_callable_name + getname = com.get_callable_name def fn(x): return x @@ -58,112 +48,25 @@ def test_all_not_none(): assert (not com._all_not_none(None, None, None, None)) -def test_iterpairs(): - data = [1, 2, 3, 4] - expected = [(1, 2), (2, 3), (3, 4)] - - result = list(com.iterpairs(data)) - - assert (result == expected) - - -def test_split_ranges(): - def _bin(x, width): - "return int(x) as a base2 string of given width" - return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1)) - - def test_locs(mask): - nfalse = sum(np.array(mask) == 0) - - remaining = 0 - for s, e in com.split_ranges(mask): - remaining += e - s - - assert 0 not in mask[s:e] - - # make sure the total items covered by the ranges are a complete cover - assert remaining + nfalse == len(mask) - - # exhaustively test all possible mask sequences of length 8 - ncols = 8 - for i in range(2 ** ncols): - cols = lmap(int, list(_bin(i, ncols))) # count up in base2 - mask = [cols[i] == 1 for i in range(len(cols))] - test_locs(mask) - - # base cases - test_locs([]) - test_locs([0]) - test_locs([1]) - - -def test_map_indices_py(): - data = [4, 3, 2, 1] - expected = {4: 0, 3: 1, 2: 2, 1: 3} - - result = com.map_indices_py(data) - - assert (result == expected) - - -def test_union(): - a = [1, 2, 3] - b = [4, 5, 6] - - union = sorted(com.union(a, b)) - - assert ((a + b) == union) - - -def test_difference(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.difference(b, a)) - - assert ([4, 5, 6] == inter) - - -def test_intersection(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.intersection(a, b)) - - assert (a == inter) - - -def test_groupby(): - values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3'] - expected = {'f': ['foo', 'foo3'], - 'b': ['bar', 'baz', 'baz2'], - 'q': ['qux']} - - grouped = com.groupby(values, lambda x: x[0]) - - for k, v 
in grouped: - assert v == expected[k] - - def test_random_state(): import numpy.random as npr # Check with seed - state = com._random_state(5) + state = com.random_state(5) assert state.uniform() == npr.RandomState(5).uniform() # Check with random state object state2 = npr.RandomState(10) - assert com._random_state(state2).uniform() == npr.RandomState(10).uniform() + assert com.random_state(state2).uniform() == npr.RandomState(10).uniform() # check with no arg random state - assert com._random_state() is np.random + assert com.random_state() is np.random # Error for floats or strings with pytest.raises(ValueError): - com._random_state('test') + com.random_state('test') with pytest.raises(ValueError): - com._random_state(5.5) + com.random_state(5.5) @pytest.mark.parametrize('left, right, expected', [ @@ -182,9 +85,9 @@ def test_dict_compat(): np.datetime64('2015-03-15'): 2} data_unchanged = {1: 2, 3: 4, 5: 6} expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2} - assert (com._dict_compat(data_datetime64) == expected) - assert (com._dict_compat(expected) == expected) - assert (com._dict_compat(data_unchanged) == data_unchanged) + assert (com.dict_compat(data_datetime64) == expected) + assert (com.dict_compat(expected) == expected) + assert (com.dict_compat(data_unchanged) == data_unchanged) def test_standardize_mapping(): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index d0350ba252329..98026f6d4cf0e 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -50,7 +50,7 @@ def test_int64_overflow(self): tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ]].values)) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = df.groupby(tups).sum()['values']
Moves a few console-checking functions to `io.formats.console`. A bunch of core.common functions were never used outside of tests, got rid of em. The ones I left alone were _any_not_none, _all_not_none etc, as I'm inclined to think these should be removed in favor of python builtins.
https://api.github.com/repos/pandas-dev/pandas/pulls/22001
2018-07-20T19:05:58Z
2018-07-24T22:10:06Z
2018-07-24T22:10:06Z
2020-04-05T17:40:40Z
Sparse get dummies perf
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 9044b080c45f9..07634811370c7 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,7 +1,9 @@ +import string from itertools import product import numpy as np from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long +import pandas as pd from .pandas_vb_common import setup # noqa @@ -132,3 +134,19 @@ def setup(self): def time_pivot_table(self): self.df.pivot_table(index='key1', columns=['key2', 'key3']) + + +class GetDummies(object): + goal_time = 0.2 + + def setup(self): + categories = list(string.ascii_letters[:12]) + s = pd.Series(np.random.choice(categories, size=1_000_000), + dtype=pd.api.types.CategoricalDtype(categories)) + self.s = s + + def time_get_dummies_1d(self): + pd.get_dummies(self.s, sparse=False) + + def time_get_dummies_1d_sparse(self): + pd.get_dummies(self.s, sparse=True) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..fc51ff2df001a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -343,7 +343,7 @@ Performance Improvements - Improved performance of :meth:`HDFStore.groups` (and dependent functions like :meth:`~HDFStore.keys`. (i.e. ``x in store`` checks are much faster) (:issue:`21372`) -- +- Improved the performance of :func:`pandas.get_dummies` with ``sparse=True`` (:issue:`21997`) .. 
_whatsnew_0240.docs: diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d5d2e594b8d6b..b63a938112522 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -940,10 +940,11 @@ def get_empty_Frame(data, sparse): sparse_series = {} N = len(data) sp_indices = [[] for _ in range(len(dummy_cols))] - for ndx, code in enumerate(codes): - if code == -1: - # Blank entries if not dummy_na and code == -1, #GH4446 - continue + mask = codes != -1 + codes = codes[mask] + n_idx = np.arange(N)[mask] + + for ndx, code in zip(n_idx, codes): sp_indices[code].append(ndx) if drop_first:
Previously, we did a scalar `elem == -1` for every element in the ndarray. This replaces that check with a vectorized `array == -1`. Running the ASV now. In the meantime, here's a simple timeit on the same problem ```python # HEAD In [3]: %timeit pd.get_dummies(s, sparse=True) 561 ms ± 4.96 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # Master In [3]: %timeit pd.get_dummies(s, sparse=True) 2.18 s ± 273 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21997
2018-07-20T15:47:29Z
2018-07-20T20:46:13Z
2018-07-20T20:46:13Z
2018-07-20T20:46:17Z
TST: tuple and namedtuple multiindex tests for read_csv
diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index 3fb0650348763..ad3d4592bd599 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -5,6 +5,8 @@ during parsing for all of the parsers defined in parsers.py """ +from collections import namedtuple + import pytest import numpy as np @@ -149,6 +151,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], index_col=0) tm.assert_frame_equal(df, result) + # to_csv, tuples + result = self.read_csv(StringIO(data), skiprows=3, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=0) + tm.assert_frame_equal(df, result) + + # to_csv, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=3, index_col=0, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df, result) + # common data = """,a,a,a,b,c,c ,q,r,s,t,u,v @@ -158,6 +176,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], index_col=0) tm.assert_frame_equal(df, result) + # common, tuples + result = self.read_csv(StringIO(data), skiprows=2, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=0) + tm.assert_frame_equal(df, result) + + # common, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=2, index_col=0, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df, result) + # common, no index_col data = """a,a,a,b,c,c q,r,s,t,u,v @@ -167,6 +201,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], 
index_col=None) tm.assert_frame_equal(df.reset_index(drop=True), result) + # common, no index_col, tuples + result = self.read_csv(StringIO(data), skiprows=2, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=None) + tm.assert_frame_equal(df.reset_index(drop=True), result) + + # common, no index_col, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=2, index_col=None, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df.reset_index(drop=True), result) + # malformed case 1 expected = DataFrame(np.array( [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
- [x] closes #7589 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21994
2018-07-20T14:38:58Z
2018-07-25T12:05:57Z
2018-07-25T12:05:56Z
2018-07-25T18:50:13Z
Move FrequencyInferer out of libresolution
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index a418e54e4da9b..ecfc7355dddfc 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -22,6 +22,11 @@ cnp.import_array() cimport util from util cimport numeric, get_nat +from khash cimport (khiter_t, + kh_destroy_int64, kh_put_int64, + kh_init_int64, kh_int64_t, + kh_resize_int64, kh_get_int64) + import missing cdef float64_t FP_ERR = 1e-13 @@ -71,6 +76,42 @@ class NegInfinity(object): __ge__ = lambda self, other: isinstance(other, NegInfinity) +cpdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): + """ + Efficiently find the unique first-differences of the given array. + + Parameters + ---------- + arr : ndarray[in64_t] + + Returns + ------- + result : ndarray[int64_t] + result is sorted + """ + cdef: + Py_ssize_t i, n = len(arr) + int64_t val + khiter_t k + kh_int64_t *table + int ret = 0 + list uniques = [] + + table = kh_init_int64() + kh_resize_int64(table, 10) + for i in range(n - 1): + val = arr[i + 1] - arr[i] + k = kh_get_int64(table, val) + if k == table.n_buckets: + kh_put_int64(table, val, &ret) + uniques.append(val) + kh_destroy_int64(table) + + result = np.array(uniques, dtype=np.int64) + result.sort() + return result + + @cython.wraparound(False) @cython.boundscheck(False) def is_lexsorted(list list_of_arrays): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 0835a43411783..4b90c669eebba 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # cython: profile=False +cimport cython from cython cimport Py_ssize_t import numpy as np @@ -10,23 +11,12 @@ cnp.import_array() from util cimport is_string_object, get_nat -from pandas._libs.khash cimport (khiter_t, - kh_destroy_int64, kh_put_int64, - kh_init_int64, kh_int64_t, - kh_resize_int64, kh_get_int64) - from np_datetime cimport npy_datetimestruct, dt64_to_dtstruct from frequencies cimport 
get_freq_code from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info) -from fields import build_field_sarray -from conversion import tz_convert from conversion cimport tz_convert_utc_to_tzlocal -from ccalendar import MONTH_ALIASES, int_to_weekday from ccalendar cimport get_days_in_month -from timestamps import Timestamp - -from pandas._libs.properties import cache_readonly # ---------------------------------------------------------------------- # Constants @@ -41,13 +31,6 @@ cdef int RESO_MIN = 4 cdef int RESO_HR = 5 cdef int RESO_DAY = 6 -_ONE_MICRO = <int64_t>1000L -_ONE_MILLI = <int64_t>(_ONE_MICRO * 1000) -_ONE_SECOND = <int64_t>(_ONE_MILLI * 1000) -_ONE_MINUTE = <int64_t>(60 * _ONE_SECOND) -_ONE_HOUR = <int64_t>(60 * _ONE_MINUTE) -_ONE_DAY = <int64_t>(24 * _ONE_HOUR) - # ---------------------------------------------------------------------- cpdef resolution(ndarray[int64_t] stamps, tz=None): @@ -331,31 +314,7 @@ class Resolution(object): # ---------------------------------------------------------------------- # Frequency Inference -cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): - cdef: - Py_ssize_t i, n = len(arr) - int64_t val - khiter_t k - kh_int64_t *table - int ret = 0 - list uniques = [] - - table = kh_init_int64() - kh_resize_int64(table, 10) - for i in range(n - 1): - val = arr[i + 1] - arr[i] - k = kh_get_int64(table, val) - if k == table.n_buckets: - kh_put_int64(table, val, &ret) - uniques.append(val) - kh_destroy_int64(table) - - result = np.array(uniques, dtype=np.int64) - result.sort() - return result - - -cdef object month_position_check(fields, weekdays): +def month_position_check(fields, weekdays): cdef: int32_t daysinmonth, y, m, d bint calendar_end = True @@ -397,247 +356,3 @@ cdef object month_position_check(fields, weekdays): return 'bs' else: return None - - -cdef inline bint _is_multiple(int64_t us, int64_t mult): - return us % mult == 0 - - -cdef inline str _maybe_add_count(str base, int64_t count): - 
if count != 1: - return '{count}{base}'.format(count=count, base=base) - else: - return base - - -cdef class _FrequencyInferer(object): - """ - Not sure if I can avoid the state machine here - """ - cdef public: - object index - object values - bint warn - bint is_monotonic - dict _cache - - def __init__(self, index, warn=True): - self.index = index - self.values = np.asarray(index).view('i8') - - # This moves the values, which are implicitly in UTC, to the - # the timezone so they are in local time - if hasattr(index, 'tz'): - if index.tz is not None: - self.values = tz_convert(self.values, 'UTC', index.tz) - - self.warn = warn - - if len(index) < 3: - raise ValueError('Need at least 3 dates to infer frequency') - - self.is_monotonic = (self.index.is_monotonic_increasing or - self.index.is_monotonic_decreasing) - - @cache_readonly - def deltas(self): - return unique_deltas(self.values) - - @cache_readonly - def deltas_asi8(self): - return unique_deltas(self.index.asi8) - - @cache_readonly - def is_unique(self): - return len(self.deltas) == 1 - - @cache_readonly - def is_unique_asi8(self): - return len(self.deltas_asi8) == 1 - - def get_freq(self): - if not self.is_monotonic or not self.index.is_unique: - return None - - delta = self.deltas[0] - if _is_multiple(delta, _ONE_DAY): - return self._infer_daily_rule() - else: - # Business hourly, maybe. 17: one day / 65: one weekend - if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): - return 'BH' - # Possibly intraday frequency. Here we use the - # original .asi8 values as the modified values - # will not work around DST transitions. 
See #8772 - elif not self.is_unique_asi8: - return None - delta = self.deltas_asi8[0] - if _is_multiple(delta, _ONE_HOUR): - # Hours - return _maybe_add_count('H', delta / _ONE_HOUR) - elif _is_multiple(delta, _ONE_MINUTE): - # Minutes - return _maybe_add_count('T', delta / _ONE_MINUTE) - elif _is_multiple(delta, _ONE_SECOND): - # Seconds - return _maybe_add_count('S', delta / _ONE_SECOND) - elif _is_multiple(delta, _ONE_MILLI): - # Milliseconds - return _maybe_add_count('L', delta / _ONE_MILLI) - elif _is_multiple(delta, _ONE_MICRO): - # Microseconds - return _maybe_add_count('U', delta / _ONE_MICRO) - else: - # Nanoseconds - return _maybe_add_count('N', delta) - - @cache_readonly - def day_deltas(self): - return [x / _ONE_DAY for x in self.deltas] - - @cache_readonly - def hour_deltas(self): - return [x / _ONE_HOUR for x in self.deltas] - - @cache_readonly - def fields(self): - return build_field_sarray(self.values) - - @cache_readonly - def rep_stamp(self): - return Timestamp(self.values[0]) - - cdef object month_position_check(self): - return month_position_check(self.fields, self.index.dayofweek) - - @cache_readonly - def mdiffs(self): - nmonths = self.fields['Y'] * 12 + self.fields['M'] - return unique_deltas(nmonths.astype('i8')) - - @cache_readonly - def ydiffs(self): - return unique_deltas(self.fields['Y'].astype('i8')) - - cdef _infer_daily_rule(self): - annual_rule = self._get_annual_rule() - if annual_rule: - nyears = self.ydiffs[0] - month = MONTH_ALIASES[self.rep_stamp.month] - alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month) - return _maybe_add_count(alias, nyears) - - quarterly_rule = self._get_quarterly_rule() - if quarterly_rule: - nquarters = self.mdiffs[0] / 3 - mod_dict = {0: 12, 2: 11, 1: 10} - month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] - alias = '{prefix}-{month}'.format(prefix=quarterly_rule, - month=month) - return _maybe_add_count(alias, nquarters) - - monthly_rule = self._get_monthly_rule() - if 
monthly_rule: - return _maybe_add_count(monthly_rule, self.mdiffs[0]) - - if self.is_unique: - days = self.deltas[0] / _ONE_DAY - if days % 7 == 0: - # Weekly - day = int_to_weekday[self.rep_stamp.weekday()] - return _maybe_add_count('W-{day}'.format(day=day), days / 7) - else: - return _maybe_add_count('D', days) - - if self._is_business_daily(): - return 'B' - - wom_rule = self._get_wom_rule() - if wom_rule: - return wom_rule - - cdef _get_annual_rule(self): - if len(self.ydiffs) > 1: - return None - - # lazy import to prevent circularity - # TODO: Avoid non-cython dependency - from pandas.core.algorithms import unique - - if len(unique(self.fields['M'])) > 1: - return None - - pos_check = self.month_position_check() - return {'cs': 'AS', 'bs': 'BAS', - 'ce': 'A', 'be': 'BA'}.get(pos_check) - - cdef _get_quarterly_rule(self): - if len(self.mdiffs) > 1: - return None - - if not self.mdiffs[0] % 3 == 0: - return None - - pos_check = self.month_position_check() - return {'cs': 'QS', 'bs': 'BQS', - 'ce': 'Q', 'be': 'BQ'}.get(pos_check) - - cdef _get_monthly_rule(self): - if len(self.mdiffs) > 1: - return None - pos_check = self.month_position_check() - return {'cs': 'MS', 'bs': 'BMS', - 'ce': 'M', 'be': 'BM'}.get(pos_check) - - cdef bint _is_business_daily(self): - # quick check: cannot be business daily - if self.day_deltas != [1, 3]: - return False - - # probably business daily, but need to confirm - first_weekday = self.index[0].weekday() - shifts = np.diff(self.index.asi8) - shifts = np.floor_divide(shifts, _ONE_DAY) - weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) - return np.all(((weekdays == 0) & (shifts == 3)) | - ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))) - - cdef _get_wom_rule(self): - # wdiffs = unique(np.diff(self.index.week)) - # We also need -47, -49, -48 to catch index spanning year boundary - # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): - # return None - - # lazy import to prevent circularity - # TODO: Avoid 
non-cython dependency - from pandas.core.algorithms import unique - - weekdays = unique(self.index.weekday) - if len(weekdays) > 1: - return None - - week_of_months = unique((self.index.day - 1) // 7) - # Only attempt to infer up to WOM-4. See #9425 - week_of_months = week_of_months[week_of_months < 4] - if len(week_of_months) == 0 or len(week_of_months) > 1: - return None - - # get which week - week = week_of_months[0] + 1 - wd = int_to_weekday[weekdays[0]] - - return 'WOM-{week}{weekday}'.format(week=week, weekday=wd) - - -cdef class _TimedeltaFrequencyInferer(_FrequencyInferer): - - cdef _infer_daily_rule(self): - if self.is_unique: - days = self.deltas[0] / _ONE_DAY - if days % 7 == 0: - # Weekly - wd = int_to_weekday[self.rep_stamp.weekday()] - alias = 'W-{weekday}'.format(weekday=wd) - return _maybe_add_count(alias, days / 7) - else: - return _maybe_add_count('D', days) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 59cd4743f857b..d6e4824575468 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -6,25 +6,32 @@ import numpy as np +from pandas.util._decorators import cache_readonly + from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.common import ( is_period_arraylike, is_timedelta64_dtype, is_datetime64_dtype) +from pandas.core.algorithms import unique + from pandas.tseries.offsets import DateOffset -from pandas._libs.tslibs import Timedelta +from pandas._libs.tslibs import Timedelta, Timestamp import pandas._libs.tslibs.frequencies as libfreqs from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API get_freq, get_base_alias, get_to_timestamp_base, get_freq_code, FreqGroup, is_subperiod, is_superperiod) +from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, int_to_weekday +import pandas._libs.tslibs.resolution as libresolution +from pandas._libs.tslibs.resolution import Resolution +from pandas._libs.tslibs.fields import build_field_sarray +from 
pandas._libs.tslibs.conversion import tz_convert -from pandas._libs.tslibs.resolution import (Resolution, - _FrequencyInferer, - _TimedeltaFrequencyInferer) +from pandas._libs.algos import unique_deltas from pytz import AmbiguousTimeError @@ -37,6 +44,13 @@ RESO_HR = 5 RESO_DAY = 6 +_ONE_MICRO = 1000 +_ONE_MILLI = (_ONE_MICRO * 1000) +_ONE_SECOND = (_ONE_MILLI * 1000) +_ONE_MINUTE = (60 * _ONE_SECOND) +_ONE_HOUR = (60 * _ONE_MINUTE) +_ONE_DAY = (24 * _ONE_HOUR) + # --------------------------------------------------------------------- # Offset names ("time rules") and related functions @@ -269,3 +283,246 @@ def infer_freq(index, warn=True): inferer = _FrequencyInferer(index, warn=warn) return inferer.get_freq() + + +class _FrequencyInferer(object): + """ + Not sure if I can avoid the state machine here + """ + + def __init__(self, index, warn=True): + self.index = index + self.values = np.asarray(index).view('i8') + + # This moves the values, which are implicitly in UTC, to the + # the timezone so they are in local time + if hasattr(index, 'tz'): + if index.tz is not None: + self.values = tz_convert(self.values, 'UTC', index.tz) + + self.warn = warn + + if len(index) < 3: + raise ValueError('Need at least 3 dates to infer frequency') + + self.is_monotonic = (self.index.is_monotonic_increasing or + self.index.is_monotonic_decreasing) + + @cache_readonly + def deltas(self): + return unique_deltas(self.values) + + @cache_readonly + def deltas_asi8(self): + return unique_deltas(self.index.asi8) + + @cache_readonly + def is_unique(self): + return len(self.deltas) == 1 + + @cache_readonly + def is_unique_asi8(self): + return len(self.deltas_asi8) == 1 + + def get_freq(self): # noqa:F811 + """ + Find the appropriate frequency string to describe the inferred + frequency of self.values + + Returns + ------- + freqstr : str or None + """ + if not self.is_monotonic or not self.index.is_unique: + return None + + delta = self.deltas[0] + if _is_multiple(delta, _ONE_DAY): + 
return self._infer_daily_rule() + + # Business hourly, maybe. 17: one day / 65: one weekend + if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): + return 'BH' + # Possibly intraday frequency. Here we use the + # original .asi8 values as the modified values + # will not work around DST transitions. See #8772 + elif not self.is_unique_asi8: + return None + + delta = self.deltas_asi8[0] + if _is_multiple(delta, _ONE_HOUR): + # Hours + return _maybe_add_count('H', delta / _ONE_HOUR) + elif _is_multiple(delta, _ONE_MINUTE): + # Minutes + return _maybe_add_count('T', delta / _ONE_MINUTE) + elif _is_multiple(delta, _ONE_SECOND): + # Seconds + return _maybe_add_count('S', delta / _ONE_SECOND) + elif _is_multiple(delta, _ONE_MILLI): + # Milliseconds + return _maybe_add_count('L', delta / _ONE_MILLI) + elif _is_multiple(delta, _ONE_MICRO): + # Microseconds + return _maybe_add_count('U', delta / _ONE_MICRO) + else: + # Nanoseconds + return _maybe_add_count('N', delta) + + @cache_readonly + def day_deltas(self): + return [x / _ONE_DAY for x in self.deltas] + + @cache_readonly + def hour_deltas(self): + return [x / _ONE_HOUR for x in self.deltas] + + @cache_readonly + def fields(self): + return build_field_sarray(self.values) + + @cache_readonly + def rep_stamp(self): + return Timestamp(self.values[0]) + + def month_position_check(self): + return libresolution.month_position_check(self.fields, + self.index.dayofweek) + + @cache_readonly + def mdiffs(self): + nmonths = self.fields['Y'] * 12 + self.fields['M'] + return unique_deltas(nmonths.astype('i8')) + + @cache_readonly + def ydiffs(self): + return unique_deltas(self.fields['Y'].astype('i8')) + + def _infer_daily_rule(self): + annual_rule = self._get_annual_rule() + if annual_rule: + nyears = self.ydiffs[0] + month = MONTH_ALIASES[self.rep_stamp.month] + alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month) + return _maybe_add_count(alias, nyears) + + quarterly_rule = self._get_quarterly_rule() + if 
quarterly_rule: + nquarters = self.mdiffs[0] / 3 + mod_dict = {0: 12, 2: 11, 1: 10} + month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] + alias = '{prefix}-{month}'.format(prefix=quarterly_rule, + month=month) + return _maybe_add_count(alias, nquarters) + + monthly_rule = self._get_monthly_rule() + if monthly_rule: + return _maybe_add_count(monthly_rule, self.mdiffs[0]) + + if self.is_unique: + days = self.deltas[0] / _ONE_DAY + if days % 7 == 0: + # Weekly + day = int_to_weekday[self.rep_stamp.weekday()] + return _maybe_add_count( + 'W-{day}'.format(day=day), days / 7) + else: + return _maybe_add_count('D', days) + + if self._is_business_daily(): + return 'B' + + wom_rule = self._get_wom_rule() + if wom_rule: + return wom_rule + + def _get_annual_rule(self): + if len(self.ydiffs) > 1: + return None + + if len(unique(self.fields['M'])) > 1: + return None + + pos_check = self.month_position_check() + return {'cs': 'AS', 'bs': 'BAS', + 'ce': 'A', 'be': 'BA'}.get(pos_check) + + def _get_quarterly_rule(self): + if len(self.mdiffs) > 1: + return None + + if not self.mdiffs[0] % 3 == 0: + return None + + pos_check = self.month_position_check() + return {'cs': 'QS', 'bs': 'BQS', + 'ce': 'Q', 'be': 'BQ'}.get(pos_check) + + def _get_monthly_rule(self): + if len(self.mdiffs) > 1: + return None + pos_check = self.month_position_check() + return {'cs': 'MS', 'bs': 'BMS', + 'ce': 'M', 'be': 'BM'}.get(pos_check) + + def _is_business_daily(self): + # quick check: cannot be business daily + if self.day_deltas != [1, 3]: + return False + + # probably business daily, but need to confirm + first_weekday = self.index[0].weekday() + shifts = np.diff(self.index.asi8) + shifts = np.floor_divide(shifts, _ONE_DAY) + weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) + return np.all(((weekdays == 0) & (shifts == 3)) | + ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))) + + def _get_wom_rule(self): + # wdiffs = unique(np.diff(self.index.week)) + # We also need -47, -49, -48 to 
catch index spanning year boundary + # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): + # return None + + weekdays = unique(self.index.weekday) + if len(weekdays) > 1: + return None + + week_of_months = unique((self.index.day - 1) // 7) + # Only attempt to infer up to WOM-4. See #9425 + week_of_months = week_of_months[week_of_months < 4] + if len(week_of_months) == 0 or len(week_of_months) > 1: + return None + + # get which week + week = week_of_months[0] + 1 + wd = int_to_weekday[weekdays[0]] + + return 'WOM-{week}{weekday}'.format(week=week, weekday=wd) + + +class _TimedeltaFrequencyInferer(_FrequencyInferer): + + def _infer_daily_rule(self): + if self.is_unique: + days = self.deltas[0] / _ONE_DAY + if days % 7 == 0: + # Weekly + wd = int_to_weekday[self.rep_stamp.weekday()] + alias = 'W-{weekday}'.format(weekday=wd) + return _maybe_add_count(alias, days / 7) + else: + return _maybe_add_count('D', days) + + +def _is_multiple(us, mult): + return us % mult == 0 + + +def _maybe_add_count(base, count): + if count != 1: + assert count == int(count) + count = int(count) + return '{count}{base}'.format(count=count, base=base) + else: + return base
Discussed briefly, FrequencyInferer doesn't benefit much from cython, isn't needed elsewhere in tslibs, and with this move loses the dependency on khash.
https://api.github.com/repos/pandas-dev/pandas/pulls/21992
2018-07-20T14:29:44Z
2018-07-25T10:12:04Z
2018-07-25T10:12:04Z
2018-07-26T16:23:22Z
allow using Iterable in Series and DataFrame constructor
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 06498b28cb77b..8751e882b825b 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -179,7 +179,7 @@ Other Enhancements - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) -- +- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) .. _whatsnew_0240.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 078e176ff2b99..16332738ce610 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -16,7 +16,6 @@ import collections import itertools import sys -import types import warnings from textwrap import dedent @@ -75,7 +74,8 @@ from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.algorithms as algorithms from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, - OrderedDict, raise_with_traceback) + OrderedDict, raise_with_traceback, + string_and_binary_types) from pandas import compat from pandas.compat import PY36 from pandas.compat.numpy import function as nv @@ -267,7 +267,7 @@ class DataFrame(NDFrame): Parameters ---------- - data : numpy ndarray (structured or homogeneous), dict, or DataFrame + data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects .. 
versionchanged :: 0.23.0 @@ -391,8 +391,11 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) - elif isinstance(data, (list, types.GeneratorType)): - if isinstance(data, types.GeneratorType): + + # For data is list-like, or Iterable (will consume into list) + elif (isinstance(data, collections.Iterable) + and not isinstance(data, string_and_binary_types)): + if not isinstance(data, collections.Sequence): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: @@ -417,8 +420,6 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=copy) else: mgr = self._init_dict({}, index, columns, dtype=dtype) - elif isinstance(data, collections.Iterator): - raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) diff --git a/pandas/core/series.py b/pandas/core/series.py index d4c11b19082ab..08b77c505463e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -6,7 +6,7 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 -import types +import collections import warnings from textwrap import dedent @@ -144,7 +144,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Parameters ---------- - data : array-like, dict, or scalar value + data : array-like, Iterable, dict, or scalar value Contains data stored in Series .. versionchanged :: 0.23.0 @@ -238,12 +238,13 @@ def __init__(self, data=None, index=None, dtype=None, name=None, elif is_extension_array_dtype(data): pass - elif (isinstance(data, types.GeneratorType) or - (compat.PY3 and isinstance(data, map))): - data = list(data) elif isinstance(data, (set, frozenset)): raise TypeError("{0!r} type is unordered" "".format(data.__class__.__name__)) + # If data is Iterable but not list-like, consume into list. 
+ elif (isinstance(data, collections.Iterable) + and not isinstance(data, collections.Sized)): + data = list(data) else: # handle sparse passed here (and force conversion) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bef38288ff3a5..4426d4ba8ead1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -865,12 +865,6 @@ def test_constructor_more(self): dm = DataFrame(index=np.arange(10)) assert dm.values.shape == (10, 0) - # corner, silly - # TODO: Fix this Exception to be better... - with tm.assert_raises_regex(ValueError, 'constructor not ' - 'properly called'): - DataFrame((1, 2, 3)) - # can't cast mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1) with tm.assert_raises_regex(ValueError, 'cast'): @@ -953,6 +947,17 @@ def __len__(self, n): array.array('i', range(10))]) tm.assert_frame_equal(result, expected, check_dtype=False) + def test_constructor_iterable(self): + # GH 21987 + class Iter(): + def __iter__(self): + for i in range(10): + yield [1, 2, 3] + + expected = DataFrame([[1, 2, 3]] * 10) + result = DataFrame(Iter()) + tm.assert_frame_equal(result, expected) + def test_constructor_iterator(self): expected = DataFrame([list(range(10)), list(range(10))]) @@ -1374,10 +1379,6 @@ def test_constructor_miscast_na_int_dtype(self): expected = DataFrame([[np.nan, 1], [1, 0]]) tm.assert_frame_equal(df, expected) - def test_constructor_iterator_failure(self): - with tm.assert_raises_regex(TypeError, 'iterator'): - DataFrame(iter([1, 2, 3])) - def test_constructor_column_duplicates(self): # it works! 
#2079 df = DataFrame([[8, 5]], columns=['a', 'a']) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index e95e41bbdeefa..145682e5be863 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -156,12 +156,29 @@ def test_constructor_series(self): assert_series_equal(s2, s1.sort_index()) - def test_constructor_iterator(self): + def test_constructor_iterable(self): + # GH 21987 + class Iter(): + def __iter__(self): + for i in range(10): + yield i + expected = Series(list(range(10)), dtype='int64') + result = Series(Iter(), dtype='int64') + assert_series_equal(result, expected) + + def test_constructor_sequence(self): + # GH 21987 expected = Series(list(range(10)), dtype='int64') result = Series(range(10), dtype='int64') assert_series_equal(result, expected) + def test_constructor_single_str(self): + # GH 21987 + expected = Series(['abc']) + result = Series('abc') + assert_series_equal(result, expected) + def test_constructor_list_like(self): # make sure that we are coercing different
- [X] closes #2193 - [x] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- @TomAugspurger Hope you can review this. BTW, perhpas you may want to change `Iterable` in `is_list_like()` to `Sequence`. ```python gen = (i for i in range(10)) pandas.core.dtypes.inference.is_list_like(gen) # True len(gen) # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: object of type 'generator' has no len() gen[0] # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: 'generator' object is not subscriptable ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21987
2018-07-20T08:00:50Z
2018-07-26T13:00:55Z
2018-07-26T13:00:55Z
2018-07-26T13:01:09Z
CLN: Remove unused variables
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 4584e4694cdc5..204e800b932a9 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -348,7 +348,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, " or `ordered`.") categories = dtype.categories - ordered = dtype.ordered elif is_categorical(values): # If no "dtype" was passed, use the one from "values", but honor diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 5ecc79e030f56..ad01d4ec9b3ca 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -401,7 +401,6 @@ def from_tuples(cls, data, closed='right', copy=False, dtype=None): msg = ('{name}.from_tuples received an invalid ' 'item, {tpl}').format(name=name, tpl=d) raise TypeError(msg) - lhs, rhs = d left.append(lhs) right.append(rhs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 376700f1418f6..edf341ae2898f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1084,7 +1084,8 @@ def rename(self, *args, **kwargs): level = kwargs.pop('level', None) axis = kwargs.pop('axis', None) if axis is not None: - axis = self._get_axis_number(axis) + # Validate the axis + self._get_axis_number(axis) if kwargs: raise TypeError('rename() got an unexpected keyword ' @@ -5299,6 +5300,12 @@ def __copy__(self, deep=True): return self.copy(deep=deep) def __deepcopy__(self, memo=None): + """ + Parameters + ---------- + memo, default None + Standard signature. 
Unused + """ if memo is None: memo = {} return self.copy(deep=True) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 38ac144ac6c95..ba04ff3a3d3ee 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -582,7 +582,6 @@ def _transform(self, result, values, comp_ids, transform_func, elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): - chunk = chunk.squeeze() transform_func(result[:, :, i], values, comp_ids, is_datetimelike, **kwargs) else: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f09fe8c8abdcf..8ad058c001bba 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -993,6 +993,12 @@ def __copy__(self, **kwargs): return self.copy(**kwargs) def __deepcopy__(self, memo=None): + """ + Parameters + ---------- + memo, default None + Standard signature. Unused + """ if memo is None: memo = {} return self.copy(deep=True) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d76a7ef00f625..ab180a13ab4f3 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -133,7 +133,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None, if name is None: name = self.name cat = Categorical.from_codes(codes, categories=categories, - ordered=self.ordered) + ordered=ordered) return CategoricalIndex(cat, name=name) @classmethod diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 246bd3d541b72..0b467760d82d9 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -939,7 +939,6 @@ def _format_data(self, name=None): summary = '[{head} ... 
{tail}]'.format( head=', '.join(head), tail=', '.join(tail)) else: - head = [] tail = [formatter(x) for x in self] summary = '[{tail}]'.format(tail=', '.join(tail)) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ffa2267dd6877..0f3ffb8055330 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1248,7 +1248,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): if fill_tuple is None: fill_value = self.fill_value new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=False) + allow_fill=False, fill_value=fill_value) else: fill_value = fill_tuple[0] new_values = algos.take_nd(values, indexer, axis=axis, @@ -2699,7 +2699,6 @@ def _try_coerce_args(self, values, other): values_mask = isna(values) values = values.view('i8') - other_mask = False if isinstance(other, bool): raise TypeError @@ -2872,11 +2871,9 @@ def _try_coerce_args(self, values, other): values_mask = _block_shape(isna(values), ndim=self.ndim) # asi8 is a view, needs copy values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False if isinstance(other, ABCSeries): other = self._holder(other) - other_mask = isna(other) if isinstance(other, bool): raise TypeError diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 32fd70bcf654d..f44fb4f6e9e14 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -479,7 +479,9 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @disallow('M8', 'm8') def nansem(values, axis=None, skipna=True, ddof=1): - var = nanvar(values, axis, skipna, ddof=ddof) + # This checks if non-numeric-like data is passed with numeric_only=False + # and raises a TypeError otherwise + nanvar(values, axis, skipna, ddof=ddof) mask = isna(values) if not is_float_dtype(values.dtype): @@ -635,7 +637,6 @@ def nankurt(values, axis=None, skipna=True): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 
2) * (count - 3) * m2**2 - result = numer / denom - adj # floating point error # diff --git a/pandas/core/series.py b/pandas/core/series.py index 08b77c505463e..8f9fe5ee516e6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2052,7 +2052,6 @@ def dot(self, other): lvals = left.values rvals = right.values else: - left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: @@ -2480,7 +2479,8 @@ def sort_values(self, axis=0, ascending=True, inplace=False, dtype: object """ inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: @@ -2652,7 +2652,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) index = self.index if level is not None: @@ -3073,7 +3074,8 @@ def _gotitem(self, key, ndim, subset=None): versionadded='.. 
versionadded:: 0.20.0', **_shared_doc_kwargs)) def aggregate(self, func, axis=0, *args, **kwargs): - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) result, how = self._aggregate(func, *args, **kwargs) if result is None: @@ -3919,8 +3921,8 @@ def dropna(self, axis=0, inplace=False, **kwargs): if kwargs: raise TypeError('dropna() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) - - axis = self._get_axis_number(axis or 0) + # Validate the axis parameter + self._get_axis_number(axis or 0) if self._can_hold_na: result = remove_na_arraylike(self) diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 5cb9f4744cc58..58e3001bcfe6a 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -597,7 +597,6 @@ def _combine_match_index(self, other, func, level=None): new_data[col] = func(series.values, other.values) # fill_value is a function of our operator - fill_value = None if isna(other.fill_value) or isna(self.default_fill_value): fill_value = np.nan else: diff --git a/pandas/core/window.py b/pandas/core/window.py index f3b4aaa74ec6b..eed0e97f30dc9 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -933,7 +933,8 @@ class _Rolling_and_Expanding(_Rolling): def count(self): blocks, obj, index = self._create_blocks() - index, indexi = self._get_index(index=index) + # Validate the index + self._get_index(index=index) window = self._get_window() window = min(window, len(obj)) if not self.center else window diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index f69e4a484d177..c6ca59aa08bf9 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -495,8 +495,6 @@ def _chk_truncate(self): frame.iloc[:, -col_num:]), axis=1) self.tr_col_num = col_num if truncate_v: - if max_rows_adj == 0: - row_num = len(frame) if max_rows_adj == 1: row_num = max_rows frame = frame.iloc[:max_rows, :] diff --git 
a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 20be903f54967..3ea5cb95b9c5a 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -222,7 +222,6 @@ def _column_header(): return row self.write('<thead>', indent) - row = [] indent += self.indent_delta diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 52262ea05bf96..dcd6f2cf4a718 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -67,7 +67,7 @@ def is_terminal(): def _get_terminal_size_windows(): - res = None + try: from ctypes import windll, create_string_buffer diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 3ec5e8d9be955..629e00ebfa7d0 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -547,7 +547,7 @@ def _get_object_parser(self, json): if typ == 'series' or obj is None: if not isinstance(dtype, bool): - dtype = dict(data=dtype) + kwargs['dtype'] = dtype obj = SeriesParser(json, **kwargs).parse() return obj diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 52b25898fc67e..14e7ad9682db6 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -181,10 +181,6 @@ def _parse_float_vec(vec): # number sans exponent ieee1 = xport1 & 0x00ffffff - # Get the second half of the ibm number into the second half of - # the ieee number - ieee2 = xport2 - # The fraction bit to the left of the binary point in the ieee # format was set and the number was shifted 0, 1, 2, or 3 # places. 
This will tell us how to adjust the ibm exponent to be a diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 0522d7e721b65..96e7532747c78 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -86,7 +86,6 @@ def _maybe_resample(series, ax, kwargs): freq = ax_freq elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) - ax_freq = freq else: # pragma: no cover raise ValueError('Incompatible frequency conversion') return freq, series diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index bcbac4400c953..d6e7c644cc780 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -642,6 +642,13 @@ def test_series_from_json_precise_float(self): result = read_json(s.to_json(), typ='series', precise_float=True) assert_series_equal(result, s, check_index_type=False) + def test_series_with_dtype(self): + # GH 21986 + s = Series([4.56, 4.56, 4.56]) + result = read_json(s.to_json(), typ='series', dtype=np.int64) + expected = Series([4] * 3) + assert_series_equal(result, expected) + def test_frame_from_json_precise_float(self): df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) result = read_json(df.to_json(), precise_float=True)
Breaking up #21974. Removes non-noqa, seemingly non-controversial, unused local variables according to PyCharm. These are mostly redefined elsewhere or not used. I added some TODO comments about other unused local variables that seem misused.
https://api.github.com/repos/pandas-dev/pandas/pulls/21986
2018-07-20T04:59:05Z
2018-07-29T15:32:50Z
2018-07-29T15:32:50Z
2018-07-29T18:16:26Z
CLN: Unreachable code, Boolean comparison, duplicate functions
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 0725bbeb6c36d..b51b41614bc49 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -142,7 +142,7 @@ def time_frame_nth(self, dtype): def time_series_nth_any(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='any') - def time_groupby_nth_all(self, dtype): + def time_series_nth_all(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='all') def time_series_nth(self, dtype): diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 2179999859dbb..68698f45d5623 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -390,7 +390,7 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): start = 0 cur_blkno = blknos[start] - if group == False: + if group is False: for i in range(1, n): if blknos[i] != cur_blkno: yield cur_blkno, slice(start, i) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..c5cb507e729f1 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -315,7 +315,6 @@ def __contains__(self, key): return True except Exception: return False - return False contains = __contains__
Breaking up #21974. This batch is: - Unreachable code - Redefined function (in asv) - Boolean equality comparison
https://api.github.com/repos/pandas-dev/pandas/pulls/21985
2018-07-20T04:44:22Z
2018-07-20T19:46:22Z
2018-07-20T19:46:22Z
2018-07-20T20:11:32Z
REF: No need to delegate to index check of whether an int is an int
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8ffc7548059b7..e0b6048b2ad64 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2124,7 +2124,25 @@ def _getitem_scalar(self, key): return values def _validate_integer(self, key, axis): - # return a boolean if we have a valid integer indexer + """ + Check that 'key' is a valid position in the desired axis. + + Parameters + ---------- + key : int + Requested position + axis : int + Desired axis + + Returns + ------- + None + + Raises + ------ + IndexError + If 'key' is not a valid position in axis 'axis' + """ ax = self.obj._get_axis(axis) l = len(ax) @@ -2215,8 +2233,6 @@ def _getitem_axis(self, key, axis=None): # a single integer else: - key = self._convert_scalar_indexer(key, axis) - if not is_integer(key): raise TypeError("Cannot index by location index with a " "non-integer key") diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 32a56aeafc6ad..ba1f1de21871f 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -50,7 +50,7 @@ def test_scalar_error(self): def f(): s.iloc[3.0] tm.assert_raises_regex(TypeError, - 'cannot do positional indexing', + 'Cannot index by location index', f) def f(): diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 81397002abd2b..3dcfe6a68ad9f 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -126,6 +126,18 @@ def test_iloc_getitem_neg_int(self): typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) + @pytest.mark.parametrize('dims', [1, 2]) + def test_iloc_getitem_invalid_scalar(self, dims): + # GH 21982 + + if dims == 1: + s = Series(np.arange(10)) + else: + s = DataFrame(np.arange(100).reshape(10, 10)) + + tm.assert_raises_regex(TypeError, 'Cannot index by location index', + lambda: s.iloc['a']) + def test_iloc_array_not_mutating_negative_indices(self): # GH 21867
- [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Just fixing the following nonsensical error: ``` python In [2]: pd.Series(range(10)).iloc['a'] --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-2-4a32011255fe> in <module>() ----> 1 pd.Series(range(10)).iloc['a'] [...] TypeError: cannot do positional indexing on <class 'pandas.core.indexes.range.RangeIndex'> with these indexers [a] of <class 'str'> ``` (you just cannot do positional indexing with a ``str``, regardless of the index) ... and adding a docstring while I was at it.
https://api.github.com/repos/pandas-dev/pandas/pulls/21982
2018-07-19T21:04:16Z
2018-07-20T19:18:59Z
2018-07-20T19:18:59Z
2018-07-20T19:20:22Z
ENH: Implement subtraction for object-dtype Index
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b015495b095b6..4439529faf208 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -310,6 +310,7 @@ Other API Changes - Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) - :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) +- :class:`Index` subtraction will attempt to operate element-wise instead of raising ``TypeError`` (:issue:`19369`) .. _whatsnew_0240.deprecations: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..3a42c7963f21b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2630,8 +2630,10 @@ def __iadd__(self, other): return self + other def __sub__(self, other): - raise TypeError("cannot perform __sub__ with this index type: " - "{typ}".format(typ=type(self).__name__)) + return Index(np.array(self) - other) + + def __rsub__(self, other): + return Index(other - np.array(self)) def __and__(self, other): return self.intersection(other) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7b105390db40b..754703dfc4bee 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -3,7 +3,7 @@ import pytest from datetime import datetime, timedelta - +from decimal import Decimal from collections import defaultdict import pandas.util.testing as tm @@ -864,13 +864,47 @@ def test_add(self): expected = Index(['1a', '1b', '1c']) tm.assert_index_equal('1' + index, expected) - def test_sub(self): + def test_sub_fail(self): index = self.strIndex pytest.raises(TypeError, lambda: index - 'a') pytest.raises(TypeError, lambda: index - 
index) pytest.raises(TypeError, lambda: index - index.tolist()) pytest.raises(TypeError, lambda: index.tolist() - index) + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), Decimal(1)]) + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + index - 'foo' + + with pytest.raises(TypeError): + index - np.array([2, 'foo']) + + def test_rsub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + 'foo' - index + + with pytest.raises(TypeError): + np.array([True, pd.Timestamp.now()]) - index + def test_map_identity_mapping(self): # GH 12766 # TODO: replace with fixture
- [x] closes #19369 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry @jreback @jorisvandenbossche discussed briefly at the sprint. Merits more thorough testing, but I'd like to get the go-ahead to separate out arithmetic tests that are common to EA/Index/Series/Frame[1col] that are highly duplicative first.
https://api.github.com/repos/pandas-dev/pandas/pulls/21981
2018-07-19T19:13:32Z
2018-07-23T10:01:53Z
2018-07-23T10:01:53Z
2020-04-05T17:40:45Z
Backport PR #21966 on branch 0.23.x
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index 5e19ab491647d..a30fbc75f11f8 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -27,6 +27,7 @@ Bug Fixes **Groupby/Resample/Rolling** - Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) +- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) - **Conversion** diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 5121d293efcb6..a77433e5d1115 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1482,6 +1482,8 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, else: output[i] = NaN + skiplist_destroy(skiplist) + return output
Backport PR #21966: Fix memory leak in roll_quantile
https://api.github.com/repos/pandas-dev/pandas/pulls/21973
2018-07-19T01:42:20Z
2018-07-20T12:28:02Z
2018-07-20T12:28:02Z
2018-07-20T12:28:02Z
Backport PR #21921 on branch 0.23.x
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..5e19ab491647d 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -58,3 +58,7 @@ Bug Fixes - - + +**Missing** + +- Bug in :func:`Series.clip` and :func:`DataFrame.clip` cannot accept list-like threshold containing ``NaN`` (:issue:`19992`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 02462218e8b02..facc709877285 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6433,9 +6433,11 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, # GH 17276 # numpy doesn't like NaN as a clip value # so ignore - if np.any(pd.isnull(lower)): + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None - if np.any(pd.isnull(upper)): + if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 437d3a9d24730..415ae982673ee 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2195,13 +2195,23 @@ def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH # 17276 tm.assert_frame_equal(self.frame.clip(np.nan), self.frame) - tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]), - self.frame) - tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]), - self.frame) tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan), self.frame) + # GH #19992 + df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6], + 'col_2': [7, 8, 9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan], + 'col_2': [7, 8, np.nan]}) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame({'col_0': 
[4, 4, 4], 'col_1': [5, 5, 6], + 'col_2': [np.nan, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + # Matrix-like def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 1e6ea96a5de51..bcf209521f913 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1140,11 +1140,15 @@ def test_clip_with_na_args(self): s = Series([1, 2, 3]) assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) - assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3])) - assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3])) assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + # GH #19992 + assert_series_equal(s.clip(lower=[0, 4, np.nan]), + Series([1, 4, np.nan])) + assert_series_equal(s.clip(upper=[1, np.nan, 1]), + Series([1, np.nan, 1])) + def test_clip_against_series(self): # GH #6966
Backport PR #21921: BUG:Clip with a list-like threshold with a nan is broken (GH19992)
https://api.github.com/repos/pandas-dev/pandas/pulls/21967
2018-07-18T10:23:52Z
2018-07-19T01:41:03Z
2018-07-19T01:41:03Z
2018-09-08T04:42:10Z
Fix memory leak in roll_quantile
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index ac1ef78fd6fd2..6d98334ace9e2 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -31,6 +31,7 @@ Bug Fixes **Groupby/Resample/Rolling** - Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) +- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) - **Conversion** diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 9e704a9bd8d3f..cea77e2c88b1b 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1481,6 +1481,8 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, else: output[i] = NaN + skiplist_destroy(skiplist) + return output
closes #21965
https://api.github.com/repos/pandas-dev/pandas/pulls/21966
2018-07-18T10:08:42Z
2018-07-19T01:41:35Z
2018-07-19T01:41:35Z
2018-07-19T01:41:44Z
Trim unncessary code in datetime/np_datetime.c
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 1ad8c780ba7a4..9e56802b92bf0 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -235,8 +235,7 @@ NPY_NO_EXPORT void add_seconds_to_datetimestruct(npy_datetimestruct *dts, * Fills in the year, month, day in 'dts' based on the days * offset from 1970. */ -static void set_datetimestruct_days(npy_int64 days, - npy_datetimestruct *dts) { +static void set_datetimestruct_days(npy_int64 days, npy_datetimestruct *dts) { const int *month_lengths; int i; @@ -318,7 +317,7 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, /* * - * Tests for and converts a Python datetime.datetime or datetime.date + * Converts a Python datetime.datetime or datetime.date * object into a NumPy npy_datetimestruct. Uses tzinfo (if present) * to convert to UTC time. * @@ -330,68 +329,22 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, * Returns -1 on error, 0 on success, and 1 (with no error set) * if obj doesn't have the needed date or datetime attributes. 
*/ -int convert_pydatetime_to_datetimestruct(PyObject *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, npy_datetimestruct *out) { + // Assumes that obj is a valid datetime object PyObject *tmp; - int isleap; /* Initialize the output to all zeros */ memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; - /* Need at least year/month/day attributes */ - if (!PyObject_HasAttrString(obj, "year") || - !PyObject_HasAttrString(obj, "month") || - !PyObject_HasAttrString(obj, "day")) { - return 1; - } - - /* Get the year */ - tmp = PyObject_GetAttrString(obj, "year"); - if (tmp == NULL) { - return -1; - } - out->year = PyInt_AsLong(tmp); - if (out->year == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the month */ - tmp = PyObject_GetAttrString(obj, "month"); - if (tmp == NULL) { - return -1; - } - out->month = PyInt_AsLong(tmp); - if (out->month == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the day */ - tmp = PyObject_GetAttrString(obj, "day"); - if (tmp == NULL) { - return -1; - } - out->day = PyInt_AsLong(tmp); - if (out->day == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); + out->year = PyInt_AsLong(PyObject_GetAttrString(obj, "year")); + out->month = PyInt_AsLong(PyObject_GetAttrString(obj, "month")); + out->day = PyInt_AsLong(PyObject_GetAttrString(obj, "day")); - /* Validate that the month and day are valid for the year */ - if (out->month < 1 || out->month > 12) { - goto invalid_date; - } - isleap = is_leapyear(out->year); - if (out->day < 1 || - out->day > days_per_month_table[isleap][out->month - 1]) { - goto invalid_date; - } + // TODO(anyone): If we can get PyDateTime_IMPORT to work, we could use + // PyDateTime_Check here, and less verbose attribute lookups. 
/* Check for time attributes (if not there, return success as a date) */ if (!PyObject_HasAttrString(obj, "hour") || @@ -401,61 +354,13 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return 0; } - /* Get the hour */ - tmp = PyObject_GetAttrString(obj, "hour"); - if (tmp == NULL) { - return -1; - } - out->hour = PyInt_AsLong(tmp); - if (out->hour == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the minute */ - tmp = PyObject_GetAttrString(obj, "minute"); - if (tmp == NULL) { - return -1; - } - out->min = PyInt_AsLong(tmp); - if (out->min == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the second */ - tmp = PyObject_GetAttrString(obj, "second"); - if (tmp == NULL) { - return -1; - } - out->sec = PyInt_AsLong(tmp); - if (out->sec == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); + out->hour = PyInt_AsLong(PyObject_GetAttrString(obj, "hour")); + out->min = PyInt_AsLong(PyObject_GetAttrString(obj, "minute")); + out->sec = PyInt_AsLong(PyObject_GetAttrString(obj, "second")); + out->us = PyInt_AsLong(PyObject_GetAttrString(obj, "microsecond")); - /* Get the microsecond */ - tmp = PyObject_GetAttrString(obj, "microsecond"); - if (tmp == NULL) { - return -1; - } - out->us = PyInt_AsLong(tmp); - if (out->us == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - if (out->hour < 0 || out->hour >= 24 || out->min < 0 || out->min >= 60 || - out->sec < 0 || out->sec >= 60 || out->us < 0 || out->us >= 1000000) { - goto invalid_time; - } - - /* Apply the time zone offset if it exists */ - if (PyObject_HasAttrString(obj, "tzinfo")) { + /* Apply the time zone offset if datetime obj is tz-aware */ + if (PyObject_HasAttrString((PyObject*)obj, "tzinfo")) { tmp = PyObject_GetAttrString(obj, "tzinfo"); if (tmp == NULL) { return -1; @@ -497,50 +402,15 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, } return 0; - 
-invalid_date: - PyErr_Format(PyExc_ValueError, - "Invalid date (%d,%d,%d) when converting to NumPy datetime", - (int)out->year, (int)out->month, (int)out->day); - return -1; - -invalid_time: - PyErr_Format(PyExc_ValueError, - "Invalid time (%d,%d,%d,%d) when converting " - "to NumPy datetime", - (int)out->hour, (int)out->min, (int)out->sec, (int)out->us); - return -1; -} - -npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - npy_datetimestruct *d) { - npy_datetime result = NPY_DATETIME_NAT; - - convert_datetimestruct_to_datetime(fr, d, &result); - return result; -} - -void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) { - convert_datetime_to_datetimestruct(fr, val, result); -} - -void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - NPY_DATETIMEUNIT fr, - pandas_timedeltastruct *result) { - convert_timedelta_to_timedeltastruct(fr, val, result); } /* * Converts a datetime from a datetimestruct to a datetime based * on a metadata unit. The date is assumed to be valid. - * - * Returns 0 on success, -1 on failure. 
*/ -int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, - const npy_datetimestruct *dts, - npy_datetime *out) { +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, + const npy_datetimestruct *dts) { npy_datetime ret; if (base == NPY_FR_Y) { @@ -632,17 +502,14 @@ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, return -1; } } - - *out = ret; - - return 0; + return ret; } /* * Converts a datetime based on the given metadata into a datetimestruct */ -int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, - npy_datetime dt, +void pandas_datetime_to_datetimestruct(npy_datetime dt, + NPY_DATETIMEUNIT base, npy_datetimestruct *out) { npy_int64 perday; @@ -850,10 +717,7 @@ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, PyErr_SetString(PyExc_RuntimeError, "NumPy datetime metadata is corrupted with invalid " "base unit"); - return -1; } - - return 0; } /* @@ -862,8 +726,8 @@ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, * * Returns 0 on success, -1 on failure. */ -int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, - npy_timedelta td, +void pandas_timedelta_to_timedeltastruct(npy_timedelta td, + NPY_DATETIMEUNIT base, pandas_timedeltastruct *out) { npy_int64 frac; npy_int64 sfrac; @@ -953,8 +817,5 @@ int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, PyErr_SetString(PyExc_RuntimeError, "NumPy timedelta metadata is corrupted with " "invalid base unit"); - return -1; } - - return 0; } diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index f5c48036c16f8..4347d0c8c47d4 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -18,6 +18,7 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ #include <numpy/ndarraytypes.h> +#include <datetime.h> typedef struct { npy_int64 days; @@ -30,11 +31,11 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- -int convert_pydatetime_to_datetimestruct(PyObject *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, npy_datetimestruct *out); -npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - npy_datetimestruct *d); +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, + const npy_datetimestruct *dts); void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, npy_datetimestruct *result); @@ -74,9 +75,4 @@ void add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes); -int -convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, - npy_datetime dt, - npy_datetimestruct *out); - #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index c9b0143ffc6ca..4bab32e93ab1e 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -481,16 +481,17 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, npy_datetimestruct dts; PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *)_obj; PRINTMARK(); + // TODO(anyone): Does not appear to be reached in tests. 
- pandas_datetime_to_datetimestruct( - obj->obval, (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); + pandas_datetime_to_datetimestruct(obj->obval, + (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { npy_datetimestruct dts; - PyObject *obj = (PyObject *)_obj; + PyDateTime_Date *obj = (PyDateTime_Date *)_obj; PRINTMARK(); diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 3c0fe98ee7b7d..76838c7a23b24 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -147,6 +147,9 @@ cdef inline void td64_to_tdstruct(int64_t td64, cdef inline int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts): + """ + Note we are assuming that the datetime object is timezone-naive. + """ dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -158,8 +161,7 @@ cdef inline int64_t pydatetime_to_dt64(datetime val, return dtstruct_to_dt64(dts) -cdef inline int64_t pydate_to_dt64(date val, - npy_datetimestruct *dts): +cdef inline int64_t pydate_to_dt64(date val, npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val)
`pydatetime_to_datetimestruct` does a ton of checking that boils down to "is this a valid datetime object?" Since the function only gets called after a type-check, we can assume it is a date/datetime and be a lot less verbose about it. This also rips out an unnecessary layer of functions `convert_datetime_to_datetimestruct`, `convert_timedelta_to_timedeltastruct`. cc @WillAyd you mentioned wanting to work on your C-foo. There's a comment about figuring out how to import the cpython datetime C-API. Any thoughts?
https://api.github.com/repos/pandas-dev/pandas/pulls/21962
2018-07-18T04:23:43Z
2018-07-20T12:30:14Z
2018-07-20T12:30:14Z
2020-04-05T17:41:30Z
standardize post-call treatment of get_dst_info, delay sorting calls
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index b8f97dcf2d599..acf6cd4b74362 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -104,7 +104,7 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, ndarray[int64_t] trans, deltas npy_datetimestruct dts object dt - int64_t value + int64_t value, delta ndarray[object] result = np.empty(n, dtype=object) object (*func_create)(int64_t, npy_datetimestruct, object, object) @@ -125,58 +125,67 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, raise ValueError("box must be one of 'datetime', 'date', 'time' or" " 'timestamp'") - if tz is not None: - if is_utc(tz): + if is_utc(tz) or tz is None: + for i in range(n): + value = arr[i] + if value == NPY_NAT: + result[i] = NaT + else: + dt64_to_dtstruct(value, &dts) + result[i] = func_create(value, dts, tz, freq) + elif is_tzlocal(tz): + for i in range(n): + value = arr[i] + if value == NPY_NAT: + result[i] = NaT + else: + # Python datetime objects do not support nanosecond + # resolution (yet, PEP 564). Need to compute new value + # using the i8 representation. + local_value = tz_convert_utc_to_tzlocal(value, tz) + dt64_to_dtstruct(local_value, &dts) + result[i] = func_create(value, dts, tz, freq) + else: + trans, deltas, typ = get_dst_info(tz) + + if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): value = arr[i] if value == NPY_NAT: result[i] = NaT else: - dt64_to_dtstruct(value, &dts) + # Adjust datetime64 timestamp, recompute datetimestruct + dt64_to_dtstruct(value + delta, &dts) result[i] = func_create(value, dts, tz, freq) - elif is_tzlocal(tz) or is_fixed_offset(tz): + + elif typ == 'dateutil': + # no zone-name change for dateutil tzs - dst etc + # represented in single object. 
for i in range(n): value = arr[i] if value == NPY_NAT: result[i] = NaT else: - # Python datetime objects do not support nanosecond - # resolution (yet, PEP 564). Need to compute new value - # using the i8 representation. - local_value = tz_convert_utc_to_tzlocal(value, tz) - dt64_to_dtstruct(local_value, &dts) + # Adjust datetime64 timestamp, recompute datetimestruct + pos = trans.searchsorted(value, side='right') - 1 + dt64_to_dtstruct(value + deltas[pos], &dts) result[i] = func_create(value, dts, tz, freq) else: - trans, deltas, typ = get_dst_info(tz) - + # pytz for i in range(n): - value = arr[i] if value == NPY_NAT: result[i] = NaT else: - # Adjust datetime64 timestamp, recompute datetimestruct pos = trans.searchsorted(value, side='right') - 1 - if treat_tz_as_pytz(tz): - # find right representation of dst etc in pytz timezone - new_tz = tz._tzinfos[tz._transition_info[pos]] - else: - # no zone-name change for dateutil tzs - dst etc - # represented in single object. - new_tz = tz + # find right representation of dst etc in pytz timezone + new_tz = tz._tzinfos[tz._transition_info[pos]] dt64_to_dtstruct(value + deltas[pos], &dts) result[i] = func_create(value, dts, new_tz, freq) - else: - for i in range(n): - - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - dt64_to_dtstruct(value, &dts) - result[i] = func_create(value, dts, None, freq) return result diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index fae855f5495f0..7621ac912d4d5 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -526,7 +526,7 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): """ cdef: ndarray[int64_t] trans, deltas - int64_t delta, local_val + int64_t local_val Py_ssize_t pos assert obj.tzinfo is None @@ -542,22 +542,23 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = 
trans.searchsorted(obj.value, side='right') - 1 - - # static/pytz/dateutil specific code if is_fixed_offset(tz): - # statictzinfo - assert len(deltas) == 1, len(deltas) + # static/fixed tzinfo; in this case we know len(deltas) == 1 + # This can come back with `typ` of either "fixed" or None dt64_to_dtstruct(obj.value + deltas[0], &obj.dts) - elif treat_tz_as_pytz(tz): + elif typ == 'pytz': + # i.e. treat_tz_as_pytz(tz) + pos = trans.searchsorted(obj.value, side='right') - 1 tz = tz._tzinfos[tz._transition_info[pos]] dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) - elif treat_tz_as_dateutil(tz): + elif typ == 'dateutil': + # i.e. treat_tz_as_dateutil(tz) + pos = trans.searchsorted(obj.value, side='right') - 1 dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) else: - # TODO: this case is never reached in the tests, but get_dst_info - # has a path that returns typ = None and empty deltas. - # --> Is this path possible? + # Note: as of 2018-07-17 all tzinfo objects that are _not_ + # either pytz or dateutil have is_fixed_offset(tz) == True, + # so this branch will never be reached. 
pass obj.tzinfo = tz @@ -1126,6 +1127,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): ndarray[int64_t] trans, deltas Py_ssize_t[:] pos npy_datetimestruct dts + int64_t delta if is_utc(tz): with nogil: @@ -1147,17 +1149,17 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + dt64_to_dtstruct(stamps[i] + delta, &dts) result[i] = _normalized_stamp(&dts) else: + pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT @@ -1207,7 +1209,7 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans, deltas npy_datetimestruct dts - int64_t local_val + int64_t local_val, delta if tz is None or is_utc(tz): for i in range(n): @@ -1223,12 +1225,22 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): else: trans, deltas, typ = get_dst_info(tz) - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(stamps[i]) - 1 + if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + dt64_to_dtstruct(stamps[i] + delta, &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False - dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: - return False + else: + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + pos = 
trans.searchsorted(stamps[i]) - 1 + + dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False return True diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 59db371833957..76dadb4ec3e23 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -938,13 +938,14 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, npy_datetimestruct dts int64_t local_val - if is_utc(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = get_period_ordinal(&dts, freq) + if is_utc(tz) or tz is None: + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + result[i] = get_period_ordinal(&dts, freq) elif is_tzlocal(tz): for i in range(n): @@ -958,10 +959,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT @@ -969,6 +968,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, dt64_to_dtstruct(stamps[i] + deltas[0], &dts) result[i] = get_period_ordinal(&dts, freq) else: + pos = trans.searchsorted(stamps, side='right') - 1 + for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 688b12005921d..0835a43411783 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -58,28 +58,19 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None): if tz is not None: tz = maybe_get_tz(tz) - return _reso_local(stamps, 
tz) - else: - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - return reso + return _reso_local(stamps, tz) cdef _reso_local(ndarray[int64_t] stamps, object tz): cdef: - Py_ssize_t n = len(stamps) + Py_ssize_t i, n = len(stamps) int reso = RESO_DAY, curr_reso ndarray[int64_t] trans, deltas Py_ssize_t[:] pos npy_datetimestruct dts - int64_t local_val + int64_t local_val, delta - if is_utc(tz): + if is_utc(tz) or tz is None: for i in range(n): if stamps[i] == NPY_NAT: continue @@ -100,18 +91,18 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): if stamps[i] == NPY_NAT: continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + dt64_to_dtstruct(stamps[i] + delta, &dts) curr_reso = _reso_stamp(&dts) if curr_reso < reso: reso = curr_reso else: + pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): if stamps[i] == NPY_NAT: continue diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index b3fab83fef415..2e3b07252d45e 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -258,12 +258,18 @@ cdef object get_dst_info(object tz): dtype='i8') * 1000000000 typ = 'fixed' else: - trans = np.array([], dtype='M8[ns]') - deltas = np.array([], dtype='i8') - typ = None + # 2018-07-12 this is not reached in the tests, and this case + # is not handled in any of the functions that call + # get_dst_info. If this case _were_ hit the calling + # functions would then hit an IndexError because they assume + # `deltas` is non-empty. 
+ # (under the just-deleted code that returned empty arrays) + raise AssertionError("dateutil tzinfo is not a FixedOffset " + "and has an empty `_trans_list`.", tz) else: # static tzinfo + # TODO: This case is not hit in tests (2018-07-17); is it possible? trans = np.array([NPY_NAT + 1], dtype=np.int64) num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 deltas = np.array([num], dtype=np.int64)
@jreback we discussed how there are a bunch of functions that do really similar things with `get_dst_info` but have their slight idiosyncrasies. This standardizes them, and is really verbose so as to delay certain calls until absolutely necessary. After this we can see about de-duplicating the 6ish occurrences of really similar code by passing a function pointer or something.
https://api.github.com/repos/pandas-dev/pandas/pulls/21960
2018-07-18T03:04:13Z
2018-07-20T12:29:21Z
2018-07-20T12:29:21Z
2018-07-25T00:43:34Z
BUG: bug in GroupBy.count where arg minlength passed to np.bincount must be None for np<1.13
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 1ac6d075946dd..37c7e9267b39a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -536,11 +536,11 @@ Groupby/Resample/Rolling - Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` with ``as_index=False`` leading to the loss of timezone information (:issue:`15884`) - Bug in :meth:`DatetimeIndex.resample` when downsampling across a DST boundary (:issue:`8531`) -- -- - +- Bug where ``ValueError`` is wrongly raised when calling :func:`~pandas.core.groupby.SeriesGroupBy.count` method of a + ``SeriesGroupBy`` when the grouping variable only contains NaNs and numpy version < 1.13 (:issue:`21956`). - Multiple bugs in :func:`pandas.core.Rolling.min` with ``closed='left'` and a datetime-like index leading to incorrect results and also segfault. (:issue:`21704`) +- Sparse ^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index fdededc325b03..4c87f6122b956 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1207,7 +1207,7 @@ def count(self): mask = (ids != -1) & ~isna(val) ids = ensure_platform_int(ids) - out = np.bincount(ids[mask], minlength=ngroups or 0) + out = np.bincount(ids[mask], minlength=ngroups or None) return Series(out, index=self.grouper.result_index, diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 787d99086873e..a14b6ff014f37 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -212,3 +212,13 @@ def test_count_with_datetimelike(self, datetimelike): expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) expected.index.name = "x" assert_frame_equal(expected, res) + + def test_count_with_only_nans_in_first_group(self): + # GH21956 + df = DataFrame({'A': [np.nan, np.nan], 'B': ['a', 'b'], 'C': [1, 2]}) + result = df.groupby(['A', 'B']).C.count() + mi = 
MultiIndex(levels=[[], ['a', 'b']], + labels=[[], []], + names=['A', 'B']) + expected = Series([], index=mi, dtype=np.int64, name='C') + assert_series_equal(result, expected, check_index_type=False)
- [x] closes #21956 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry See #21956 for details.
https://api.github.com/repos/pandas-dev/pandas/pulls/21957
2018-07-17T23:03:08Z
2018-07-28T13:51:03Z
2018-07-28T13:51:02Z
2018-08-01T10:43:13Z
DEPR: pd.read_table
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 61119089fdb42..4c1d2e2d446de 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -478,6 +478,7 @@ Deprecations - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) - :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) - :meth:`Categorical.from_codes` has deprecated providing float values for the ``codes`` argument. (:issue:`21767`) +- :func:`pandas.read_table` is deprecated. Instead, use :func:`pandas.read_csv` passing ``sep='\t'`` if necessary (:issue:`21948`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ebd35cb1a6a1a..bbe84110fd019 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1594,11 +1594,11 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, "for from_csv when changing your function calls", FutureWarning, stacklevel=2) - from pandas.io.parsers import read_table - return read_table(path, header=header, sep=sep, - parse_dates=parse_dates, index_col=index_col, - encoding=encoding, tupleize_cols=tupleize_cols, - infer_datetime_format=infer_datetime_format) + from pandas.io.parsers import read_csv + return read_csv(path, header=header, sep=sep, + parse_dates=parse_dates, index_col=index_col, + encoding=encoding, tupleize_cols=tupleize_cols, + infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 141a5d2389db5..0d564069c681f 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -9,7 +9,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" - Read text from clipboard and pass to read_table. See read_table for the + Read text from clipboard and pass to read_csv. 
See read_csv for the full argument list Parameters @@ -31,7 +31,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover 'reading from clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_get - from pandas.io.parsers import read_table + from pandas.io.parsers import read_csv text = clipboard_get() # try to decode (if needed on PY3) @@ -51,7 +51,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover # that this came from excel and set 'sep' accordingly lines = text[:10000].split('\n')[:-1][:10] - # Need to remove leading white space, since read_table + # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 @@ -80,7 +80,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover if kwargs.get('engine') == 'python' and PY2: text = text.encode('utf-8') - return read_table(StringIO(text), sep=sep, **kwargs) + return read_csv(StringIO(text), sep=sep, **kwargs) def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 88358ff392cb6..4b3fa08e5e4af 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -331,6 +331,10 @@ """ % (_parser_params % (_sep_doc.format(default="','"), _engine_doc)) _read_table_doc = """ + +.. deprecated:: 0.24.0 + Use :func:`pandas.read_csv` instead, passing ``sep='\t'`` if necessary. 
+ Read general delimited file into DataFrame %s @@ -540,9 +544,13 @@ def _read(filepath_or_buffer, kwds): } -def _make_parser_function(name, sep=','): +def _make_parser_function(name, default_sep=','): - default_sep = sep + # prepare read_table deprecation + if name == "read_table": + sep = False + else: + sep = default_sep def parser_f(filepath_or_buffer, sep=sep, @@ -611,11 +619,24 @@ def parser_f(filepath_or_buffer, memory_map=False, float_precision=None): + # deprecate read_table GH21948 + if name == "read_table": + if sep is False and delimiter is None: + warnings.warn("read_table is deprecated, use read_csv " + "instead, passing sep='\\t'.", + FutureWarning, stacklevel=2) + else: + warnings.warn("read_table is deprecated, use read_csv " + "instead.", + FutureWarning, stacklevel=2) + if sep is False: + sep = default_sep + # Alias sep -> delimiter. if delimiter is None: delimiter = sep - if delim_whitespace and delimiter is not default_sep: + if delim_whitespace and delimiter != default_sep: raise ValueError("Specified a delimiter with both sep and" " delim_whitespace=True; you can only" " specify one.") @@ -687,10 +708,10 @@ def parser_f(filepath_or_buffer, return parser_f -read_csv = _make_parser_function('read_csv', sep=',') +read_csv = _make_parser_function('read_csv', default_sep=',') read_csv = Appender(_read_csv_doc)(read_csv) -read_table = _make_parser_function('read_table', sep='\t') +read_table = _make_parser_function('read_table', default_sep='\t') read_table = Appender(_read_table_doc)(read_table) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 7623587803b41..b0cdbe2b5bedb 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,5 +1,5 @@ import pytest -from pandas.io.parsers import read_table +from pandas.io.parsers import read_csv @pytest.fixture @@ -17,7 +17,7 @@ def jsonl_file(datapath): @pytest.fixture def salaries_table(datapath): """DataFrame with the salaries dataset""" - return 
read_table(datapath('io', 'parser', 'data', 'salaries.csv')) + return read_csv(datapath('io', 'parser', 'data', 'salaries.csv'), sep='\t') @pytest.fixture diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 191e3f37f1c37..3218742aa7636 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -21,7 +21,7 @@ import numpy as np import pandas as pd from pandas import (DataFrame, Series, Index, Timestamp, MultiIndex, - date_range, NaT, read_table) + date_range, NaT, read_csv) from pandas.compat import (range, zip, lrange, StringIO, PY3, u, lzip, is_platform_windows, is_platform_32bit) @@ -1225,8 +1225,8 @@ def test_to_string(self): lines = result.split('\n') header = lines[0].strip().split() joined = '\n'.join(re.sub(r'\s+', ' ', x).strip() for x in lines[1:]) - recons = read_table(StringIO(joined), names=header, - header=None, sep=' ') + recons = read_csv(StringIO(joined), names=header, + header=None, sep=' ') tm.assert_series_equal(recons['B'], biggie['B']) assert recons['A'].count() == biggie['A'].count() assert (np.abs(recons['A'].dropna() - diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index f6a31008bca5c..a7cc3ad989ea1 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -12,7 +12,7 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import DataFrame -from pandas.io.parsers import read_csv, read_table +from pandas.io.parsers import read_csv from pandas.compat import BytesIO, StringIO @@ -44,7 +44,7 @@ def check_compressed_urls(salaries_table, compression, extension, mode, if mode != 'explicit': compression = mode - url_table = read_table(url, compression=compression, engine=engine) + url_table = read_csv(url, sep='\t', compression=compression, engine=engine) tm.assert_frame_equal(url_table, salaries_table) diff --git 
a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index b6f13039641a2..8535a51657abf 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -70,7 +70,9 @@ def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine kwds['low_memory'] = self.low_memory - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestCParserLowMemory(BaseParser, CParserTests): @@ -88,7 +90,9 @@ def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine kwds['low_memory'] = True - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestPythonParser(BaseParser, PythonParserTests): @@ -103,7 +107,9 @@ def read_csv(self, *args, **kwds): def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestUnsortedUsecols(object): diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 3117f6fae55da..1c64c1516077d 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -14,7 +14,7 @@ from pandas.compat import StringIO from pandas.errors import ParserError -from pandas.io.parsers import read_csv, read_table +from pandas.io.parsers import read_csv import pytest @@ -43,24 +43,24 @@ def test_c_engine(self): # specify C engine with unsupported options (raise) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', - sep=None, delim_whitespace=False) + read_csv(StringIO(data), engine='c', + sep=None, delim_whitespace=False) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', sep=r'\s') + 
read_csv(StringIO(data), engine='c', sep=r'\s') with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', quotechar=chr(128)) + read_csv(StringIO(data), engine='c', sep='\t', quotechar=chr(128)) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', skipfooter=1) + read_csv(StringIO(data), engine='c', skipfooter=1) # specify C-unsupported options without python-unsupported options with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), sep=None, delim_whitespace=False) + read_csv(StringIO(data), sep=None, delim_whitespace=False) with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), quotechar=chr(128)) + read_csv(StringIO(data), sep=r'\s') with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), sep=r'\s') + read_csv(StringIO(data), sep='\t', quotechar=chr(128)) with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), skipfooter=1) + read_csv(StringIO(data), skipfooter=1) text = """ A B C D E one two three four @@ -70,9 +70,9 @@ def test_c_engine(self): msg = 'Error tokenizing data' with tm.assert_raises_regex(ParserError, msg): - read_table(StringIO(text), sep='\\s+') + read_csv(StringIO(text), sep='\\s+') with tm.assert_raises_regex(ParserError, msg): - read_table(StringIO(text), engine='c', sep='\\s+') + read_csv(StringIO(text), engine='c', sep='\\s+') msg = "Only length-1 thousands markers supported" data = """A|B|C diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index ceaac9818354a..991b8ee508760 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -131,7 +131,6 @@ def test_iterator(self): @pytest.mark.parametrize('reader, module, error_class, fn_ext', [ (pd.read_csv, 'os', FileNotFoundError, 'csv'), - (pd.read_table, 'os', FileNotFoundError, 'csv'), (pd.read_fwf, 'os', FileNotFoundError, 'txt'), (pd.read_excel, 'xlrd', 
FileNotFoundError, 'xlsx'), (pd.read_feather, 'feather', Exception, 'feather'), @@ -149,9 +148,14 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): with pytest.raises(error_class): reader(path) + def test_read_non_existant_read_table(self): + path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv') + with pytest.raises(FileNotFoundError): + with tm.assert_produces_warning(FutureWarning): + pd.read_table(path) + @pytest.mark.parametrize('reader, module, path', [ (pd.read_csv, 'os', ('io', 'data', 'iris.csv')), - (pd.read_table, 'os', ('io', 'data', 'iris.csv')), (pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')), (pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')), (pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')), @@ -170,6 +174,22 @@ def test_read_fspath_all(self, reader, module, path, datapath): mypath = CustomFSPath(path) result = reader(mypath) expected = reader(path) + + if path.endswith('.pickle'): + # categorical + tm.assert_categorical_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + def test_read_fspath_all_read_table(self, datapath): + path = datapath('io', 'data', 'iris.csv') + + mypath = CustomFSPath(path) + with tm.assert_produces_warning(FutureWarning): + result = pd.read_table(mypath) + with tm.assert_produces_warning(FutureWarning): + expected = pd.read_table(path) + if path.endswith('.pickle'): # categorical tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 3caee2b44c579..dcfeab55f94fc 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -10,7 +10,7 @@ import numpy as np from pandas.core.index import Index, MultiIndex -from pandas import Panel, DataFrame, Series, notna, isna, Timestamp +from pandas import Panel, DataFrame, Series, notna, isna, Timestamp, read_csv from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import 
pandas.core.common as com @@ -512,14 +512,13 @@ def f(x): pytest.raises(com.SettingWithCopyError, f, result) def test_xs_level_multiple(self): - from pandas import read_table text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep=r'\s+', engine='python') + df = read_csv(StringIO(text), sep=r'\s+', engine='python') result = df.xs(('a', 4), level=['one', 'four']) expected = df.xs('a').xs(4, level='four') @@ -547,14 +546,13 @@ def f(x): tm.assert_frame_equal(rs, xp) def test_xs_level0(self): - from pandas import read_table text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep=r'\s+', engine='python') + df = read_csv(StringIO(text), sep=r'\s+', engine='python') result = df.xs('a', level=0) expected = df.xs('a')
`pd.read_table` is deprecated and replaced by `pd.read_csv`. - [x] closes #21948 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21954
2018-07-17T21:20:05Z
2018-08-02T10:49:07Z
2018-08-02T10:49:07Z
2018-08-02T10:49:07Z
BUG: fix df.where(cond) when cond is empty
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 695c4a4e16c9d..7a128f5cde7aa 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -1047,7 +1047,7 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Slicing Series and Dataframes with an monotonically increasing :class:`CategoricalIndex` +- Slicing Series and DataFrames with an monotonically increasing :class:`CategoricalIndex` is now very fast and has speed comparable to slicing with an ``Int64Index``. The speed increase is both when indexing by label (using .loc) and position(.iloc) (:issue:`20395`) Slicing a monotonically increasing :class:`CategoricalIndex` itself (i.e. ``ci[1000:2000]``) @@ -1150,7 +1150,7 @@ Timezones - Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`) - Bug in :class:`DatetimeIndex` comparisons failing to raise ``TypeError`` when comparing timezone-aware ``DatetimeIndex`` against ``np.datetime64`` (:issue:`22074`) - Bug in ``DataFrame`` assignment with a timezone-aware scalar (:issue:`19843`) -- Bug in :func:`Dataframe.asof` that raised a ``TypeError`` when attempting to compare tz-naive and tz-aware timestamps (:issue:`21194`) +- Bug in :func:`DataFrame.asof` that raised a ``TypeError`` when attempting to compare tz-naive and tz-aware timestamps (:issue:`21194`) - Bug when constructing a :class:`DatetimeIndex` with :class:`Timestamp`s constructed with the ``replace`` method across DST (:issue:`18785`) - Bug when setting a new value with :meth:`DataFrame.loc` with a :class:`DatetimeIndex` with a DST transition (:issue:`18308`, :issue:`20724`) - Bug in :meth:`DatetimeIndex.unique` that did not re-localize tz-aware dates correctly (:issue:`21737`) @@ -1313,6 +1313,7 @@ Reshaping - Bug in :func:`pandas.concat` when joining resampled DataFrames with timezone aware index 
(:issue:`13783`) - Bug in :meth:`Series.combine_first` with ``datetime64[ns, tz]`` dtype which would return tz-naive result (:issue:`21469`) - Bug in :meth:`Series.where` and :meth:`DataFrame.where` with ``datetime64[ns, tz]`` dtype (:issue:`21546`) +- Bug in :meth:`DataFrame.where` with an empty DataFrame and empty ``cond`` having non-bool dtype (:issue:`21947`) - Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`) - Bug in :meth:`DataFrame.replace` raises RecursionError when converting OutOfBounds ``datetime64[ns, tz]`` (:issue:`20380`) - :func:`pandas.core.groupby.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 71e4641d20c1b..396b092a286c1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8142,7 +8142,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) - else: + elif not cond.empty: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index ae04ffff37419..2467b2a89472b 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2877,6 +2877,14 @@ def test_where_none(self): 'on mixed-type'): df.where(~isna(df), None, inplace=True) + def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self): + # see gh-21947 + df = pd.DataFrame(columns=["a"]) + cond = df.applymap(lambda x: x > 0) + + result = df.where(cond) + tm.assert_frame_equal(result, df) + def test_where_align(self): def create():
- when cond is empty, cond.dtypes are objects, which raised `ValueError: Boolean array expected for the condition, not object ` - [ ] closes #xxxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21947
2018-07-17T14:00:22Z
2018-11-06T13:06:19Z
2018-11-06T13:06:19Z
2018-11-06T17:01:47Z
DOC add Python 2.7 warning to recent whatsnew; include 23.3
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 3ec812654ee4a..436bbeae5d08f 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -20,6 +20,8 @@ These are new features and improvements of note in each release. .. include:: whatsnew/v0.24.0.txt +.. include:: whatsnew/v0.23.3.txt + .. include:: whatsnew/v0.23.2.txt .. include:: whatsnew/v0.23.1.txt diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index cf60e86553fe3..1a514ba627fcb 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -6,6 +6,11 @@ v0.23.1 (June 12, 2018) This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + .. contents:: What's new in v0.23.1 :local: :backlinks: none diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 3f68eabdca4c2..7ec6e2632e717 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -11,6 +11,10 @@ and bug fixes. We recommend that all users upgrade to this version. Pandas 0.23.2 is first pandas release that's compatible with Python 3.7 (:issue:`20552`) +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. .. contents:: What's new in v0.23.2 :local: diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..ac1ef78fd6fd2 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -6,6 +6,10 @@ v0.23.4 This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. +.. 
warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. .. contents:: What's new in v0.23.4 :local: diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..a0076118a28a7 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -3,6 +3,11 @@ v0.24.0 (Month XX, 2018) ------------------------ +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + .. _whatsnew_0240.enhancements: New features
#18894 laid out: > We should add a big note on the top of each whatsnew that we are planning on dropping 2.7 support as of the end of 2018. So far, this warning only appeared in v0.23.0, and can be easily missed these days. This PR adds them to the recent whatsnews, and also includes v0.23.3 in `whatsnew.rst`
https://api.github.com/repos/pandas-dev/pandas/pulls/21944
2018-07-17T06:09:47Z
2018-07-17T12:01:52Z
2018-07-17T12:01:52Z
2018-08-05T17:27:30Z
CLN: Remove PeriodIndex.tz_convert, tz_localize
diff --git a/doc/source/api.rst b/doc/source/api.rst index fff944651588e..beded99318a5e 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1870,8 +1870,6 @@ Methods PeriodIndex.asfreq PeriodIndex.strftime PeriodIndex.to_timestamp - PeriodIndex.tz_convert - PeriodIndex.tz_localize Scalars ------- diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..c7c24a284fea7 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -304,7 +304,7 @@ Other API Changes a ``KeyError`` (:issue:`21678`). - Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) -- +- :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) .. _whatsnew_0240.deprecations: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a8e0c7f1aaa6a..f97f93d975af2 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -140,8 +140,6 @@ class PeriodIndex(PeriodArrayMixin, DatelikeOps, DatetimeIndexOpsMixin, asfreq strftime to_timestamp - tz_convert - tz_localize Examples -------- @@ -805,50 +803,6 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ - def tz_convert(self, tz): - """ - Convert tz-aware DatetimeIndex from one time zone to another (using - pytz/dateutil) - - Parameters - ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding UTC time. 
- - Returns - ------- - normalized : DatetimeIndex - - Notes - ----- - Not currently implemented for PeriodIndex - """ - raise NotImplementedError("Not yet implemented for PeriodIndex") - - def tz_localize(self, tz, ambiguous='raise'): - """ - Localize tz-naive DatetimeIndex to given time zone (using - pytz/dateutil), or remove timezone from tz-aware DatetimeIndex - - Parameters - ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding local time. - - Returns - ------- - localized : DatetimeIndex - - Notes - ----- - Not currently implemented for PeriodIndex - """ - raise NotImplementedError("Not yet implemented for PeriodIndex") - PeriodIndex._add_comparison_methods() PeriodIndex._add_numeric_methods_disabled() diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index fb9bd74d9876d..b1d9d362d1402 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -747,7 +747,6 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): def test_frame_to_period(self): K = 5 - from pandas.core.indexes.period import period_range dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') @@ -776,14 +775,6 @@ def test_frame_to_period(self): @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert']) def test_tz_convert_and_localize(self, fn): l0 = date_range('20140701', periods=5, freq='D') - - # TODO: l1 should be a PeriodIndex for testing - # after GH2106 is addressed - with pytest.raises(NotImplementedError): - period_range('20140701', periods=1).tz_convert('UTC') - with pytest.raises(NotImplementedError): - period_range('20140701', periods=1).tz_localize('UTC') - # l1 = period_range('20140701', periods=5, freq='D') l1 = date_range('20140701', periods=5, freq='D') int_idx = Index(range(5))
- [x] closes #21781 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - Removed PeriodIndex.tz_convert and PeriodIndex.tz_localize from PeriodIndex. - Removed references to those methods in test_tz_convert_and_localize within test_timeseries - Removed methods from the PeriodIndex class doc and updated whatsnew test_tz_convert_and_localize also contains a TODO referencing changing the test to use PeriodIndex for a timezone test, which if it is never gaining timezone info should probably be removed. I just wanted to check that this was a good idea before doing it.
https://api.github.com/repos/pandas-dev/pandas/pulls/21935
2018-07-16T17:22:39Z
2018-07-18T01:00:19Z
2018-07-18T01:00:19Z
2018-07-19T19:45:33Z
BUG: Properly handle lists for .mask
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a0076118a28a7..01a0c35117ea3 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -160,7 +160,7 @@ For situations where you need an ``ndarray`` of ``Interval`` objects, use :meth:`numpy.asarray` or ``idx.astype(object)``. .. ipython:: python - + np.asarray(idx) idx.values.astype(object) @@ -487,6 +487,7 @@ Reshaping - Bug in :func:`pandas.concat` when joining resampled DataFrames with timezone aware index (:issue:`13783`) - Bug in :meth:`Series.combine_first` with ``datetime64[ns, tz]`` dtype which would return tz-naive result (:issue:`21469`) - Bug in :meth:`Series.where` and :meth:`DataFrame.where` with ``datetime64[ns, tz]`` dtype (:issue:`21546`) +- Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`) - - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7305da4f56506..b682f5e65f876 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7941,6 +7941,10 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, inplace = validate_bool_kwarg(inplace, 'inplace') cond = com._apply_if_callable(cond, self) + # see gh-21891 + if not hasattr(cond, "__invert__"): + cond = np.array(cond) + return self.where(~cond, other=other, inplace=inplace, axis=axis, level=level, try_cast=try_cast, errors=errors) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 9ca2b7e3c8a6a..2eed6b47df9e3 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2966,6 +2966,13 @@ def test_mask(self): assert_frame_equal(rs, df.mask(df <= 0, other)) assert_frame_equal(rs, df.mask(~cond, other)) + # see gh-21891 + df = DataFrame([1, 2]) + res = df.mask([[True], [False]]) + + exp = DataFrame([np.nan, 2]) + tm.assert_frame_equal(res, exp) + def test_mask_inplace(self): # GH8801 df = DataFrame(np.random.randn(5, 3)) 
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index bd54d5f57d12d..e2a9b3586648d 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -617,6 +617,13 @@ def test_mask(): expected = Series([1, 2, np.nan, np.nan]) assert_series_equal(result, expected) + # see gh-21891 + s = Series([1, 2]) + res = s.mask([True, False]) + + exp = Series([np.nan, 2]) + tm.assert_series_equal(res, exp) + def test_mask_inplace(): s = Series(np.random.randn(5))
Title is self-explanatory. Closes #21891.
https://api.github.com/repos/pandas-dev/pandas/pulls/21934
2018-07-16T16:37:41Z
2018-07-17T19:40:40Z
2018-07-17T19:40:39Z
2018-07-17T19:47:31Z
ENH: add iso-format support to to_timedelta (#21877)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..04e56c373edc0 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -84,6 +84,7 @@ Other Enhancements - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) +- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) - .. _whatsnew_0240.api_breaking: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b9405b15a0980..f7a6cf0c6dafc 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -183,7 +183,11 @@ cpdef convert_to_timedelta64(object ts, object unit): ts = cast_from_unit(ts, unit) ts = np.timedelta64(ts) elif is_string_object(ts): - ts = np.timedelta64(parse_timedelta_string(ts)) + if len(ts) > 0 and ts[0] == 'P': + ts = parse_iso_format_string(ts) + else: + ts = parse_timedelta_string(ts) + ts = np.timedelta64(ts) elif hasattr(ts, 'delta'): ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index 68dc0003e2312..447e2b40050f6 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -44,6 +44,13 @@ def test_constructor(self): tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'), expected) + def test_constructor_iso(self): + # GH #21877 + expected = 
timedelta_range('1s', periods=9, freq='s') + durations = ['P0DT0H0M{}S'.format(i) for i in range(1, 10)] + result = to_timedelta(durations) + tm.assert_index_equal(result, expected) + def test_constructor_coverage(self): rng = timedelta_range('1 days', periods=10.5) exp = timedelta_range('1 days', periods=10) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 6472bd4245622..017606dc42d59 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -233,6 +233,11 @@ def check(value): assert tup.microseconds == 999 assert tup.nanoseconds == 0 + def test_iso_conversion(self): + # GH #21877 + expected = Timedelta(1, unit='s') + assert to_timedelta('P0DT0H0M1S') == expected + def test_nat_converters(self): assert to_timedelta('nat', box=False).astype('int64') == iNaT assert to_timedelta('nan', box=False).astype('int64') == iNaT
- [x] closes #21877 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21933
2018-07-16T15:38:53Z
2018-07-20T12:42:01Z
2018-07-20T12:42:01Z
2018-07-20T12:49:14Z
DEPR: Series.compress
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 137fd5aafe5bd..65dad1304d780 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -382,7 +382,7 @@ Deprecations - :meth:`DataFrame.to_stata`, :meth:`read_stata`, :class:`StataReader` and :class:`StataWriter` have deprecated the ``encoding`` argument. The encoding of a Stata dta file is determined by the file type and cannot be changed (:issue:`21244`). - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) -- +- :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/series.py b/pandas/core/series.py index 3571e908fc6a7..c53caac980790 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -510,10 +510,15 @@ def compress(self, condition, *args, **kwargs): """ Return selected slices of an array along given axis as a Series + .. deprecated:: 0.24.0 + See also -------- numpy.ndarray.compress """ + msg = ("Series.compress(condition) is deprecated. " + "Use Series[condition] instead.") + warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_compress(args, kwargs) return self[condition] diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 8c0f4b11149fe..69969bd090b9b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -585,7 +585,9 @@ def test_compress(self): index=list('abcde'), name='foo') expected = Series(s.values.compress(cond), index=list('ac'), name='foo') - tm.assert_series_equal(s.compress(cond), expected) + with tm.assert_produces_warning(FutureWarning): + result = s.compress(cond) + tm.assert_series_equal(result, expected) def test_numpy_compress(self): cond = [True, False, True, False, False]
xref #18262
https://api.github.com/repos/pandas-dev/pandas/pulls/21930
2018-07-16T12:59:18Z
2018-07-25T10:31:37Z
2018-07-25T10:31:37Z
2018-07-26T13:16:43Z
CLN: Address MulitIndex Test Follow Ups in Issue #21918
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index 072356e4923a6..4cc0504417801 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -1,4 +1,11 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pandas as pd +import pandas.util.testing as tm import pytest +from pandas import Index, MultiIndex, date_range, period_range +from pandas.compat import lrange def test_shift(idx): @@ -6,3 +13,316 @@ def test_shift(idx): # GH8083 test the base class for shift pytest.raises(NotImplementedError, idx.shift, 1) pytest.raises(NotImplementedError, idx.shift, 1, 2) + + +def test_bounds(idx): + idx._bounds + + +def test_groupby(idx): + groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2])) + labels = idx.get_values().tolist() + exp = {1: labels[:3], 2: labels[3:]} + tm.assert_dict_equal(groups, exp) + + # GH5620 + groups = idx.groupby(idx) + exp = {key: [key] for key in idx} + tm.assert_dict_equal(groups, exp) + + +def test_truncate(): + major_axis = Index(lrange(4)) + minor_axis = Index(lrange(2)) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + + result = index.truncate(before=1) + assert 'foo' not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(after=1) + assert 2 not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(before=1, after=2) + assert len(result.levels[0]) == 2 + + # after < before + pytest.raises(ValueError, index.truncate, 3, 1) + + +def test_where(): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + + def f(): + i.where(True) + + pytest.raises(NotImplementedError, f) + + +def test_where_array_like(): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + klasses = [list, tuple, np.array, pd.Series] + cond = [False, True] + + for klass in klasses: + 
def f(): + return i.where(klass(cond)) + pytest.raises(NotImplementedError, f) + +# TODO: reshape + + +def test_reorder_levels(idx): + # this blows up + tm.assert_raises_regex(IndexError, '^Too many levels', + idx.reorder_levels, [2, 1, 0]) + + +def test_numpy_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(np.repeat(m, reps), expected) + + msg = "the 'axis' parameter is not supported" + tm.assert_raises_regex( + ValueError, msg, np.repeat, m, reps, axis=1) + + +def test_append_mixed_dtypes(): + # GH 13660 + dti = date_range('2011-01-01', freq='M', periods=3, ) + dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') + pi = period_range('2011-01', freq='M', periods=3) + + mi = MultiIndex.from_arrays([[1, 2, 3], + [1.1, np.nan, 3.3], + ['a', 'b', 'c'], + dti, dti_tz, pi]) + assert mi.nlevels == 6 + + res = mi.append(mi) + exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], + [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], + ['a', 'b', 'c', 'a', 'b', 'c'], + dti.append(dti), + dti_tz.append(dti_tz), + pi.append(pi)]) + tm.assert_index_equal(res, exp) + + other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z']]) + + res = mi.append(other) + exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], + [1.1, np.nan, 3.3, 'x', 'y', 'z'], + ['a', 'b', 'c', 'x', 'y', 'z'], + dti.append(pd.Index(['x', 'y', 'z'])), + dti_tz.append(pd.Index(['x', 'y', 'z'])), + pi.append(pd.Index(['x', 'y', 'z']))]) + tm.assert_index_equal(res, exp) + + +def test_take(idx): + indexer = [4, 3, 0, 2] + result = idx.take(indexer) + expected = idx[indexer] + assert result.equals(expected) + + # TODO: Remove Commented Code + # if not isinstance(idx, + # (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # GH 10791 + 
with pytest.raises(AttributeError): + idx.freq + + +def test_take_invalid_kwargs(idx): + idx = idx + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + +def test_take_fill_value(): + # GH 12631 + vals = [['A', 'B'], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] + idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) + + result = idx.take(np.array([1, 0, -1])) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + (np.nan, pd.NaT)] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + +def test_iter(idx): + result = 
list(idx) + expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), + ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] + assert result == expected + + +def test_sub(idx): + + first = idx + + # - now raises (previously was set op difference) + with pytest.raises(TypeError): + first - idx[-3:] + with pytest.raises(TypeError): + idx[-3:] - first + with pytest.raises(TypeError): + idx[-3:] - first.tolist() + with pytest.raises(TypeError): + first.tolist() - idx[-3:] + + +def test_map(idx): + # callable + index = idx + + # we don't infer UInt64 + if isinstance(index, pd.UInt64Index): + expected = index.astype('int64') + else: + expected = index + + result = index.map(lambda x: x) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "mapper", + [ + lambda values, idx: {i: e for e, i in zip(values, idx)}, + lambda values, idx: pd.Series(values, idx)]) +def test_map_dictlike(idx, mapper): + + if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)): + pytest.skip("skipping tests for {}".format(type(idx))) + + identity = mapper(idx.values, idx) + + # we don't infer to UInt64 for a dict + if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict): + expected = idx.astype('int64') + else: + expected = idx + + result = idx.map(identity) + tm.assert_index_equal(result, expected) + + # empty mappable + expected = pd.Index([np.nan] * len(idx)) + result = idx.map(mapper(expected, idx)) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize('func', [ + np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, + np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, + np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, + np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, + np.rad2deg +]) +def test_numpy_ufuncs(func): + # test ufuncs of numpy 1.9.2. 
see: + # http://docs.scipy.org/doc/numpy/reference/ufuncs.html + + # some functions are skipped because it may return different result + # for unicode input depending on numpy version + + # copy and paste from idx fixture as pytest doesn't support + # parameters and fixtures at the same time. + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + + idx = MultiIndex( + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, + verify_integrity=False + ) + + with pytest.raises(Exception): + with np.errstate(all='ignore'): + func(idx) + + +@pytest.mark.parametrize('func', [ + np.isfinite, np.isinf, np.isnan, np.signbit +]) +def test_numpy_type_funcs(func): + # for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: + # copy and paste from idx fixture as pytest doesn't support + # parameters and fixtures at the same time. 
+ major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + + idx = MultiIndex( + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, + verify_integrity=False + ) + + with pytest.raises(Exception): + func(idx) diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py new file mode 100644 index 0000000000000..e0e23609290e5 --- /dev/null +++ b/pandas/tests/indexes/multi/test_astype.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pandas.util.testing as tm +import pytest +from pandas.util.testing import assert_copy +from pandas.core.dtypes.dtypes import CategoricalDtype + + +def test_astype(idx): + expected = idx.copy() + actual = idx.astype('O') + assert_copy(actual.levels, expected.levels) + assert_copy(actual.labels, expected.labels) + assert [level.name for level in actual.levels] == list(expected.names) + + with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): + idx.astype(np.dtype(int)) + + +@pytest.mark.parametrize('ordered', [True, False]) +def test_astype_category(idx, ordered): + # GH 18630 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + idx.astype(CategoricalDtype(ordered=ordered)) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + with tm.assert_raises_regex(NotImplementedError, msg): + idx.astype('category') diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py index 9577662bda366..4b8d0553886b2 100644 --- a/pandas/tests/indexes/multi/test_constructor.py +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -234,29 +234,30 @@ def test_from_arrays_empty(): tm.assert_index_equal(result, expected) -def 
test_from_arrays_invalid_input(): +@pytest.mark.parametrize('invalid_array', [ + (1), + ([1]), + ([1, 2]), + ([[1], 2]), + ('a'), + (['a']), + (['a', 'b']), + ([['a'], 'b']), +]) +def test_from_arrays_invalid_input(invalid_array): invalid_inputs = [1, [1], [1, 2], [[1], 2], 'a', ['a'], ['a', 'b'], [['a'], 'b']] for i in invalid_inputs: pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) -def test_from_arrays_different_lengths(): +@pytest.mark.parametrize('idx1, idx2', [ + ([1, 2, 3], ['a', 'b']), + ([], ['a', 'b']), + ([1, 2, 3], []) +]) +def test_from_arrays_different_lengths(idx1, idx2): # see gh-13599 - idx1 = [1, 2, 3] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [1, 2, 3] - idx2 = [] tm.assert_raises_regex(ValueError, '^all arrays must ' 'be same length$', MultiIndex.from_arrays, [idx1, idx2]) @@ -305,66 +306,87 @@ def test_from_tuples_index_values(idx): assert (result.values == idx.values).all() -def test_from_product_empty(): +def test_from_product_empty_zero_levels(): # 0 levels with tm.assert_raises_regex( ValueError, "Must pass non-zero number of levels/labels"): MultiIndex.from_product([]) - # 1 level + +def test_from_product_empty_one_level(): result = MultiIndex.from_product([[]], names=['A']) expected = pd.Index([], name='A') tm.assert_index_equal(result.levels[0], expected) - # 2 levels - l1 = [[], ['foo', 'bar', 'baz'], []] - l2 = [[], [], ['a', 'b', 'c']] + +@pytest.mark.parametrize('first, second', [ + ([], []), + (['foo', 'bar', 'baz'], []), + ([], ['a', 'b', 'c']), +]) +def test_from_product_empty_two_levels(first, second): names = ['A', 'B'] - for first, second in zip(l1, l2): - result = MultiIndex.from_product([first, second], names=names) - expected = MultiIndex(levels=[first, 
second], - labels=[[], []], names=names) - tm.assert_index_equal(result, expected) + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], + labels=[[], []], names=names) + tm.assert_index_equal(result, expected) + +@pytest.mark.parametrize('N', list(range(4))) +def test_from_product_empty_three_levels(N): # GH12258 names = ['A', 'B', 'C'] - for N in range(4): - lvl2 = lrange(N) - result = MultiIndex.from_product([[], lvl2, []], names=names) - expected = MultiIndex(levels=[[], lvl2, []], - labels=[[], [], []], names=names) - tm.assert_index_equal(result, expected) + lvl2 = lrange(N) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], + labels=[[], [], []], names=names) + tm.assert_index_equal(result, expected) -def test_from_product_invalid_input(): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - for i in invalid_inputs: - pytest.raises(TypeError, MultiIndex.from_product, iterables=i) +@pytest.mark.parametrize('invalid_input', [ + 1, + [1], + [1, 2], + [[1], 2], + 'a', + ['a'], + ['a', 'b'], + [['a'], 'b'], +]) +def test_from_product_invalid_input(invalid_input): + pytest.raises(TypeError, MultiIndex.from_product, iterables=invalid_input) def test_from_product_datetimeindex(): dt_index = date_range('2000-01-01', periods=2) mi = pd.MultiIndex.from_product([[1, 2], dt_index]) - etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( - '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( - '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) + etalon = construct_1d_object_array_from_listlike([ + (1, pd.Timestamp('2000-01-01')), + (1, pd.Timestamp('2000-01-02')), + (2, pd.Timestamp('2000-01-01')), + (2, pd.Timestamp('2000-01-02')), + ]) tm.assert_numpy_array_equal(mi.values, etalon) -def test_from_product_index_series_categorical(): +@pytest.mark.parametrize('ordered', [False, True]) 
+@pytest.mark.parametrize('f', [ + lambda x: x, + lambda x: pd.Series(x), + lambda x: x.values +]) +def test_from_product_index_series_categorical(ordered, f): # GH13743 first = ['foo', 'bar'] - for ordered in [False, True]: - idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), - ordered=ordered) - expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), - categories=list("bac"), - ordered=ordered) - for arr in [idx, pd.Series(idx), idx.values]: - result = pd.MultiIndex.from_product([first, arr]) - tm.assert_index_equal(result.get_level_values(1), expected) + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), + ordered=ordered) + expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), + categories=list("bac"), + ordered=ordered) + + result = pd.MultiIndex.from_product([first, f(idx)]) + tm.assert_index_equal(result.get_level_values(1), expected) def test_from_product(): @@ -409,19 +431,28 @@ def test_create_index_existing_name(idx): index = idx index.names = ['foo', 'bar'] result = pd.Index(index) - tm.assert_index_equal( - result, Index(Index([('foo', 'one'), ('foo', 'two'), - ('bar', 'one'), ('baz', 'two'), - ('qux', 'one'), ('qux', 'two')], - dtype='object'), - names=['foo', 'bar'])) + expected = Index( + Index([ + ('foo', 'one'), ('foo', 'two'), + ('bar', 'one'), ('baz', 'two'), + ('qux', 'one'), ('qux', 'two')], + dtype='object' + ), + names=['foo', 'bar'] + ) + tm.assert_index_equal(result, expected) result = pd.Index(index, names=['A', 'B']) - tm.assert_index_equal( - result, - Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), - ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], - dtype='object'), names=['A', 'B'])) + expected = Index( + Index([ + ('foo', 'one'), ('foo', 'two'), + ('bar', 'one'), ('baz', 'two'), + ('qux', 'one'), ('qux', 'two')], + dtype='object' + ), + names=['A', 'B'] + ) + tm.assert_index_equal(result, expected) def test_tuples_with_name_string(): diff --git 
a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index aaed4467816da..7b91a1d14d7e8 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -43,8 +43,10 @@ def test_isin_nan_pypy(): def test_isin(): values = [('foo', 2), ('bar', 3), ('quux', 4)] - idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( - 4)]) + idx = MultiIndex.from_arrays([ + ['qux', 'baz', 'foo', 'bar'], + np.arange(4) + ]) result = idx.isin(values) expected = np.array([False, False, True, True]) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py index 282f2fa84efe0..f6c5c0c5eb346 100644 --- a/pandas/tests/indexes/multi/test_copy.py +++ b/pandas/tests/indexes/multi/test_copy.py @@ -3,8 +3,8 @@ from copy import copy, deepcopy import pandas.util.testing as tm -from pandas import (CategoricalIndex, IntervalIndex, MultiIndex, PeriodIndex, - RangeIndex, Series, compat) +import pytest +from pandas import MultiIndex def assert_multiindex_copied(copy, original): @@ -41,84 +41,46 @@ def test_view(idx): assert_multiindex_copied(i_view, idx) -def test_copy_name(idx): - # gh-12309: Check that the "name" argument - # passed at initialization is honored. - - # TODO: Remove or refactor MultiIndex not tested. - for name, index in compat.iteritems({'idx': idx}): - if isinstance(index, MultiIndex): - continue - - first = index.__class__(index, copy=True, name='mario') - second = first.__class__(first, copy=False) - - # Even though "copy=False", we want a new object. - assert first is not second - - # Not using tm.assert_index_equal() since names differ. 
- assert index.equals(first) - - assert first.name == 'mario' - assert second.name == 'mario' - - s1 = Series(2, index=first) - s2 = Series(3, index=second[:-1]) - - if not isinstance(index, CategoricalIndex): - # See gh-13365 - s3 = s1 * s2 - assert s3.index.name == 'mario' - - -def test_ensure_copied_data(idx): - # Check the "copy" argument of each Index.__new__ is honoured - # GH12309 - # TODO: REMOVE THIS TEST. MultiIndex is tested seperately as noted below. - - for name, index in compat.iteritems({'idx': idx}): - init_kwargs = {} - if isinstance(index, PeriodIndex): - # Needs "freq" specification: - init_kwargs['freq'] = index.freq - elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): - # RangeIndex cannot be initialized from data - # MultiIndex and CategoricalIndex are tested separately - continue - - index_type = index.__class__ - result = index_type(index.values, copy=True, **init_kwargs) - tm.assert_index_equal(index, result) - tm.assert_numpy_array_equal(index.values, result.values, - check_same='copy') - - if isinstance(index, PeriodIndex): - # .values an object array of Period, thus copied - result = index_type(ordinal=index.asi8, copy=False, - **init_kwargs) - tm.assert_numpy_array_equal(index._ndarray_values, - result._ndarray_values, - check_same='same') - elif isinstance(index, IntervalIndex): - # checked in test_interval.py - pass - else: - result = index_type(index.values, copy=False, **init_kwargs) - tm.assert_numpy_array_equal(index.values, result.values, - check_same='same') - tm.assert_numpy_array_equal(index._ndarray_values, - result._ndarray_values, - check_same='same') - - -def test_copy_and_deepcopy(indices): - - if isinstance(indices, MultiIndex): - return - for func in (copy, deepcopy): - idx_copy = func(indices) - assert idx_copy is not indices - assert idx_copy.equals(indices) - - new_copy = indices.copy(deep=True, name="banana") - assert new_copy.name == "banana" +@pytest.mark.parametrize('func', [copy, deepcopy]) +def 
test_copy_and_deepcopy(func): + + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + idx_copy = func(idx) + assert idx_copy is not idx + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize('deep', [True, False]) +def test_copy_method(deep): + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + idx_copy = idx.copy(deep=deep) + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize('deep', [True, False]) +@pytest.mark.parametrize('kwarg, value', [ + ('names', ['thrid', 'fourth']), + ('levels', [['foo2', 'bar2'], ['fizz2', 'buzz2']]), + ('labels', [[1, 0, 0, 0], [1, 1, 0, 0]]) +]) +def test_copy_method_kwargs(deep, kwarg, value): + # gh-12309: Check that the "name" argument as well other kwargs are honored + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + + idx_copy = idx.copy(**{kwarg: value, 'deep': deep}) + if kwarg == 'names': + assert getattr(idx_copy, kwarg) == value + else: + assert list(list(i) for i in getattr(idx_copy, kwarg)) == value diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py index 0bebe3165e2e8..7770ee96bbfb3 100644 --- a/pandas/tests/indexes/multi/test_equivalence.py +++ b/pandas/tests/indexes/multi/test_equivalence.py @@ -4,29 +4,25 @@ import numpy as np import pandas as pd import pandas.util.testing as tm -from pandas import Index, MultiIndex, RangeIndex, Series, compat +from pandas import Index, MultiIndex, Series from pandas.compat import lrange, lzip, range def test_equals(idx): - # TODO: Remove or Refactor. MultiIndex not tested. 
- for name, idx in compat.iteritems({'idx': idx}): - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - - assert not idx.equals(list(idx)) - assert not idx.equals(np.array(idx)) - - # Cannot pass in non-int64 dtype to RangeIndex - if not isinstance(idx, RangeIndex): - same_values = Index(idx, dtype=object) - assert idx.equals(same_values) - assert same_values.equals(idx) - - if idx.nlevels == 1: - # do not test MultiIndex - assert not idx.equals(pd.Series(idx)) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + + assert not idx.equals(list(idx)) + assert not idx.equals(np.array(idx)) + + same_values = Index(idx, dtype=object) + assert idx.equals(same_values) + assert same_values.equals(idx) + + if idx.nlevels == 1: + # do not test MultiIndex + assert not idx.equals(pd.Series(idx)) def test_equals_op(idx): diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py index 21e8a199cadd9..63936a74b6b8c 100644 --- a/pandas/tests/indexes/multi/test_format.py +++ b/pandas/tests/indexes/multi/test_format.py @@ -100,11 +100,6 @@ def test_repr_roundtrip(): tm.assert_index_equal(result, mi_u, exact=True) -def test_str(): - # tested elsewhere - pass - - def test_unicode_string_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index 56fd4c04cb96e..30be5b546f7c7 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -9,6 +9,16 @@ from pandas.compat import range +def assert_matching(actual, expected, check_dtype=False): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + 
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) + + def test_get_level_number_integer(idx): idx.names = [1, 0] assert idx._get_level_number(1) == 0 @@ -164,15 +174,6 @@ def test_set_levels(idx): levels = idx.levels new_levels = [[lev + 'a' for lev in level] for level in levels] - def assert_matching(actual, expected, check_dtype=False): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp) - tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) - # level changing [w/o mutation] ind2 = idx.set_levels(new_levels) assert_matching(ind2.levels, new_levels) @@ -254,15 +255,6 @@ def test_set_labels(idx): minor_labels = [(x + 1) % 1 for x in minor_labels] new_labels = [major_labels, minor_labels] - def assert_matching(actual, expected): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp, dtype=np.int8) - tm.assert_numpy_array_equal(act, exp) - # label changing [w/o mutation] ind2 = idx.set_labels(new_labels) assert_matching(ind2.labels, new_labels) @@ -389,21 +381,22 @@ def test_set_names_with_nlevel_1(inplace): tm.assert_index_equal(result, expected) -def test_set_levels_categorical(): +@pytest.mark.parametrize('ordered', [True, False]) +def test_set_levels_categorical(ordered): # GH13854 index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) - for ordered in [False, True]: - cidx = CategoricalIndex(list("bac"), ordered=ordered) - result = index.set_levels(cidx, 0) - expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], - labels=index.labels) - tm.assert_index_equal(result, expected) - - result_lvl = result.get_level_values(0) - expected_lvl = CategoricalIndex(list("bacb"), - categories=cidx.categories, - ordered=cidx.ordered) - tm.assert_index_equal(result_lvl, 
expected_lvl) + + cidx = CategoricalIndex(list("bac"), ordered=ordered) + result = index.set_levels(cidx, 0) + expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], + labels=index.labels) + tm.assert_index_equal(result, expected) + + result_lvl = result.get_level_values(0) + expected_lvl = CategoricalIndex(list("bacb"), + categories=cidx.categories, + ordered=cidx.ordered) + tm.assert_index_equal(result_lvl, expected_lvl) def test_set_value_keeps_names(): diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 0b528541e5eb6..ebd50909bae98 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -109,31 +109,6 @@ def test_slice_locs_not_contained(): assert result == (0, len(index)) -def test_insert_base(idx): - - result = idx[1:4] - - # test 0th element - assert idx[0:4].equals(result.insert(0, idx[0])) - - -def test_delete_base(idx): - - expected = idx[1:] - result = idx.delete(0) - assert result.equals(expected) - assert result.name == expected.name - - expected = idx[:-1] - result = idx.delete(-1) - assert result.equals(expected) - assert result.name == expected.name - - with pytest.raises((IndexError, ValueError)): - # either depending on numpy version - result = idx.delete(len(idx)) - - def test_putmask_with_wrong_mask(idx): # GH18368 diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py index 4a386c6e8dbe4..ac3958956bae7 100644 --- a/pandas/tests/indexes/multi/test_join.py +++ b/pandas/tests/indexes/multi/test_join.py @@ -8,10 +8,11 @@ from pandas import Index, MultiIndex -@pytest.mark.parametrize('other', - [Index(['three', 'one', 'two']), - Index(['one']), - Index(['one', 'three'])]) +@pytest.mark.parametrize('other', [ + Index(['three', 'one', 'two']), + Index(['one']), + Index(['one', 'three']), +]) def test_join_level(idx, other, join_type): join_index, lidx, ridx = other.join(idx, how=join_type, level='second', 
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py index 01465ea4c2f3b..79fcff965e725 100644 --- a/pandas/tests/indexes/multi/test_missing.py +++ b/pandas/tests/indexes/multi/test_missing.py @@ -4,7 +4,7 @@ import pandas as pd import pandas.util.testing as tm import pytest -from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index, isna +from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index from pandas._libs.tslib import iNaT from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin @@ -78,27 +78,9 @@ def test_nulls(idx): # this is really a smoke test for the methods # as these are adequately tested for function elsewhere - # TODO: Remove or Refactor. MultiIndex not Implemeted. - for name, index in [('idx', idx), ]: - if len(index) == 0: - tm.assert_numpy_array_equal( - index.isna(), np.array([], dtype=bool)) - elif isinstance(index, MultiIndex): - idx = index.copy() - msg = "isna is not defined for MultiIndex" - with tm.assert_raises_regex(NotImplementedError, msg): - idx.isna() - else: - - if not index.hasnans: - tm.assert_numpy_array_equal( - index.isna(), np.zeros(len(index), dtype=bool)) - tm.assert_numpy_array_equal( - index.notna(), np.ones(len(index), dtype=bool)) - else: - result = isna(index) - tm.assert_numpy_array_equal(index.isna(), result) - tm.assert_numpy_array_equal(index.notna(), ~result) + msg = "isna is not defined for MultiIndex" + with tm.assert_raises_regex(NotImplementedError, msg): + idx.isna() @pytest.mark.xfail diff --git a/pandas/tests/indexes/multi/test_operations.py b/pandas/tests/indexes/multi/test_operations.py deleted file mode 100644 index d38cb28039595..0000000000000 --- a/pandas/tests/indexes/multi/test_operations.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -import pandas as pd -import pandas.util.testing as tm -import pytest -from pandas import (DatetimeIndex, Float64Index, Index, Int64Index, MultiIndex, - 
PeriodIndex, TimedeltaIndex, UInt64Index, date_range, - period_range) -from pandas.compat import lrange, range -from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -from pandas.util.testing import assert_copy - - -def check_level_names(index, names): - assert [level.name for level in index.levels] == list(names) - - -def test_insert(idx): - # key contained in all levels - new_index = idx.insert(0, ('bar', 'two')) - assert new_index.equal_levels(idx) - assert new_index[0] == ('bar', 'two') - - # key not contained in all levels - new_index = idx.insert(0, ('abc', 'three')) - - exp0 = Index(list(idx.levels[0]) + ['abc'], name='first') - tm.assert_index_equal(new_index.levels[0], exp0) - - exp1 = Index(list(idx.levels[1]) + ['three'], name='second') - tm.assert_index_equal(new_index.levels[1], exp1) - assert new_index[0] == ('abc', 'three') - - # key wrong length - msg = "Item must have length equal to number of levels" - with tm.assert_raises_regex(ValueError, msg): - idx.insert(0, ('foo2',)) - - left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], - columns=['1st', '2nd', '3rd']) - left.set_index(['1st', '2nd'], inplace=True) - ts = left['3rd'].copy(deep=True) - - left.loc[('b', 'x'), '3rd'] = 2 - left.loc[('b', 'a'), '3rd'] = -1 - left.loc[('b', 'b'), '3rd'] = 3 - left.loc[('a', 'x'), '3rd'] = 4 - left.loc[('a', 'w'), '3rd'] = 5 - left.loc[('a', 'a'), '3rd'] = 6 - - ts.loc[('b', 'x')] = 2 - ts.loc['b', 'a'] = -1 - ts.loc[('b', 'b')] = 3 - ts.loc['a', 'x'] = 4 - ts.loc[('a', 'w')] = 5 - ts.loc['a', 'a'] = 6 - - right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], - ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], - ['a', 'w', 5], ['a', 'a', 6]], - columns=['1st', '2nd', '3rd']) - right.set_index(['1st', '2nd'], inplace=True) - # FIXME data types changes to float because - # of intermediate nan insertion; - tm.assert_frame_equal(left, right, check_dtype=False) - tm.assert_series_equal(ts, 
right['3rd']) - - # GH9250 - idx = [('test1', i) for i in range(5)] + \ - [('test2', i) for i in range(6)] + \ - [('test', 17), ('test', 18)] - - left = pd.Series(np.linspace(0, 10, 11), - pd.MultiIndex.from_tuples(idx[:-2])) - - left.loc[('test', 17)] = 11 - left.loc[('test', 18)] = 12 - - right = pd.Series(np.linspace(0, 12, 13), - pd.MultiIndex.from_tuples(idx)) - - tm.assert_series_equal(left, right) - - -def test_bounds(idx): - idx._bounds - - -def test_append(idx): - result = idx[:3].append(idx[3:]) - assert result.equals(idx) - - foos = [idx[:1], idx[1:3], idx[3:]] - result = foos[0].append(foos[1:]) - assert result.equals(idx) - - # empty - result = idx.append([]) - assert result.equals(idx) - - -def test_groupby(idx): - groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2])) - labels = idx.get_values().tolist() - exp = {1: labels[:3], 2: labels[3:]} - tm.assert_dict_equal(groups, exp) - - # GH5620 - groups = idx.groupby(idx) - exp = {key: [key] for key in idx} - tm.assert_dict_equal(groups, exp) - - -def test_truncate(): - major_axis = Index(lrange(4)) - minor_axis = Index(lrange(2)) - - major_labels = np.array([0, 0, 1, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 0, 1]) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - - result = index.truncate(before=1) - assert 'foo' not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(after=1) - assert 2 not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(before=1, after=2) - assert len(result.levels[0]) == 2 - - # after < before - pytest.raises(ValueError, index.truncate, 3, 1) - - -def test_where(): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - - def f(): - i.where(True) - - pytest.raises(NotImplementedError, f) - - -def test_where_array_like(): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - klasses = [list, tuple, np.array, pd.Series] - cond = [False, True] - - for klass in klasses: - def f(): - 
return i.where(klass(cond)) - pytest.raises(NotImplementedError, f) - - -def test_reorder_levels(idx): - # this blows up - tm.assert_raises_regex(IndexError, '^Too many levels', - idx.reorder_levels, [2, 1, 0]) - - -def test_astype(idx): - expected = idx.copy() - actual = idx.astype('O') - assert_copy(actual.levels, expected.levels) - assert_copy(actual.labels, expected.labels) - check_level_names(actual, expected.names) - - with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): - idx.astype(np.dtype(int)) - - -@pytest.mark.parametrize('ordered', [True, False]) -def test_astype_category(idx, ordered): - # GH 18630 - msg = '> 1 ndim Categorical are not supported at this time' - with tm.assert_raises_regex(NotImplementedError, msg): - idx.astype(CategoricalDtype(ordered=ordered)) - - if ordered is False: - # dtype='category' defaults to ordered=False, so only test once - with tm.assert_raises_regex(NotImplementedError, msg): - idx.astype('category') - - -def test_repeat(): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(m.repeat(reps), expected) - - with tm.assert_produces_warning(FutureWarning): - result = m.repeat(n=reps) - tm.assert_index_equal(result, expected) - - -def test_numpy_repeat(): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(np.repeat(m, reps), expected) - - msg = "the 'axis' parameter is not supported" - tm.assert_raises_regex( - ValueError, msg, np.repeat, m, reps, axis=1) - - -def test_append_mixed_dtypes(): - # GH 13660 - dti = date_range('2011-01-01', freq='M', periods=3, ) - dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') - pi = 
period_range('2011-01', freq='M', periods=3) - - mi = MultiIndex.from_arrays([[1, 2, 3], - [1.1, np.nan, 3.3], - ['a', 'b', 'c'], - dti, dti_tz, pi]) - assert mi.nlevels == 6 - - res = mi.append(mi) - exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], - [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], - ['a', 'b', 'c', 'a', 'b', 'c'], - dti.append(dti), - dti_tz.append(dti_tz), - pi.append(pi)]) - tm.assert_index_equal(res, exp) - - other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z']]) - - res = mi.append(other) - exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], - [1.1, np.nan, 3.3, 'x', 'y', 'z'], - ['a', 'b', 'c', 'x', 'y', 'z'], - dti.append(pd.Index(['x', 'y', 'z'])), - dti_tz.append(pd.Index(['x', 'y', 'z'])), - pi.append(pd.Index(['x', 'y', 'z']))]) - tm.assert_index_equal(res, exp) - - -def test_take(idx): - indexer = [4, 3, 0, 2] - result = idx.take(indexer) - expected = idx[indexer] - assert result.equals(expected) - - if not isinstance(idx, - (DatetimeIndex, PeriodIndex, TimedeltaIndex)): - # GH 10791 - with pytest.raises(AttributeError): - idx.freq - - -def test_take_invalid_kwargs(idx): - idx = idx - indices = [1, 2] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - - -def test_take_fill_value(): - # GH 12631 - vals = [['A', 'B'], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] - idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) - - result = idx.take(np.array([1, 0, -1])) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = 
pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - (np.nan, pd.NaT)] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - -def test_iter(idx): - result = list(idx) - expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), - ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] - assert result == expected - - -def test_sub(idx): - - first = idx - - # - now raises (previously was set op difference) - with pytest.raises(TypeError): - first - idx[-3:] - with pytest.raises(TypeError): - idx[-3:] - first - with pytest.raises(TypeError): - idx[-3:] - first.tolist() - with pytest.raises(TypeError): - first.tolist() - idx[-3:] - - -def test_argsort(idx): - result = idx.argsort() - expected = idx.values.argsort() - tm.assert_numpy_array_equal(result, expected) - - -def test_map(idx): - # callable - index = idx - - # we don't infer UInt64 - if isinstance(index, pd.UInt64Index): - expected = index.astype('int64') - else: - expected = index - - result = index.map(lambda x: x) - tm.assert_index_equal(result, expected) - - 
-@pytest.mark.parametrize( - "mapper", - [ - lambda values, idx: {i: e for e, i in zip(values, idx)}, - lambda values, idx: pd.Series(values, idx)]) -def test_map_dictlike(idx, mapper): - - if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)): - pytest.skip("skipping tests for {}".format(type(idx))) - - identity = mapper(idx.values, idx) - - # we don't infer to UInt64 for a dict - if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict): - expected = idx.astype('int64') - else: - expected = idx - - result = idx.map(identity) - tm.assert_index_equal(result, expected) - - # empty mappable - expected = pd.Index([np.nan] * len(idx)) - result = idx.map(mapper(expected, idx)) - tm.assert_index_equal(result, expected) - - -def test_numpy_ufuncs(idx): - # test ufuncs of numpy 1.9.2. see: - # http://docs.scipy.org/doc/numpy/reference/ufuncs.html - - # some functions are skipped because it may return different result - # for unicode input depending on numpy version - - for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, - np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, - np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, - np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, - np.rad2deg]: - if isinstance(idx, DatetimeIndexOpsMixin): - # raise TypeError or ValueError (PeriodIndex) - # PeriodIndex behavior should be changed in future version - with pytest.raises(Exception): - with np.errstate(all='ignore'): - func(idx) - elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): - # coerces to float (e.g. 
np.sin) - with np.errstate(all='ignore'): - result = func(idx) - exp = Index(func(idx.values), name=idx.name) - - tm.assert_index_equal(result, exp) - assert isinstance(result, pd.Float64Index) - else: - # raise AttributeError or TypeError - if len(idx) == 0: - continue - else: - with pytest.raises(Exception): - with np.errstate(all='ignore'): - func(idx) - - for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: - if isinstance(idx, DatetimeIndexOpsMixin): - # raise TypeError or ValueError (PeriodIndex) - with pytest.raises(Exception): - func(idx) - elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): - # Results in bool array - result = func(idx) - assert isinstance(result, np.ndarray) - assert not isinstance(result, Index) - else: - if len(idx) == 0: - continue - else: - with pytest.raises(Exception): - func(idx) diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py new file mode 100644 index 0000000000000..85eec6a232180 --- /dev/null +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas import Index, MultiIndex + + +def test_insert(idx): + # key contained in all levels + new_index = idx.insert(0, ('bar', 'two')) + assert new_index.equal_levels(idx) + assert new_index[0] == ('bar', 'two') + + # key not contained in all levels + new_index = idx.insert(0, ('abc', 'three')) + + exp0 = Index(list(idx.levels[0]) + ['abc'], name='first') + tm.assert_index_equal(new_index.levels[0], exp0) + + exp1 = Index(list(idx.levels[1]) + ['three'], name='second') + tm.assert_index_equal(new_index.levels[1], exp1) + assert new_index[0] == ('abc', 'three') + + # key wrong length + msg = "Item must have length equal to number of levels" + with tm.assert_raises_regex(ValueError, msg): + idx.insert(0, ('foo2',)) + + left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], + 
columns=['1st', '2nd', '3rd']) + left.set_index(['1st', '2nd'], inplace=True) + ts = left['3rd'].copy(deep=True) + + left.loc[('b', 'x'), '3rd'] = 2 + left.loc[('b', 'a'), '3rd'] = -1 + left.loc[('b', 'b'), '3rd'] = 3 + left.loc[('a', 'x'), '3rd'] = 4 + left.loc[('a', 'w'), '3rd'] = 5 + left.loc[('a', 'a'), '3rd'] = 6 + + ts.loc[('b', 'x')] = 2 + ts.loc['b', 'a'] = -1 + ts.loc[('b', 'b')] = 3 + ts.loc['a', 'x'] = 4 + ts.loc[('a', 'w')] = 5 + ts.loc['a', 'a'] = 6 + + right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], + ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], + ['a', 'w', 5], ['a', 'a', 6]], + columns=['1st', '2nd', '3rd']) + right.set_index(['1st', '2nd'], inplace=True) + # FIXME data types changes to float because + # of intermediate nan insertion; + tm.assert_frame_equal(left, right, check_dtype=False) + tm.assert_series_equal(ts, right['3rd']) + + # GH9250 + idx = [('test1', i) for i in range(5)] + \ + [('test2', i) for i in range(6)] + \ + [('test', 17), ('test', 18)] + + left = pd.Series(np.linspace(0, 10, 11), + pd.MultiIndex.from_tuples(idx[:-2])) + + left.loc[('test', 17)] = 11 + left.loc[('test', 18)] = 12 + + right = pd.Series(np.linspace(0, 12, 13), + pd.MultiIndex.from_tuples(idx)) + + tm.assert_series_equal(left, right) + + +def test_append(idx): + result = idx[:3].append(idx[3:]) + assert result.equals(idx) + + foos = [idx[:1], idx[1:3], idx[3:]] + result = foos[0].append(foos[1:]) + assert result.equals(idx) + + # empty + result = idx.append([]) + assert result.equals(idx) + + +def test_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(m.repeat(reps), expected) + + with tm.assert_produces_warning(FutureWarning): + result = m.repeat(n=reps) + tm.assert_index_equal(result, expected) + + +def test_insert_base(idx): + + result = 
idx[1:4] + + # test 0th element + assert idx[0:4].equals(result.insert(0, idx[0])) + + +def test_delete_base(idx): + + expected = idx[1:] + result = idx.delete(0) + assert result.equals(expected) + assert result.name == expected.name + + expected = idx[:-1] + result = idx.delete(-1) + assert result.equals(expected) + assert result.name == expected.name + + with pytest.raises((IndexError, ValueError)): + # either depending on numpy version + result = idx.delete(len(idx)) diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_set_ops.py index 79a3837aac7f8..3f61cf2b6ff3f 100644 --- a/pandas/tests/indexes/multi/test_set_ops.py +++ b/pandas/tests/indexes/multi/test_set_ops.py @@ -1,11 +1,9 @@ # -*- coding: utf-8 -*- - import numpy as np import pandas as pd import pandas.util.testing as tm -from pandas import (CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex, - Series, TimedeltaIndex) +from pandas import MultiIndex, Series def test_setops_errorcases(idx): @@ -27,29 +25,18 @@ def test_intersection_base(idx): second = idx[:3] intersect = first.intersection(second) - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(intersect, second) + assert tm.equalContents(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.intersection(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.intersection(case) - assert tm.equalContents(result, second) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.intersection([1, 2, 3]) + result = first.intersection(case) + assert tm.equalContents(result, second) + + msg = "other must be a MultiIndex or a list of tuples" + with 
tm.assert_raises_regex(TypeError, msg): + result = first.intersection([1, 2, 3]) def test_union_base(idx): @@ -63,20 +50,12 @@ def test_union_base(idx): cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.union(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.union(case) - assert tm.equalContents(result, everything) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.union([1, 2, 3]) + result = first.union(case) + assert tm.equalContents(result, everything) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + result = first.union([1, 2, 3]) def test_difference_base(idx): @@ -85,63 +64,37 @@ def test_difference_base(idx): answer = idx[4:] result = first.difference(second) - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(result, answer) + assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.difference(case) - elif isinstance(idx, CategoricalIndex): - pass - elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): - assert result.__class__ == answer.__class__ - tm.assert_numpy_array_equal(result.sort_values().asi8, - answer.sort_values().asi8) - else: - result = first.difference(case) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.difference([1, 2, 3]) + result = 
first.difference(case) + assert tm.equalContents(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + result = first.difference([1, 2, 3]) def test_symmetric_difference(idx): first = idx[1:] second = idx[:-1] - if isinstance(idx, CategoricalIndex): - pass - else: - answer = idx[[0, -1]] - result = first.symmetric_difference(second) - assert tm.equalContents(result, answer) + answer = idx[[0, -1]] + result = first.symmetric_difference(second) + assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.symmetric_difference(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.symmetric_difference(case) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - first.symmetric_difference([1, 2, 3]) + result = first.symmetric_difference(case) + assert tm.equalContents(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + first.symmetric_difference([1, 2, 3]) def test_empty(idx): diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py index d6165c17c6717..ee29ea1be8aea 100644 --- a/pandas/tests/indexes/multi/test_sorting.py +++ b/pandas/tests/indexes/multi/test_sorting.py @@ -215,7 +215,8 @@ def test_reconstruct_remove_unused(): @pytest.mark.parametrize('first_type,second_type', [ ('int64', 'int64'), - ('datetime64[D]', 'str')]) + ('datetime64[D]', 'str') +]) def test_remove_unused_levels_large(first_type, second_type): # GH16556 @@ -254,3 +255,9 @@ def test_remove_unused_nan(level0, level1): 
tm.assert_index_equal(result, mi) for level in 0, 1: assert('unused' not in result.levels[level]) + + +def test_argsort(idx): + result = idx.argsort() + expected = idx.values.argsort() + tm.assert_numpy_array_equal(result, expected)
- [x] closes #21918 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry - N/A Addresses most of the issues. Left blowing away the old pickles for another issue which is still open.
https://api.github.com/repos/pandas-dev/pandas/pulls/21928
2018-07-16T03:25:41Z
2018-07-24T22:01:57Z
2018-07-24T22:01:57Z
2018-07-24T22:02:05Z
Docstrings, de-duplicate EAMixin/DatetimeLikeIndex __new__ code
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fe4e461b0bd4f..aadfbdd4303c4 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -634,6 +634,7 @@ class ExtensionOpsMixin(object): """ A base class for linking the operators to their dunder names """ + @classmethod def _add_arithmetic_ops(cls): cls.__add__ = cls._create_arithmetic_method(operator.add) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ec430e4bf17b1..7bb1c45998eb2 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -5,6 +5,7 @@ import numpy as np from pandas._libs import lib, iNaT, NaT +from pandas._libs.tslibs import timezones from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta from pandas._libs.tslibs.period import ( DIFFERENT_FREQ_INDEX, IncompatibleFrequency) @@ -13,7 +14,7 @@ from pandas import compat from pandas.tseries import frequencies -from pandas.tseries.offsets import Tick +from pandas.tseries.offsets import Tick, DateOffset from pandas.core.dtypes.common import ( needs_i8_conversion, @@ -23,10 +24,13 @@ is_timedelta64_dtype, is_object_dtype) from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame, ABCIndexClass +from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr +from .base import ExtensionOpsMixin + def _make_comparison_op(op, cls): # TODO: share code with indexes.base version? 
Main difference is that @@ -87,7 +91,7 @@ def _shallow_copy(self, values=None, **kwargs): return self._simple_new(values, **attributes) -class DatetimeLikeArrayMixin(AttributesMixin): +class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray @@ -464,7 +468,10 @@ def _addsub_offset_array(self, other, op): "{cls} not vectorized" .format(cls=type(self).__name__), PerformanceWarning) - res_values = op(self.astype('O').values, np.array(other)) + # For EA self.astype('O') returns a numpy array, not an Index + left = lib.values_from_object(self.astype('O')) + + res_values = op(left, np.array(other)) kwargs = {} if not is_period_dtype(self): kwargs['freq'] = 'infer' @@ -551,3 +558,96 @@ def validate_periods(periods): raise TypeError('periods must be a number, got {periods}' .format(periods=periods)) return periods + + +def validate_endpoints(closed): + """ + Check that the `closed` argument is among [None, "left", "right"] + + Parameters + ---------- + closed : {None, "left", "right"} + + Returns + ------- + left_closed : bool + right_closed : bool + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_closed = False + right_closed = False + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + return left_closed, right_closed + + +def maybe_infer_freq(freq): + """ + Comparing a DateOffset to the string "infer" raises, so we need to + be careful about comparisons. Make a dummy variable `freq_infer` to + signify the case where the given freq is "infer" and set freq to None + to avoid comparison trouble later on. 
+ + Parameters + ---------- + freq : {DateOffset, None, str} + + Returns + ------- + freq : {DateOffset, None} + freq_infer : bool + """ + freq_infer = False + if not isinstance(freq, DateOffset): + # if a passed freq is None, don't infer automatically + if freq != 'infer': + freq = frequencies.to_offset(freq) + else: + freq_infer = True + freq = None + return freq, freq_infer + + +def validate_tz_from_dtype(dtype, tz): + """ + If the given dtype is a DatetimeTZDtype, extract the implied + tzinfo object from it and check that it does not conflict with the given + tz. + + Parameters + ---------- + dtype : dtype, str + tz : None, tzinfo + + Returns + ------- + tz : consensus tzinfo + + Raises + ------ + ValueError : on tzinfo mismatch + """ + if dtype is not None: + try: + dtype = DatetimeTZDtype.construct_from_string(dtype) + dtz = getattr(dtype, 'tz', None) + if dtz is not None: + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a dtype" + " with a tz") + tz = dtz + except TypeError: + pass + return tz diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c5e85cb5892f4..78e6d1f222160 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -29,7 +29,7 @@ import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr -from pandas.tseries.frequencies import to_offset, DateOffset +from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick from pandas.core.arrays import datetimelike as dtl @@ -84,10 +84,11 @@ def f(self): return property(f) -def _dt_array_cmp(opname, cls): +def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -181,12 +182,10 @@ def __new__(cls, values, freq=None, tz=None): # e.g. 
DatetimeIndex tz = values.tz - if (freq is not None and not isinstance(freq, DateOffset) and - freq != 'infer'): - freq = to_offset(freq) + freq, freq_infer = dtl.maybe_infer_freq(freq) result = cls._simple_new(values, freq=freq, tz=tz) - if freq == 'infer': + if freq_infer: inferred = result.inferred_freq if inferred: result.freq = to_offset(inferred) @@ -289,17 +288,7 @@ def __iter__(self): # ----------------------------------------------------------------- # Comparison Methods - @classmethod - def _add_comparison_methods(cls): - """add in comparison methods""" - cls.__eq__ = _dt_array_cmp('__eq__', cls) - cls.__ne__ = _dt_array_cmp('__ne__', cls) - cls.__lt__ = _dt_array_cmp('__lt__', cls) - cls.__gt__ = _dt_array_cmp('__gt__', cls) - cls.__le__ = _dt_array_cmp('__le__', cls) - cls.__ge__ = _dt_array_cmp('__ge__', cls) - # TODO: Some classes pass __eq__ while others pass operator.eq; - # standardize this. + _create_comparison_method = classmethod(_dt_array_cmp) def _has_same_tz(self, other): zzone = self._timezone @@ -441,14 +430,7 @@ def _local_timestamps(self): This is used to calculate time-of-day information as if the timestamps were timezone-naive. 
""" - values = self.asi8 - indexer = values.argsort() - result = conversion.tz_convert(values.take(indexer), utc, self.tz) - - n = len(indexer) - reverse = np.empty(n, dtype=np.int_) - reverse.put(indexer, np.arange(n)) - return result.take(reverse) + return conversion.tz_convert(self.asi8, utc, self.tz) def tz_convert(self, tz): """ @@ -1102,4 +1084,4 @@ def to_julian_date(self): ) / 24.0) -DatetimeArrayMixin._add_comparison_methods() +DatetimeArrayMixin._add_comparison_ops() diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 66b1fb8db25c0..cb5afa34add2a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -40,10 +40,11 @@ def f(self): return property(f) -def _period_array_cmp(opname, cls): +def _period_array_cmp(cls, op): """ Wrap comparison operations to convert Period-like to PeriodDtype """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -268,6 +269,8 @@ def asfreq(self, freq=None, how='E'): # ------------------------------------------------------------------ # Arithmetic Methods + _create_comparison_method = classmethod(_period_array_cmp) + def _sub_datelike(self, other): assert other is not NaT return NotImplemented @@ -381,18 +384,8 @@ def _maybe_convert_timedelta(self, other): raise IncompatibleFrequency(msg.format(cls=type(self).__name__, freqstr=self.freqstr)) - @classmethod - def _add_comparison_methods(cls): - """ add in comparison methods """ - cls.__eq__ = _period_array_cmp('__eq__', cls) - cls.__ne__ = _period_array_cmp('__ne__', cls) - cls.__lt__ = _period_array_cmp('__lt__', cls) - cls.__gt__ = _period_array_cmp('__gt__', cls) - cls.__le__ = _period_array_cmp('__le__', cls) - cls.__ge__ = _period_array_cmp('__ge__', cls) - -PeriodArrayMixin._add_comparison_methods() +PeriodArrayMixin._add_comparison_ops() # ------------------------------------------------------------------- diff --git 
a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a28f7fc9c32fa..efa7c0b0e44d4 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -17,7 +17,7 @@ import pandas.core.common as com -from pandas.tseries.offsets import Tick, DateOffset +from pandas.tseries.offsets import Tick from pandas.tseries.frequencies import to_offset from . import datetimelike as dtl @@ -54,10 +54,11 @@ def f(self): return property(f) -def _td_array_cmp(opname, cls): +def _td_array_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -126,25 +127,23 @@ def _simple_new(cls, values, freq=None, **kwargs): def __new__(cls, values, freq=None, start=None, end=None, periods=None, closed=None): - if (freq is not None and not isinstance(freq, DateOffset) and - freq != 'infer'): - freq = to_offset(freq) - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) if values is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): raise ValueError('Must provide freq argument if no data is ' 'supplied') - else: - return cls._generate_range(start, end, periods, freq, - closed=closed) + periods = dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, freq, + closed=closed) result = cls._simple_new(values, freq=freq) - if freq == 'infer': + if freq_infer: inferred = result.inferred_freq if inferred: - result._freq = to_offset(inferred) + result.freq = to_offset(inferred) return result @@ -161,23 +160,12 @@ def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): if end is not None: end = Timedelta(end) - left_closed = False - right_closed = False - if start is None and end is None: if closed is not None: raise ValueError("Closed has to be None if 
not both of start" "and end are defined") - if closed is None: - left_closed = True - right_closed = True - elif closed == "left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") + left_closed, right_closed = dtl.validate_endpoints(closed) if freq is not None: index = _generate_regular_range(start, end, periods, freq) @@ -197,6 +185,8 @@ def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # ---------------------------------------------------------------- # Arithmetic Methods + _create_comparison_method = classmethod(_td_array_cmp) + def _add_offset(self, other): assert not isinstance(other, Tick) raise TypeError("cannot add the type {typ} to a {cls}" @@ -266,19 +256,6 @@ def _evaluate_with_timedelta_like(self, other, op): return NotImplemented - # ---------------------------------------------------------------- - # Comparison Methods - - @classmethod - def _add_comparison_methods(cls): - """add in comparison methods""" - cls.__eq__ = _td_array_cmp('__eq__', cls) - cls.__ne__ = _td_array_cmp('__ne__', cls) - cls.__lt__ = _td_array_cmp('__lt__', cls) - cls.__gt__ = _td_array_cmp('__gt__', cls) - cls.__le__ = _td_array_cmp('__le__', cls) - cls.__ge__ = _td_array_cmp('__ge__', cls) - # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timedelta methods @@ -392,7 +369,7 @@ def f(x): return result -TimedeltaArrayMixin._add_comparison_methods() +TimedeltaArrayMixin._add_comparison_ops() # --------------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 3ae5eb3a8dbf5..8f05a9a887830 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -789,9 +789,8 @@ def shift(self, n, freq=None): start = self[0] + n * self.freq end = self[-1] + n * self.freq attribs = 
self._get_attributes_dict() - attribs['start'] = start - attribs['end'] = end - return type(self)(**attribs) + return self._generate_range(start=start, end=end, periods=None, + **attribs) def repeat(self, repeats, *args, **kwargs): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7257be421c3e1..6ed752d3a213d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -27,7 +27,6 @@ pandas_dtype, ensure_int64) from pandas.core.dtypes.generic import ABCSeries -from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat @@ -41,7 +40,7 @@ from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import ( - DateOffset, generate_range, Tick, CDay, prefix_mapping) + generate_range, Tick, CDay, prefix_mapping) from pandas.core.tools.timedeltas import to_timedelta from pandas.util._decorators import ( @@ -84,10 +83,12 @@ def func(self, *args, **kwargs): return func -def _dt_index_cmp(opname, cls): +def _dt_index_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ + opname = '__{name}__'.format(name=op.__name__) + def wrapper(self, other): result = getattr(DatetimeArrayMixin, opname)(self, other) if is_bool_dtype(result): @@ -238,12 +239,12 @@ def _join_i8_wrapper(joinf, **kwargs): @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ - cls.__eq__ = _dt_index_cmp('__eq__', cls) - cls.__ne__ = _dt_index_cmp('__ne__', cls) - cls.__lt__ = _dt_index_cmp('__lt__', cls) - cls.__gt__ = _dt_index_cmp('__gt__', cls) - cls.__le__ = _dt_index_cmp('__le__', cls) - cls.__ge__ = _dt_index_cmp('__ge__', cls) + cls.__eq__ = _dt_index_cmp(cls, operator.eq) + cls.__ne__ = _dt_index_cmp(cls, operator.ne) + cls.__lt__ = _dt_index_cmp(cls, operator.lt) + cls.__gt__ = _dt_index_cmp(cls, operator.gt) + cls.__le__ = 
_dt_index_cmp(cls, operator.le) + cls.__ge__ = _dt_index_cmp(cls, operator.ge) _engine_type = libindex.DatetimeEngine @@ -289,39 +290,20 @@ def __new__(cls, data=None, if name is None and hasattr(data, 'name'): name = data.name - freq_infer = False - if not isinstance(freq, DateOffset): - - # if a passed freq is None, don't infer automatically - if freq != 'infer': - freq = to_offset(freq) - else: - freq_infer = True - freq = None - - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) # if dtype has an embedded tz, capture it - if dtype is not None: - try: - dtype = DatetimeTZDtype.construct_from_string(dtype) - dtz = getattr(dtype, 'tz', None) - if dtz is not None: - if tz is not None and str(tz) != str(dtz): - raise ValueError("cannot supply both a tz and a dtype" - " with a tz") - tz = dtz - except TypeError: - pass + tz = dtl.validate_tz_from_dtype(dtype, tz) if data is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): - msg = 'Must provide freq argument if no data is supplied' - raise ValueError(msg) - else: - return cls._generate_range(start, end, periods, name, freq, - tz=tz, normalize=normalize, - closed=closed, ambiguous=ambiguous) + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + periods = dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, name, freq, + tz=tz, normalize=normalize, + closed=closed, ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -407,23 +389,12 @@ def _generate_range(cls, start, end, periods, name, freq, tz=None, if end is not None: end = Timestamp(end) - left_closed = False - right_closed = False - if start is None and end is None: if closed is not None: raise ValueError("Closed has to be None if not both of start" "and end are defined") - if closed is None: - left_closed = True - right_closed = True - elif closed == 
"left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") + left_closed, right_closed = dtl.validate_endpoints(closed) try: inferred_tz = timezones.infer_tzinfo(start, end) @@ -540,12 +511,6 @@ def _convert_for_op(self, value): return _to_m8(value) raise ValueError('Passed item and index have different timezone') - def _local_timestamps(self): - if self.is_monotonic: - return conversion.tz_convert(self.asi8, utc, self.tz) - else: - return DatetimeArrayMixin._local_timestamps(self) - @classmethod def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None, **kwargs): @@ -1744,7 +1709,6 @@ def _generate_regular_range(cls, start, end, periods, freq): "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) - # TODO: Do we need to use _simple_new here? just return data.view? data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) else: if isinstance(start, Timestamp): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..350c609acbb4f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -804,7 +804,7 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ -PeriodIndex._add_comparison_methods() +PeriodIndex._add_comparison_ops() PeriodIndex._add_numeric_methods_disabled() PeriodIndex._add_logical_methods_disabled() PeriodIndex._add_datetimelike_methods() diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dc26c9cc0c248..af34ec8b22824 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,4 +1,5 @@ """ implement the TimedeltaIndex """ +import operator import numpy as np from pandas.core.dtypes.common import ( @@ -34,7 +35,6 @@ TimelikeOps, DatetimeIndexOpsMixin) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) -from pandas.tseries.offsets import 
DateOffset from pandas._libs import (lib, index as libindex, join as libjoin, Timedelta, NaT, iNaT) @@ -51,10 +51,12 @@ def f(self): return property(f) -def _td_index_cmp(opname, cls): +def _td_index_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ + opname = '__{name}__'.format(name=op.__name__) + def wrapper(self, other): result = getattr(TimedeltaArrayMixin, opname)(self, other) if is_bool_dtype(result): @@ -155,12 +157,12 @@ def _join_i8_wrapper(joinf, **kwargs): @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ - cls.__eq__ = _td_index_cmp('__eq__', cls) - cls.__ne__ = _td_index_cmp('__ne__', cls) - cls.__lt__ = _td_index_cmp('__lt__', cls) - cls.__gt__ = _td_index_cmp('__gt__', cls) - cls.__le__ = _td_index_cmp('__le__', cls) - cls.__ge__ = _td_index_cmp('__ge__', cls) + cls.__eq__ = _td_index_cmp(cls, operator.eq) + cls.__ne__ = _td_index_cmp(cls, operator.ne) + cls.__lt__ = _td_index_cmp(cls, operator.lt) + cls.__gt__ = _td_index_cmp(cls, operator.gt) + cls.__le__ = _td_index_cmp(cls, operator.le) + cls.__ge__ = _td_index_cmp(cls, operator.ge) _engine_type = libindex.TimedeltaEngine @@ -181,25 +183,16 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, else: return data._shallow_copy() - freq_infer = False - if not isinstance(freq, DateOffset): - - # if a passed freq is None, don't infer automatically - if freq != 'infer': - freq = to_offset(freq) - else: - freq_infer = True - freq = None - - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) if data is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): - msg = 'Must provide freq argument if no data is supplied' - raise ValueError(msg) - else: - return cls._generate_range(start, end, periods, name, freq, - closed=closed) + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + periods = 
dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -226,7 +219,6 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, inferred = subarr.inferred_freq if inferred: subarr.freq = to_offset(inferred) - return subarr return subarr diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 387a70fe37253..59cd4743f857b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -250,7 +250,8 @@ def infer_freq(index, warn=True): if is_period_arraylike(index): raise TypeError("PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq.") - elif isinstance(index, pd.TimedeltaIndex): + elif is_timedelta64_dtype(index): + # Allow TimedeltaIndex and TimedeltaArray inferer = _TimedeltaFrequencyInferer(index, warn=warn) return inferer.get_freq()
There's a lot of duplication in the constructors and constructor-helpers. This starts to whittle that down, writes some docstrings long the way. Also use `ExtensionOpsMixin` to define comparison operators on the EAMixin classes. We determined that the DatetimeIndex._local_timestamps method had an unecessary monotonicy check, so took that out.
https://api.github.com/repos/pandas-dev/pandas/pulls/21926
2018-07-16T00:26:50Z
2018-07-20T20:43:38Z
2018-07-20T20:43:38Z
2018-07-20T21:03:21Z
move rename functionality out of internals
diff --git a/.coveragerc b/.coveragerc index f5c8b701a79a8..13baa100b84b7 100644 --- a/.coveragerc +++ b/.coveragerc @@ -17,6 +17,7 @@ exclude_lines = # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError + AbstractMethodError # Don't complain if non-runnable code isn't run: if 0: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 85bd6065314f4..c5ca6eafdb427 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -289,10 +289,7 @@ def set_axis(a, i): for i, a in cls._AXIS_NAMES.items(): set_axis(a, i) - # addtl parms - if isinstance(ns, dict): - for k, v in ns.items(): - setattr(cls, k, v) + assert not isinstance(ns, dict) def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" @@ -3406,8 +3403,10 @@ def add_prefix(self, prefix): 2 3 5 3 4 6 """ - new_data = self._data.add_prefix(prefix) - return self._constructor(new_data).__finalize__(self) + f = functools.partial('{prefix}{}'.format, prefix=prefix) + + mapper = {self._info_axis_name: f} + return self.rename(**mapper) def add_suffix(self, suffix): """ @@ -3463,8 +3462,10 @@ def add_suffix(self, suffix): 2 3 5 3 4 6 """ - new_data = self._data.add_suffix(suffix) - return self._constructor(new_data).__finalize__(self) + f = functools.partial('{}{suffix}'.format, suffix=suffix) + + mapper = {self._info_axis_name: f} + return self.rename(**mapper) _shared_docs['sort_values'] = """ Sort by the values along either axis @@ -3980,6 +3981,7 @@ def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, return self._constructor(new_data).__finalize__(self) + # TODO: unused; remove? 
def _reindex_axis(self, new_index, fill_method, axis, copy): new_data = self._data.reindex_axis(new_index, axis=axis, method=fill_method, copy=copy) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e64ba44bb8a92..63738594799f5 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -176,20 +176,11 @@ def rename_axis(self, mapper, axis, copy=True, level=None): axis : int copy : boolean, default True level : int, default None - """ obj = self.copy(deep=copy) obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) return obj - def add_prefix(self, prefix): - f = partial('{prefix}{}'.format, prefix=prefix) - return self.rename_axis(f, axis=0) - - def add_suffix(self, suffix): - f = partial('{}{suffix}'.format, suffix=suffix) - return self.rename_axis(f, axis=0) - @property def _is_single_block(self): if self.ndim == 1: @@ -222,12 +213,10 @@ def _rebuild_blknos_and_blklocs(self): self._blknos = new_blknos self._blklocs = new_blklocs - # make items read only for now - def _get_items(self): + @property + def items(self): return self.axes[0] - items = property(fget=_get_items) - def _get_counts(self, f): """ return a dict of the counts of the function in BlockManager """ self._consolidate_inplace()
- [ ] <s>closes #16045</s><b>update</b>Not anymore - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` @jorisvandenbossche can you confirm this is what you had in mind in that issue?
https://api.github.com/repos/pandas-dev/pandas/pulls/21924
2018-07-15T20:24:21Z
2018-09-08T02:46:54Z
2018-09-08T02:46:54Z
2018-09-08T03:10:23Z
[BUG] change types to Py_ssize_t to fix #21905
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index d0090852fa5af..fae855f5495f0 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -527,7 +527,7 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): cdef: ndarray[int64_t] trans, deltas int64_t delta, local_val - Py_ssize_t posn + Py_ssize_t pos assert obj.tzinfo is None @@ -782,7 +782,6 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): cdef: ndarray[int64_t] utc_dates, tt, result, trans, deltas Py_ssize_t i, j, pos, n = len(vals) - ndarray[Py_ssize_t] posn int64_t v, offset, delta npy_datetimestruct dts @@ -1124,7 +1123,8 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): cdef: Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts if is_utc(tz): diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ebd8402c6fdf7..b8965288a878b 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -934,7 +934,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, cdef: Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts int64_t local_val diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index e8eb27fd4544b..688b12005921d 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -74,7 +74,8 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): cdef: Py_ssize_t n = len(stamps) int reso = RESO_DAY, curr_reso - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts int64_t local_val
May close #21905, will need to check with OP.
https://api.github.com/repos/pandas-dev/pandas/pulls/21923
2018-07-15T20:16:15Z
2018-07-17T00:37:13Z
2018-07-17T00:37:13Z
2020-04-05T17:42:33Z
BUG:Clip with a list-like threshold with a nan is broken (GH19992)
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index ac1ef78fd6fd2..f1cedf139f7d6 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -62,3 +62,7 @@ Bug Fixes - - + +**Missing** + +- Bug in :func:`Series.clip` and :func:`DataFrame.clip` cannot accept list-like threshold containing ``NaN`` (:issue:`19992`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b682f5e65f876..610bcf5d1d6c4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6520,9 +6520,11 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, # GH 17276 # numpy doesn't like NaN as a clip value # so ignore - if np.any(pd.isnull(lower)): + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None - if np.any(pd.isnull(upper)): + if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index a399fa2b68680..b48395efaf5c8 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1859,13 +1859,23 @@ def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH # 17276 tm.assert_frame_equal(self.frame.clip(np.nan), self.frame) - tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]), - self.frame) - tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]), - self.frame) tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan), self.frame) + # GH #19992 + df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6], + 'col_2': [7, 8, 9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan], + 'col_2': [7, 8, np.nan]}) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame({'col_0': 
[4, 4, 4], 'col_1': [5, 5, 6], + 'col_2': [np.nan, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + # Matrix-like def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 28a77bbb1d3fa..8c0f4b11149fe 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -942,11 +942,15 @@ def test_clip_with_na_args(self): s = Series([1, 2, 3]) assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) - assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3])) - assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3])) assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + # GH #19992 + assert_series_equal(s.clip(lower=[0, 4, np.nan]), + Series([1, 4, np.nan])) + assert_series_equal(s.clip(upper=[1, np.nan, 1]), + Series([1, np.nan, 1])) + def test_clip_against_series(self): # GH #6966
- fix bug #19992 - 2 tests amended in frame/test_analytics.py and series/test_analytics.py - whatsnew entry added
https://api.github.com/repos/pandas-dev/pandas/pulls/21921
2018-07-15T15:18:30Z
2018-07-18T10:23:30Z
2018-07-18T10:23:30Z
2018-07-18T10:23:47Z
[BUG][BLD] revert DEF component of #21878
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index f45b4320b4d3d..98eca92fd1ab2 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -30,6 +30,23 @@ PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) { return PyArray_Scalar(item, PyArray_DESCR(ap), (PyObject*)ap); } +// returns ASCII or UTF8 (py3) view on python str +// python object owns memory, should not be freed +PANDAS_INLINE const char* get_c_string(PyObject* obj) { +#if PY_VERSION_HEX >= 0x03000000 + return PyUnicode_AsUTF8(obj); +#else + return PyString_AsString(obj); +#endif +} + +PANDAS_INLINE PyObject* char_to_string(const char* data) { +#if PY_VERSION_HEX >= 0x03000000 + return PyUnicode_FromString(data); +#else + return PyString_FromString(data); +#endif +} void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS); diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index 728eb63dc836c..7ce2181f32553 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -4,9 +4,7 @@ cnp.import_array() cimport cpython from cpython cimport PyTypeObject -from cpython.string cimport PyString_FromString, PyString_AsString -DEF PY3 = bytes != str cdef extern from "Python.h": # Note: importing extern-style allows us to declare these as nogil @@ -17,8 +15,6 @@ cdef extern from "Python.h": bint PyFloat_Check(object obj) nogil bint PyComplex_Check(object obj) nogil bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil - char* PyUnicode_AsUTF8(object unicode) - object PyUnicode_FromString(const char* u) nogil cdef extern from "numpy/arrayobject.h": @@ -74,6 +70,8 @@ cdef extern from "numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) + char *get_c_string(object) except NULL + object char_to_string(char*) ctypedef fused numeric: cnp.int8_t @@ -104,26 +102,6 @@ 
cdef extern from "headers/stdint.h": enum: INT64_MIN -cdef inline const char* get_c_string(object obj) except NULL: - """ - returns ASCII or UTF8 (py3) view on python str - python object owns memory, should not be freed - """ - # TODO: this docstring is copied verbatim from version that was - # directly in numpy_helper.C; is it still accurate? - IF PY3: - return PyUnicode_AsUTF8(obj) - ELSE: - return PyString_AsString(obj) - - -cdef inline object char_to_string(const char* data): - IF PY3: - return PyUnicode_FromString(data) - ELSE: - return PyString_FromString(data) - - cdef inline object get_value_at(ndarray arr, object loc): cdef: Py_ssize_t i, sz diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 576b3ecc1f8e2..b8f97dcf2d599 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -53,7 +53,7 @@ from tslibs.timestamps cimport (create_timestamp_from_ts, from tslibs.timestamps import Timestamp -DEF PY2 = str == bytes +cdef bint PY2 = str == bytes cdef inline object create_datetime_from_ts( @@ -555,9 +555,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', if len(val) == 0 or val in nat_strings: iresult[i] = NPY_NAT continue - if PY2: - if PyUnicode_Check(val): - val = val.encode('utf-8') + if PyUnicode_Check(val) and PY2: + val = val.encode('utf-8') try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ebd8402c6fdf7..266d312aca0ae 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -52,7 +52,7 @@ from nattype cimport _nat_scalar_rules, NPY_NAT, is_null_datetimelike from offsets cimport to_offset from offsets import _Tick -DEF PY2 = str == bytes +cdef bint PY2 = str == bytes cdef extern from "period_helper.h": @@ -728,7 +728,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt): result = result.replace(str_extra_fmts[i], repl) - IF PY2: + if PY2: result = result.decode('utf-8', 'ignore') 
return result
#21878 introduced a subtle build problem that is not caught by the CI. Running `python setup.py build_ext --inplace` followed by `python3 setup.py build_ext --inplace` causes compile-time errors in py3 (or if running these in the opposite order, errors in py2). These are fixed by running `python setup.py clean` in between. This reverts the relevant changes. For anyone curious: cython supports syntax: ``` DEF foo = [...] IF foo: [...] ELSE: [...] ``` and these IF/ELSE conditions get evaluated at compile-time. #21878 incorrectly assumed that "compile-time" in this context meant ".c -> .so" time, not ".pyx -> .c" time. In this we used: ``` DEF PY2 = str == bytes IF PY2: [...] ELSE: [...] ``` so after running `setup.py build_ext --inplace` in py2, the ".c" file we end up with has already gotten rid of the PY3 branches. When we run `python3 setup.py build_ext --inplace` cython uses the existing .c file, tries to compile it to .so, and breaks.
https://api.github.com/repos/pandas-dev/pandas/pulls/21919
2018-07-15T03:41:31Z
2018-07-15T17:59:25Z
2018-07-15T17:59:25Z
2018-07-15T17:59:47Z
DOC: Updated the DataFrame.assign docstring
diff --git a/ci/doctests.sh b/ci/doctests.sh index e7fe80e60eb6d..48774a1e4d00d 100755 --- a/ci/doctests.sh +++ b/ci/doctests.sh @@ -21,7 +21,7 @@ if [ "$DOCTEST" ]; then # DataFrame / Series docstrings pytest --doctest-modules -v pandas/core/frame.py \ - -k"-assign -axes -combine -isin -itertuples -join -nlargest -nsmallest -nunique -pivot_table -quantile -query -reindex -reindex_axis -replace -round -set_index -stack -to_dict -to_stata" + -k"-axes -combine -isin -itertuples -join -nlargest -nsmallest -nunique -pivot_table -quantile -query -reindex -reindex_axis -replace -round -set_index -stack -to_dict -to_stata" if [ $? -ne "0" ]; then RET=1 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 959b0a4fd1890..a28a8939d9a2d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3273,7 +3273,7 @@ def assign(self, **kwargs): Parameters ---------- - kwargs : keyword, value pairs + **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not @@ -3283,7 +3283,7 @@ def assign(self, **kwargs): Returns ------- - df : DataFrame + DataFrame A new DataFrame with the new columns in addition to all the existing columns. @@ -3303,48 +3303,34 @@ def assign(self, **kwargs): Examples -------- - >>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) + >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, + ... 
index=['Portland', 'Berkeley']) + >>> df + temp_c + Portland 17.0 + Berkeley 25.0 Where the value is a callable, evaluated on `df`: - - >>> df.assign(ln_A = lambda x: np.log(x.A)) - A B ln_A - 0 1 0.426905 0.000000 - 1 2 -0.780949 0.693147 - 2 3 -0.418711 1.098612 - 3 4 -0.269708 1.386294 - 4 5 -0.274002 1.609438 - 5 6 -0.500792 1.791759 - 6 7 1.649697 1.945910 - 7 8 -1.495604 2.079442 - 8 9 0.549296 2.197225 - 9 10 -0.758542 2.302585 - - Where the value already exists and is inserted: - - >>> newcol = np.log(df['A']) - >>> df.assign(ln_A=newcol) - A B ln_A - 0 1 0.426905 0.000000 - 1 2 -0.780949 0.693147 - 2 3 -0.418711 1.098612 - 3 4 -0.269708 1.386294 - 4 5 -0.274002 1.609438 - 5 6 -0.500792 1.791759 - 6 7 1.649697 1.945910 - 7 8 -1.495604 2.079442 - 8 9 0.549296 2.197225 - 9 10 -0.758542 2.302585 - - Where the keyword arguments depend on each other - - >>> df = pd.DataFrame({'A': [1, 2, 3]}) - - >>> df.assign(B=df.A, C=lambda x:x['A']+ x['B']) - A B C - 0 1 1 2 - 1 2 2 4 - 2 3 3 6 + >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + Alternatively, the same behavior can be achieved by directly + referencing an existing Series or sequence: + >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + In Python 3.6+, you can create multiple columns within the same assign + where one of the columns depends on another one defined within the same + assign: + >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, + ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) + temp_c temp_f temp_k + Portland 17.0 62.6 290.15 + Berkeley 25.0 77.0 298.15 """ data = self.copy()
Updated the DataFrame.assign docstring example to use np.arange instead of np.random.randn to pass the validation test.
https://api.github.com/repos/pandas-dev/pandas/pulls/21917
2018-07-14T23:11:49Z
2018-09-22T23:36:23Z
2018-09-22T23:36:23Z
2018-09-27T12:53:25Z
Use the Agg backend for docs builds
diff --git a/doc/make.py b/doc/make.py index 4d54a2415a194..d85747458148d 100755 --- a/doc/make.py +++ b/doc/make.py @@ -363,6 +363,10 @@ def main(): sys.path.append(args.python_path) globals()['pandas'] = importlib.import_module('pandas') + # Set the matplotlib backend to the non-interactive Agg backend for all + # child processes. + os.environ['MPLBACKEND'] = 'module://matplotlib.backends.backend_agg' + builder = DocBuilder(args.num_jobs, not args.no_api, args.single, args.verbosity) getattr(builder, args.command)()
This uses a non-interactive Agg matplotlib backend to build docs, which avoids trying to use the default MacOS backend, which can fail in some environments. Closes #21913.
https://api.github.com/repos/pandas-dev/pandas/pulls/21914
2018-07-14T16:56:55Z
2018-07-17T12:18:53Z
2018-07-17T12:18:53Z
2018-07-17T12:19:03Z
Add statsmodels to optional dependencies
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt index 9e4e8e99b5205..18aac30f04aea 100644 --- a/ci/requirements-optional-conda.txt +++ b/ci/requirements-optional-conda.txt @@ -22,6 +22,7 @@ s3fs scipy seaborn sqlalchemy +statsmodels xarray xlrd xlsxwriter diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 3cce3f5339883..28dafc43b09c0 100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -24,6 +24,7 @@ s3fs scipy seaborn sqlalchemy +statsmodels xarray xlrd xlsxwriter
Some of the documentation uses methods from statsmodels, which isn't included in the optional dependency list. Fixes #21911. - [x] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21912
2018-07-14T16:27:07Z
2018-07-15T02:14:57Z
2018-07-15T02:14:57Z
2018-07-15T02:14:57Z
Change ._data to ._parent for accessors
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index eebdfe8a54a9d..003ba7608dea4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2401,7 +2401,7 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data): self._validate(data) - self.categorical = data.values + self._parent = data.values self.index = data.index self.name = data.name self._freeze() @@ -2413,19 +2413,19 @@ def _validate(data): "'category' dtype") def _delegate_property_get(self, name): - return getattr(self.categorical, name) + return getattr(self._parent, name) def _delegate_property_set(self, name, new_values): - return setattr(self.categorical, name, new_values) + return setattr(self._parent, name, new_values) @property def codes(self): from pandas import Series - return Series(self.categorical.codes, index=self.index) + return Series(self._parent.codes, index=self.index) def _delegate_method(self, name, *args, **kwargs): from pandas import Series - method = getattr(self.categorical, name) + method = getattr(self._parent, name) res = method(*args, **kwargs) if res is not None: return Series(res, index=self.index, name=self.name) diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index d7b4ea63cd48c..6ab8c4659c31e 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -27,14 +27,14 @@ def __init__(self, data, orig): raise TypeError("cannot convert an object of type {0} to a " "datetimelike index".format(type(data))) - self.values = data + self._parent = data self.orig = orig self.name = getattr(data, 'name', None) self.index = getattr(data, 'index', None) self._freeze() def _get_values(self): - data = self.values + data = self._parent if is_datetime64_dtype(data.dtype): return DatetimeIndex(data, copy=False, name=self.name) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 
6deec52811aff..b9dfc3b8fc69f 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -927,7 +927,7 @@ def str_extract(arr, pat, flags=0, expand=True): if expand: return _str_extract_frame(arr._orig, pat, flags=flags) else: - result, name = _str_extract_noexpand(arr._data, pat, flags=flags) + result, name = _str_extract_noexpand(arr._parent, pat, flags=flags) return arr._wrap_result(result, name=name, expand=expand) @@ -1721,7 +1721,7 @@ def str_encode(arr, encoding, errors="strict"): def _noarg_wrapper(f, docstring=None, **kargs): def wrapper(self): - result = _na_map(f, self._data, **kargs) + result = _na_map(f, self._parent, **kargs) return self._wrap_result(result) wrapper.__name__ = f.__name__ @@ -1735,15 +1735,15 @@ def wrapper(self): def _pat_wrapper(f, flags=False, na=False, **kwargs): def wrapper1(self, pat): - result = f(self._data, pat) + result = f(self._parent, pat) return self._wrap_result(result) def wrapper2(self, pat, flags=0, **kwargs): - result = f(self._data, pat, flags=flags, **kwargs) + result = f(self._parent, pat, flags=flags, **kwargs) return self._wrap_result(result) def wrapper3(self, pat, na=np.nan): - result = f(self._data, pat, na=na) + result = f(self._parent, pat, na=na) return self._wrap_result(result) wrapper = wrapper3 if na else wrapper2 if flags else wrapper1 @@ -1783,7 +1783,7 @@ def __init__(self, data): self._is_categorical = is_categorical_dtype(data) # .values.categories works for both Series/Index - self._data = data.values.categories if self._is_categorical else data + self._parent = data.values.categories if self._is_categorical else data # save orig to blow up categoricals to the right type self._orig = data self._freeze() @@ -2334,14 +2334,14 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): 'side': 'beginning', 'method': 'split'}) def split(self, pat=None, n=-1, expand=False): - result = str_split(self._data, pat, n=n) + result = str_split(self._parent, pat, n=n) return 
self._wrap_result(result, expand=expand) @Appender(_shared_docs['str_split'] % { 'side': 'end', 'method': 'rsplit'}) def rsplit(self, pat=None, n=-1, expand=False): - result = str_rsplit(self._data, pat, n=n) + result = str_rsplit(self._parent, pat, n=n) return self._wrap_result(result, expand=expand) _shared_docs['str_partition'] = (""" @@ -2432,7 +2432,7 @@ def rsplit(self, pat=None, n=-1, expand=False): }) def partition(self, pat=' ', expand=True): f = lambda x: x.partition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) @Appender(_shared_docs['str_partition'] % { @@ -2443,45 +2443,45 @@ def partition(self, pat=' ', expand=True): }) def rpartition(self, pat=' ', expand=True): f = lambda x: x.rpartition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) @copy(str_get) def get(self, i): - result = str_get(self._data, i) + result = str_get(self._parent, i) return self._wrap_result(result) @copy(str_join) def join(self, sep): - result = str_join(self._data, sep) + result = str_join(self._parent, sep) return self._wrap_result(result) @copy(str_contains) def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): - result = str_contains(self._data, pat, case=case, flags=flags, na=na, + result = str_contains(self._parent, pat, case=case, flags=flags, na=na, regex=regex) return self._wrap_result(result) @copy(str_match) def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None): - result = str_match(self._data, pat, case=case, flags=flags, na=na, + result = str_match(self._parent, pat, case=case, flags=flags, na=na, as_indexer=as_indexer) return self._wrap_result(result) @copy(str_replace) def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): - result = str_replace(self._data, pat, repl, n=n, case=case, + result = str_replace(self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex) 
return self._wrap_result(result) @copy(str_repeat) def repeat(self, repeats): - result = str_repeat(self._data, repeats) + result = str_repeat(self._parent, repeats) return self._wrap_result(result) @copy(str_pad) def pad(self, width, side='left', fillchar=' '): - result = str_pad(self._data, width, side=side, fillchar=fillchar) + result = str_pad(self._parent, width, side=side, fillchar=fillchar) return self._wrap_result(result) _shared_docs['str_pad'] = (""" @@ -2574,27 +2574,27 @@ def zfill(self, width): 4 NaN dtype: object """ - result = str_pad(self._data, width, side='left', fillchar='0') + result = str_pad(self._parent, width, side='left', fillchar='0') return self._wrap_result(result) @copy(str_slice) def slice(self, start=None, stop=None, step=None): - result = str_slice(self._data, start, stop, step) + result = str_slice(self._parent, start, stop, step) return self._wrap_result(result) @copy(str_slice_replace) def slice_replace(self, start=None, stop=None, repl=None): - result = str_slice_replace(self._data, start, stop, repl) + result = str_slice_replace(self._parent, start, stop, repl) return self._wrap_result(result) @copy(str_decode) def decode(self, encoding, errors="strict"): - result = str_decode(self._data, encoding, errors) + result = str_decode(self._parent, encoding, errors) return self._wrap_result(result) @copy(str_encode) def encode(self, encoding, errors="strict"): - result = str_encode(self._data, encoding, errors) + result = str_encode(self._parent, encoding, errors) return self._wrap_result(result) _shared_docs['str_strip'] = (r""" @@ -2663,38 +2663,38 @@ def encode(self, encoding, errors="strict"): @Appender(_shared_docs['str_strip'] % dict(side='left and right sides', method='strip')) def strip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='both') + result = str_strip(self._parent, to_strip, side='both') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='left side', 
method='lstrip')) def lstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='left') + result = str_strip(self._parent, to_strip, side='left') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='right side', method='rstrip')) def rstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='right') + result = str_strip(self._parent, to_strip, side='right') return self._wrap_result(result) @copy(str_wrap) def wrap(self, width, **kwargs): - result = str_wrap(self._data, width, **kwargs) + result = str_wrap(self._parent, width, **kwargs) return self._wrap_result(result) @copy(str_get_dummies) def get_dummies(self, sep='|'): # we need to cast to Series of strings as only that has all # methods available for making the dummies... - data = self._orig.astype(str) if self._is_categorical else self._data + data = self._orig.astype(str) if self._is_categorical else self._parent result, name = str_get_dummies(data, sep) return self._wrap_result(result, use_codes=(not self._is_categorical), name=name, expand=True) @copy(str_translate) def translate(self, table, deletechars=None): - result = str_translate(self._data, table, deletechars) + result = str_translate(self._parent, table, deletechars) return self._wrap_result(result) count = _pat_wrapper(str_count, flags=True) @@ -2737,14 +2737,15 @@ def extractall(self, pat, flags=0): dict(side='lowest', method='find', also='rfind : Return highest indexes in each strings')) def find(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='left') + result = str_find(self._parent, sub, start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['find'] % dict(side='highest', method='rfind', also='find : Return lowest indexes in each strings')) def rfind(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='right') + result = str_find(self._parent, sub, + 
start=start, end=end, side='right') return self._wrap_result(result) def normalize(self, form): @@ -2763,7 +2764,7 @@ def normalize(self, form): """ import unicodedata f = lambda x: unicodedata.normalize(form, compat.u_safe(x)) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result) _shared_docs['index'] = (""" @@ -2794,14 +2795,16 @@ def normalize(self, form): dict(side='lowest', similar='find', method='index', also='rindex : Return highest indexes in each strings')) def index(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, side='left') + result = str_index(self._parent, sub, + start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex', also='index : Return lowest indexes in each strings')) def rindex(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, side='right') + result = str_index(self._parent, sub, + start=start, end=end, side='right') return self._wrap_result(result) _shared_docs['len'] = (""" diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 7ce4c23f81ad6..e81b162645b94 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2727,7 +2727,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, class BasePlotMethods(PandasObject): def __init__(self, data): - self._data = data + self._parent = data # can be Series or DataFrame def __call__(self, *args, **kwargs): raise NotImplementedError @@ -2755,7 +2755,7 @@ def __call__(self, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, **kwds): - return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, + return plot_series(self._parent, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, 
logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, @@ -2954,7 +2954,7 @@ def __call__(self, x=None, y=None, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): - return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, + return plot_frame(self._parent, kind=kind, x=x, y=y, ax=ax, subplots=subplots, sharex=sharex, sharey=sharey, layout=layout, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style,
The idea is to reduce the number of distinct meanings `._data` has. With this it is down to just `Index._data` and `NDFrame._data`, I think.
https://api.github.com/repos/pandas-dev/pandas/pulls/21906
2018-07-13T22:41:36Z
2018-08-08T10:52:34Z
2018-08-08T10:52:34Z
2018-08-08T15:50:33Z
BUG: issues with hash-function for Float64HashTable (GH21866)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 8fe3023e9537c..b723e9cc6dca8 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -471,6 +471,7 @@ Numeric - Bug in :class:`Series` ``__rmatmul__`` doesn't support matrix vector multiplication (:issue:`21530`) - Bug in :func:`factorize` fails with read-only array (:issue:`12813`) +- Fixed bug in :func:`unique` handled signed zeros inconsistently: for some inputs 0.0 and -0.0 were treated as equal and for some inputs as different. Now they are treated as equal for all inputs (:issue:`21866`) - - diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h index dd75ae5ec7e28..e9fb49e8a5e42 100644 --- a/pandas/_libs/src/klib/khash_python.h +++ b/pandas/_libs/src/klib/khash_python.h @@ -19,7 +19,20 @@ khint64_t PANDAS_INLINE asint64(double key) { memcpy(&val, &key, sizeof(double)); return val; } -#define kh_float64_hash_func(key) (khint32_t)((asint64(key))>>33^(asint64(key))^(asint64(key))<<11) + +// correct for all inputs but not -0.0 and NaNs +#define kh_float64_hash_func_0_NAN(key) (khint32_t)((asint64(key))>>33^(asint64(key))^(asint64(key))<<11) + +// correct for all inputs but not NaNs +#define kh_float64_hash_func_NAN(key) ((key) == 0.0 ? \ + kh_float64_hash_func_0_NAN(0.0) : \ + kh_float64_hash_func_0_NAN(key)) + +// correct for all +#define kh_float64_hash_func(key) ((key) != (key) ? 
\ + kh_float64_hash_func_NAN(Py_NAN) : \ + kh_float64_hash_func_NAN(key)) + #define kh_float64_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a))) #define KHASH_MAP_INIT_FLOAT64(name, khval_t) \ diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..3e754355bcb26 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -7,6 +7,7 @@ from numpy import nan from datetime import datetime from itertools import permutations +import struct from pandas import (Series, Categorical, CategoricalIndex, Timestamp, DatetimeIndex, Index, IntervalIndex) import pandas as pd @@ -500,6 +501,25 @@ def test_obj_none_preservation(self): tm.assert_numpy_array_equal(result, expected, strict_nan=True) + def test_signed_zero(self): + # GH 21866 + a = np.array([-0.0, 0.0]) + result = pd.unique(a) + expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent + tm.assert_numpy_array_equal(result, expected) + + def test_different_nans(self): + # GH 21866 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent + result = pd.unique(a) + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + class TestIsin(object): @@ -1087,6 +1107,31 @@ def test_lookup_nan(self, writable): tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64)) + def test_add_signed_zeros(self): + # GH 21866 inconsistent hash-function for float64 + # default hash-function would lead to different hash-buckets + # for 0.0 and -0.0 if there are more than 2^30 hash-buckets + # but this would mean 16GB + N = 4 # 12 * 10**8 would trigger the error, if you have enough memory + m = ht.Float64HashTable(N) + m.set_item(0.0, 0) + m.set_item(-0.0, 0) + assert len(m) == 1 # 0.0 and -0.0 are equivalent + + def 
test_add_different_nans(self): + # GH 21866 inconsistent hash-function for float64 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + # default hash function would lead to different hash-buckets + # for NAN1 and NAN2 even if there are only 4 buckets: + m = ht.Float64HashTable() + m.set_item(NAN1, 0) + m.set_item(NAN2, 0) + assert len(m) == 1 # NAN1 and NAN2 are equivalent + def test_lookup_overflow(self, writable): xs = np.array([1, 2, 2**63], dtype=np.uint64) # GH 21688 ensure we can deal with readonly memory views
The following issues 1) hash(0.0) != hash(-0.0) 2) hash(x) != hash(y) for different x,y which are nans are solved by setting: 1) hash(-0.0):=hash(0.0) 2) hash(x):=hash(np.nan) for every x which is nan - [x] closes #21866 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21904
2018-07-13T22:09:40Z
2018-07-25T19:13:29Z
2018-07-25T19:13:29Z
2018-08-09T19:34:37Z
[REF] implement internals as dir
diff --git a/pandas/core/internals.py b/pandas/core/internals/__init__.py similarity index 99% rename from pandas/core/internals.py rename to pandas/core/internals/__init__.py index 5a87a8368dc88..fde3aaa14ac5d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import warnings import copy from warnings import catch_warnings diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 7fbf7ec05e91e..39418fb72bf4a 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -13,11 +13,12 @@ from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, Series, Categorical, TimedeltaIndex, SparseArray) from pandas.compat import OrderedDict, lrange -from pandas.core.internals import (BlockPlacement, SingleBlockManager, +from pandas.core.internals import (SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos import pandas.util.testing as tm import pandas as pd +from pandas._libs.internals import BlockPlacement from pandas.util.testing import (assert_almost_equal, assert_frame_equal, randn, assert_series_equal) from pandas.compat import zip, u
In the name of a) cleaning up internals and b) isolating BlockManager from everything else, this separates core.internals into `internals.managers`, `internals.blocks`, `internals.concat`.
https://api.github.com/repos/pandas-dev/pandas/pulls/21903
2018-07-13T22:06:01Z
2018-07-21T17:32:23Z
2018-07-21T17:32:23Z
2018-07-23T11:33:29Z
TST/CLN: correctly skip in indexes/common; add test for duplicated
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index bb82d5578481b..56f59851d6d04 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -35,10 +35,6 @@ def verify_pickle(self, indices): assert indices.equals(unpickled) def test_pickle_compat_construction(self): - # this is testing for pickle compat - if self._holder is None: - return - # need an object to create with pytest.raises(TypeError, self._holder) @@ -236,7 +232,7 @@ def test_set_name_methods(self, indices): # don't tests a MultiIndex here (as its tested separated) if isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for MultiIndex') original_name = indices.name new_ind = indices.set_names([new_name]) assert new_ind.name == new_name @@ -333,7 +329,8 @@ def test_copy_and_deepcopy(self, indices): from copy import copy, deepcopy if isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for MultiIndex') + for func in (copy, deepcopy): idx_copy = func(indices) assert idx_copy is not indices @@ -342,20 +339,50 @@ def test_copy_and_deepcopy(self, indices): new_copy = indices.copy(deep=True, name="banana") assert new_copy.name == "banana" - def test_duplicates(self, indices): + def test_has_duplicates(self, indices): if type(indices) is not self._holder: - return + pytest.skip('Can only check if we have the correct type') if not len(indices) or isinstance(indices, MultiIndex): - return + # MultiIndex tested separately in: + # tests/indexes/multi/test_unique_and_duplicates + pytest.skip('Skip check for empty Index and MultiIndex') + idx = self._holder([indices[0]] * 5) assert not idx.is_unique assert idx.has_duplicates + @pytest.mark.parametrize('keep', ['first', 'last', False]) + def test_duplicated(self, indices, keep): + if type(indices) is not self._holder: + pytest.skip('Can only check if we know the index type') + if not len(indices) or isinstance(indices, MultiIndex): + # MultiIndex tested separately in: + # 
tests/indexes/multi/test_unique_and_duplicates + pytest.skip('Skip check for empty Index and MultiIndex') + + idx = self._holder(indices) + if idx.has_duplicates: + # We are testing the duplicated-method here, so we need to know + # exactly which indices are duplicate and how (for the result). + # This is not possible if "idx" has duplicates already, which we + # therefore remove. This is seemingly circular, as drop_duplicates + # invokes duplicated, but in the end, it all works out because we + # cross-check with Series.duplicated, which is tested separately. + idx = idx.drop_duplicates() + + n, k = len(idx), 10 + duplicated_selection = np.random.choice(n, k * n) + expected = pd.Series(duplicated_selection).duplicated(keep=keep).values + idx = self._holder(idx.values[duplicated_selection]) + + result = idx.duplicated(keep=keep) + tm.assert_numpy_array_equal(result, expected) + def test_unique(self, indices): # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) if isinstance(indices, (MultiIndex, CategoricalIndex)): - return + pytest.skip('Skip check for MultiIndex/CategoricalIndex') # GH 17896 expected = indices.drop_duplicates() @@ -375,7 +402,7 @@ def test_unique_na(self): def test_get_unique_index(self, indices): # MultiIndex tested separately if not len(indices) or isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for empty Index and MultiIndex') idx = indices[[0] * 5] idx_unique = indices[[0]] @@ -394,7 +421,7 @@ def test_get_unique_index(self, indices): # nans: if not indices._can_hold_na: - return + pytest.skip('Skip na-check if index cannot hold na') if needs_i8_conversion(indices): vals = indices.asi8[[0] * 5] @@ -423,7 +450,7 @@ def test_sort(self, indices): def test_mutability(self, indices): if not len(indices): - return + pytest.skip('Skip check for empty Index') pytest.raises(TypeError, indices.__setitem__, 0, indices[0]) def test_view(self, indices): @@ -761,7 
+788,7 @@ def test_equals_op(self): # GH9947, GH10637 index_a = self.create_index() if isinstance(index_a, PeriodIndex): - return + pytest.skip('Skip check for PeriodIndex') n = len(index_a) index_b = index_a[0:-1] @@ -989,11 +1016,11 @@ def test_searchsorted_monotonic(self, indices): # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex if isinstance(indices, (MultiIndex, IntervalIndex)): - return + pytest.skip('Skip check for MultiIndex/IntervalIndex') # nothing to test if the index is empty if indices.empty: - return + pytest.skip('Skip check for empty Index') value = indices[0] # determine the expected results (handle dupes for 'right') diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index a2a4170256088..2221fd023b561 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -590,12 +590,15 @@ def test_is_unique(self, values, expected): ci = CategoricalIndex(values) assert ci.is_unique is expected - def test_duplicates(self): + def test_has_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo') assert not idx.is_unique assert idx.has_duplicates + def test_drop_duplicates(self): + + idx = CategoricalIndex([0, 0, 0], name='foo') expected = CategoricalIndex([0], name='foo') tm.assert_index_equal(idx.drop_duplicates(), expected) tm.assert_index_equal(idx.unique(), expected) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 38f4b341116b8..2a9efd92df8a3 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -806,7 +806,7 @@ def test_explicit_conversions(self): result = a - fidx tm.assert_index_equal(result, expected) - def test_duplicates(self): + def test_has_duplicates(self): for ind in self.indices: if not len(ind): continue
Splitting up #21645 * Added tests for `duplicated` * Following https://github.com/pandas-dev/pandas/pull/21645#discussion_r202192191, turned several blank `return` statements (which falsely pass the test) into `pytest.skip`.
https://api.github.com/repos/pandas-dev/pandas/pulls/21902
2018-07-13T21:20:41Z
2018-08-10T10:37:22Z
2018-08-10T10:37:21Z
2018-08-10T17:17:00Z
TST/CLN: clean up indexes/multi/test_unique_and_duplicates
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py index 6cf9003500b61..afe651d22c6a7 100644 --- a/pandas/tests/indexes/multi/conftest.py +++ b/pandas/tests/indexes/multi/conftest.py @@ -15,13 +15,25 @@ def idx(): major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] - index = MultiIndex( - levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels], - names=index_names, - verify_integrity=False - ) - return index + mi = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, verify_integrity=False) + return mi + + +@pytest.fixture +def idx_dup(): + # compare tests/indexes/multi/conftest.py + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 0, 1, 1]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + mi = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, verify_integrity=False) + return mi @pytest.fixture diff --git a/pandas/tests/indexes/multi/test_unique_and_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py similarity index 58% rename from pandas/tests/indexes/multi/test_unique_and_duplicates.py rename to pandas/tests/indexes/multi/test_duplicates.py index c1000e5b6e0f6..1cdf0ca6e013e 100644 --- a/pandas/tests/indexes/multi/test_unique_and_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -2,56 +2,54 @@ import warnings from itertools import product +import pytest import numpy as np -import pandas as pd -import pandas.util.testing as tm -import pytest -from pandas import MultiIndex + from pandas.compat import range, u +from pandas import MultiIndex, DatetimeIndex +from pandas._libs import hashtable +import pandas.util.testing as tm @pytest.mark.parametrize('names', [None, ['first', 'second']]) def 
test_unique(names): - mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], - names=names) + mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) + exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], - names=names) + mi = MultiIndex.from_arrays([list('aaaa'), list('abab')], + names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], - names=mi.names) + exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], - names=names) + mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) + exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) # GH #20568 - empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=names) + mi = MultiIndex.from_arrays([[], []], names=names) res = mi.unique() tm.assert_index_equal(mi, res) def test_unique_datetimelike(): - idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', - '2015-01-01', 'NaT', 'NaT']) - idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', - '2015-01-02', 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - result = pd.MultiIndex.from_arrays([idx1, idx2]).unique() - - eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) - eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02', - 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - exp = pd.MultiIndex.from_arrays([eidx1, eidx2]) + idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', + '2015-01-01', 'NaT', 'NaT']) + idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', + '2015-01-02', 'NaT', '2015-01-01'], + tz='Asia/Tokyo') + result 
= MultiIndex.from_arrays([idx1, idx2]).unique() + + eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) + eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02', + 'NaT', '2015-01-01'], + tz='Asia/Tokyo') + exp = MultiIndex.from_arrays([eidx1, eidx2]) tm.assert_index_equal(result, exp) @@ -63,41 +61,51 @@ def test_unique_level(idx, level): tm.assert_index_equal(result, expected) # With already unique level - mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], - names=['first', 'second']) + mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], + names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) # With empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) + mi = MultiIndex.from_arrays([[], []], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) +@pytest.mark.parametrize('dropna', [True, False]) +def test_get_unique_index(idx, dropna): + mi = idx[[0, 1, 0, 1, 1, 0, 0]] + expected = mi._shallow_copy(mi[[0, 1]]) + + result = mi._get_unique_index(dropna=dropna) + assert result.unique + tm.assert_index_equal(result, expected) + + def test_duplicate_multiindex_labels(): # GH 17464 # Make sure that a MultiIndex with duplicate levels throws a ValueError with pytest.raises(ValueError): - ind = pd.MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) + mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) # And that using set_levels with duplicate levels fails - ind = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], - [1, 2, 1, 2, 3]]) + mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], + [1, 2, 1, 2, 3]]) with pytest.raises(ValueError): - ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], - inplace=True) + mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], + inplace=True) @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], [1, 'a', 1]]) def 
test_duplicate_level_names(names): # GH18872, GH19029 - mi = pd.MultiIndex.from_product([[0, 1]] * 3, names=names) + mi = MultiIndex.from_product([[0, 1]] * 3, names=names) assert mi.names == names # With .rename() - mi = pd.MultiIndex.from_product([[0, 1]] * 3) + mi = MultiIndex.from_product([[0, 1]] * 3) mi = mi.rename(names) assert mi.names == names @@ -109,27 +117,34 @@ def test_duplicate_level_names(names): def test_duplicate_meta_data(): # GH 10115 - index = MultiIndex( + mi = MultiIndex( levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) - for idx in [index, - index.set_names([None, None]), - index.set_names([None, 'Num']), - index.set_names(['Upper', 'Num']), ]: + for idx in [mi, + mi.set_names([None, None]), + mi.set_names([None, 'Num']), + mi.set_names(['Upper', 'Num']), ]: assert idx.has_duplicates assert idx.drop_duplicates().names == idx.names -def test_duplicates(idx): +def test_has_duplicates(idx, idx_dup): + # see fixtures + assert idx.is_unique assert not idx.has_duplicates - assert idx.append(idx).has_duplicates + assert not idx_dup.is_unique + assert idx_dup.has_duplicates - index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[ - [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) - assert index.has_duplicates + mi = MultiIndex(levels=[[0, 1], [0, 1, 2]], + labels=[[0, 0, 0, 0, 1, 1, 1], + [0, 1, 2, 0, 0, 1, 2]]) + assert not mi.is_unique + assert mi.has_duplicates + +def test_has_duplicates_from_tuples(): # GH 9075 t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), @@ -150,9 +165,11 @@ def test_duplicates(idx): (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] - index = pd.MultiIndex.from_tuples(t) - assert not index.has_duplicates + mi = MultiIndex.from_tuples(t) + assert not mi.has_duplicates + +def test_has_duplicates_overflow(): # handle int64 overflow if possible 
def check(nlevels, with_nulls): labels = np.tile(np.arange(500), 2) @@ -171,20 +188,20 @@ def check(nlevels, with_nulls): levels = [level] * nlevels + [[0, 1]] # no dups - index = MultiIndex(levels=levels, labels=labels) - assert not index.has_duplicates + mi = MultiIndex(levels=levels, labels=labels) + assert not mi.has_duplicates # with a dup if with_nulls: def f(a): return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) - index = MultiIndex(levels=levels, labels=labels) + mi = MultiIndex(levels=levels, labels=labels) else: - values = index.values.tolist() - index = MultiIndex.from_tuples(values + [values[0]]) + values = mi.values.tolist() + mi = MultiIndex.from_tuples(values + [values[0]]) - assert index.has_duplicates + assert mi.has_duplicates # no overflow check(4, False) @@ -194,17 +211,31 @@ def f(a): check(8, False) check(8, True) + +@pytest.mark.parametrize('keep, expected', [ + ('first', np.array([False, False, False, True, True, False])), + ('last', np.array([False, True, True, False, False, False])), + (False, np.array([False, True, True, True, True, False])) +]) +def test_duplicated(idx_dup, keep, expected): + result = idx_dup.duplicated(keep=keep) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize('keep', ['first', 'last', False]) +def test_duplicated_large(keep): # GH 9125 n, k = 200, 5000 levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] labels = [np.random.choice(n, k * n) for lev in levels] mi = MultiIndex(levels=levels, labels=labels) - for keep in ['first', 'last', False]: - left = mi.duplicated(keep=keep) - right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep) - tm.assert_numpy_array_equal(left, right) + result = mi.duplicated(keep=keep) + expected = hashtable.duplicated_object(mi.values, keep=keep) + tm.assert_numpy_array_equal(result, expected) + +def test_get_duplicates(): # GH5873 for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) @@ -212,11 +243,10 
@@ def f(a): with warnings.catch_warnings(record=True): # Deprecated - see GH20239 - assert mi.get_duplicates().equals(MultiIndex.from_arrays( - [[], []])) + assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []])) - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - 2, dtype='bool')) + tm.assert_numpy_array_equal(mi.duplicated(), + np.zeros(2, dtype='bool')) for n in range(1, 6): # 1st level shape for m in range(1, 5): # 2nd level shape @@ -232,28 +262,5 @@ def f(a): assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - len(mi), dtype='bool')) - - -def test_get_unique_index(idx): - idx = idx[[0, 1, 0, 1, 1, 0, 0]] - expected = idx._shallow_copy(idx[[0, 1]]) - - for dropna in [False, True]: - result = idx._get_unique_index(dropna=dropna) - assert result.unique - tm.assert_index_equal(result, expected) - - -def test_unique_na(): - idx = pd.Index([2, np.nan, 2, 1], name='my_index') - expected = pd.Index([2, np.nan, 1], name='my_index') - result = idx.unique() - tm.assert_index_equal(result, expected) - - -def test_duplicate_level_names_access_raises(idx): - idx.names = ['foo', 'foo'] - tm.assert_raises_regex(ValueError, 'name foo occurs multiple times', - idx._get_level_number, 'foo') + tm.assert_numpy_array_equal(mi.duplicated(), + np.zeros(len(mi), dtype='bool')) diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index a9fbb55679173..68e8bb0cf58f2 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -115,3 +115,10 @@ def test_names(idx, index_names): ind_names = list(index.names) level_names = [level.name for level in index.levels] assert ind_names == level_names + + +def test_duplicate_level_names_access_raises(idx): + # GH19029 + idx.names = ['foo', 'foo'] + tm.assert_raises_regex(ValueError, 'name foo occurs multiple times', + idx._get_level_number, 'foo')
Splitting up #21645 * Added tests for `duplicated`, including a fixture for a `MultiIndex` with duplicates * Broke up a huge test (`test_duplicates`) into smaller chunks * removed a test (`test_unique_na`) that was for an `Index` (not `MultiIndex`), and exists verbatim in `tests/indexes/common.py` * moved `test_duplicate_level_names_access_raises` to its appropriate module
https://api.github.com/repos/pandas-dev/pandas/pulls/21900
2018-07-13T21:14:16Z
2018-07-16T10:57:07Z
2018-07-16T10:57:07Z
2018-07-17T07:18:59Z
TST/CLN: series.duplicated; parametrisation; fix warning
diff --git a/pandas/conftest.py b/pandas/conftest.py index c1376670ffbf0..a979c3fc3bfac 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -248,7 +248,19 @@ def tz_aware_fixture(request): return request.param -@pytest.fixture(params=[str, 'str', 'U']) +UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] +SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] +ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES + +FLOAT_DTYPES = [float, "float32", "float64"] +COMPLEX_DTYPES = [complex, "complex64", "complex128"] +STRING_DTYPES = [str, 'str', 'U'] + +ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES +ALL_NUMPY_DTYPES = ALL_REAL_DTYPES + COMPLEX_DTYPES + STRING_DTYPES + + +@pytest.fixture(params=STRING_DTYPES) def string_dtype(request): """Parametrized fixture for string dtypes. @@ -259,9 +271,6 @@ def string_dtype(request): return request.param -FLOAT_DTYPES = [float, "float32", "float64"] - - @pytest.fixture(params=FLOAT_DTYPES) def float_dtype(request): """ @@ -274,7 +283,7 @@ def float_dtype(request): return request.param -@pytest.fixture(params=[complex, "complex64", "complex128"]) +@pytest.fixture(params=COMPLEX_DTYPES) def complex_dtype(request): """ Parameterized fixture for complex dtypes. @@ -286,12 +295,6 @@ def complex_dtype(request): return request.param -UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] -SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] -ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES -ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES - - @pytest.fixture(params=SIGNED_INT_DTYPES) def sint_dtype(request): """ @@ -358,6 +361,31 @@ def any_real_dtype(request): return request.param +@pytest.fixture(params=ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. 
+ + * int8 + * uint8 + * int16 + * uint16 + * int32 + * uint32 + * int64 + * uint64 + * float32 + * float64 + * complex64 + * complex128 + * str + * 'str' + * 'U' + """ + + return request.param + + @pytest.fixture def mock(): """ diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index fd14118bd833f..28a77bbb1d3fa 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -907,144 +907,6 @@ def test_matmul(self): pytest.raises(Exception, a.dot, a.values[:3]) pytest.raises(ValueError, a.dot, b.T) - def test_value_counts_nunique(self): - - # basics.rst doc example - series = Series(np.random.randn(500)) - series[20:500] = np.nan - series[10:20] = 5000 - result = series.nunique() - assert result == 11 - - # GH 18051 - s = pd.Series(pd.Categorical([])) - assert s.nunique() == 0 - s = pd.Series(pd.Categorical([np.nan])) - assert s.nunique() == 0 - - def test_unique(self): - - # 714 also, dtype=float - s = Series([1.2345] * 100) - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - s = Series([1.2345] * 100, dtype='f4') - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - # NAs in object arrays #714 - s = Series(['foo'] * 100, dtype='O') - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - # decision about None - s = Series([1, 2, 3, None, None, None], dtype=object) - result = s.unique() - expected = np.array([1, 2, 3, None], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # GH 18051 - s = pd.Series(pd.Categorical([])) - tm.assert_categorical_equal(s.unique(), pd.Categorical([]), - check_dtype=False) - s = pd.Series(pd.Categorical([np.nan])) - tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]), - check_dtype=False) - - @pytest.mark.parametrize( - "tc1, tc2", - [ - ( - Series([1, 2, 3, 3], dtype=np.dtype('int_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('int_')) - ), - ( - Series([1, 2, 3, 3], 
dtype=np.dtype('uint')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('uint')) - ), - ( - Series([1, 2, 3, 3], dtype=np.dtype('float_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('float_')) - ), - ( - Series([1, 2, 3, 3], dtype=np.dtype('unicode_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('unicode_')) - ) - ] - ) - def test_drop_duplicates_non_bool(self, tc1, tc2): - # Test case 1 - expected = Series([False, False, False, True]) - assert_series_equal(tc1.duplicated(), expected) - assert_series_equal(tc1.drop_duplicates(), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc1[~expected]) - - expected = Series([False, False, True, False]) - assert_series_equal(tc1.duplicated(keep='last'), expected) - assert_series_equal(tc1.drop_duplicates(keep='last'), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc1[~expected]) - - expected = Series([False, False, True, True]) - assert_series_equal(tc1.duplicated(keep=False), expected) - assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc1[~expected]) - - # Test case 2 - expected = Series([False, False, False, False, True, True, False]) - assert_series_equal(tc2.duplicated(), expected) - assert_series_equal(tc2.drop_duplicates(), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc2[~expected]) - - expected = Series([False, True, True, False, False, False, False]) - assert_series_equal(tc2.duplicated(keep='last'), expected) - assert_series_equal(tc2.drop_duplicates(keep='last'), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc2[~expected]) - - expected = Series([False, True, True, False, True, True, False]) - assert_series_equal(tc2.duplicated(keep=False), expected) - 
assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc2[~expected]) - - def test_drop_duplicates_bool(self): - tc = Series([True, False, True, False]) - - expected = Series([False, False, True, True]) - assert_series_equal(tc.duplicated(), expected) - assert_series_equal(tc.drop_duplicates(), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc[~expected]) - - expected = Series([True, True, False, False]) - assert_series_equal(tc.duplicated(keep='last'), expected) - assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc[~expected]) - - expected = Series([True, True, True, True]) - assert_series_equal(tc.duplicated(keep=False), expected) - assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc[~expected]) - def test_clip(self): val = self.ts.median() @@ -1416,7 +1278,8 @@ def test_ptp(self): N = 1000 arr = np.random.randn(N) ser = Series(arr) - assert np.ptp(ser) == np.ptp(arr) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + assert np.ptp(ser) == np.ptp(arr) # GH11163 s = Series([3, 5, np.nan, -3, 10]) @@ -1457,10 +1320,6 @@ def test_empty_timeseries_redections_return_nat(self): assert Series([], dtype=dtype).min() is pd.NaT assert Series([], dtype=dtype).max() is pd.NaT - def test_unique_data_ownership(self): - # it works! 
#1807 - Series(Series(["a", "c", "b"]).unique()).sort_values() - def test_repeat(self): s = Series(np.random.randn(3), index=['a', 'b', 'c']) @@ -1537,29 +1396,6 @@ def test_searchsorted_sorter(self): e = np.array([0, 2], dtype=np.intp) tm.assert_numpy_array_equal(r, e) - def test_is_unique(self): - # GH11946 - s = Series(np.random.randint(0, 10, size=1000)) - assert not s.is_unique - s = Series(np.arange(1000)) - assert s.is_unique - - def test_is_unique_class_ne(self, capsys): - # GH 20661 - class Foo(object): - def __init__(self, val): - self._value = val - - def __ne__(self, other): - raise Exception("NEQ not supported") - - li = [Foo(i) for i in range(5)] - s = pd.Series(li, index=[i for i in range(5)]) - _, err = capsys.readouterr() - s.is_unique - _, err = capsys.readouterr() - assert len(err) == 0 - def test_is_monotonic(self): s = Series(np.random.randint(0, 10, size=1000)) diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py new file mode 100644 index 0000000000000..2e4d64188307c --- /dev/null +++ b/pandas/tests/series/test_duplicates.py @@ -0,0 +1,140 @@ +# coding=utf-8 + +import pytest + +import numpy as np + +from pandas import Series, Categorical +import pandas.util.testing as tm + + +def test_value_counts_nunique(): + # basics.rst doc example + series = Series(np.random.randn(500)) + series[20:500] = np.nan + series[10:20] = 5000 + result = series.nunique() + assert result == 11 + + # GH 18051 + s = Series(Categorical([])) + assert s.nunique() == 0 + s = Series(Categorical([np.nan])) + assert s.nunique() == 0 + + +def test_unique(): + # GH714 also, dtype=float + s = Series([1.2345] * 100) + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + s = Series([1.2345] * 100, dtype='f4') + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + # NAs in object arrays #714 + s = Series(['foo'] * 100, dtype='O') + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + # decision 
about None + s = Series([1, 2, 3, None, None, None], dtype=object) + result = s.unique() + expected = np.array([1, 2, 3, None], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # GH 18051 + s = Series(Categorical([])) + tm.assert_categorical_equal(s.unique(), Categorical([]), check_dtype=False) + s = Series(Categorical([np.nan])) + tm.assert_categorical_equal(s.unique(), Categorical([np.nan]), + check_dtype=False) + + +def test_unique_data_ownership(): + # it works! #1807 + Series(Series(["a", "c", "b"]).unique()).sort_values() + + +def test_is_unique(): + # GH11946 + s = Series(np.random.randint(0, 10, size=1000)) + assert not s.is_unique + s = Series(np.arange(1000)) + assert s.is_unique + + +def test_is_unique_class_ne(capsys): + # GH 20661 + class Foo(object): + def __init__(self, val): + self._value = val + + def __ne__(self, other): + raise Exception("NEQ not supported") + + li = [Foo(i) for i in range(5)] + s = Series(li, index=[i for i in range(5)]) + _, err = capsys.readouterr() + s.is_unique + _, err = capsys.readouterr() + assert len(err) == 0 + + +@pytest.mark.parametrize( + 'keep, expected', + [ + ('first', Series([False, False, False, False, True, True, False])), + ('last', Series([False, True, True, False, False, False, False])), + (False, Series([False, True, True, False, True, True, False])) + ]) +def test_drop_duplicates_non_bool(any_numpy_dtype, keep, expected): + tc = Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(any_numpy_dtype)) + + tm.assert_series_equal(tc.duplicated(keep=keep), expected) + tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) + sc = tc.copy() + sc.drop_duplicates(keep=keep, inplace=True) + tm.assert_series_equal(sc, tc[~expected]) + + +@pytest.mark.parametrize('keep, expected', + [('first', Series([False, False, True, True])), + ('last', Series([True, True, False, False])), + (False, Series([True, True, True, True]))]) +def test_drop_duplicates_bool(keep, expected): + tc = Series([True, False, 
True, False]) + + tm.assert_series_equal(tc.duplicated(keep=keep), expected) + tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) + sc = tc.copy() + sc.drop_duplicates(keep=keep, inplace=True) + tm.assert_series_equal(sc, tc[~expected]) + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True], name='name')), + ('last', Series([True, True, False, False, False], name='name')), + (False, Series([True, True, True, False, True], name='name')) +]) +def test_duplicated_keep(keep, expected): + s = Series(['a', 'b', 'b', 'c', 'a'], name='name') + + result = s.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_nan_none(keep, expected): + s = Series([np.nan, 3, 3, None, np.nan], dtype=object) + + result = s.duplicated(keep=keep) + tm.assert_series_equal(result, expected)
Splitting up #21645 Added tests for `duplicated`, parametrized two tests for `drop_duplicates`, fixed a warning from #21614.
https://api.github.com/repos/pandas-dev/pandas/pulls/21899
2018-07-13T21:10:31Z
2018-07-16T10:55:32Z
2018-07-16T10:55:31Z
2018-07-17T07:17:43Z
TST: add test for duplicated frame/test_analytics
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index c0e9b89c1877f..a399fa2b68680 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1542,384 +1542,6 @@ def test_isin_empty_datetimelike(self): result = df1_td.isin(df3) tm.assert_frame_equal(result, expected) - # ---------------------------------------------------------------------- - # Row deduplication - - def test_drop_duplicates(self): - df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('AAA') - expected = df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep='last') - expected = df.loc[[6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep=False) - expected = df.loc[[]] - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - expected = df.loc[[0, 1, 2, 3]] - result = df.drop_duplicates(np.array(['AAA', 'B'])) - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates(['AAA', 'B']) - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AAA', 'B'), keep='last') - expected = df.loc[[0, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AAA', 'B'), keep=False) - expected = df.loc[[0]] - tm.assert_frame_equal(result, expected) - - # consider everything - df2 = df.loc[:, ['AAA', 'B', 'C']] - - result = df2.drop_duplicates() - # in this case only - expected = df2.drop_duplicates(['AAA', 'B']) - tm.assert_frame_equal(result, expected) - - result = df2.drop_duplicates(keep='last') - expected = df2.drop_duplicates(['AAA', 'B'], keep='last') - tm.assert_frame_equal(result, expected) - - result = df2.drop_duplicates(keep=False) - expected = 
df2.drop_duplicates(['AAA', 'B'], keep=False) - tm.assert_frame_equal(result, expected) - - # integers - result = df.drop_duplicates('C') - expected = df.iloc[[0, 2]] - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates('C', keep='last') - expected = df.iloc[[-2, -1]] - tm.assert_frame_equal(result, expected) - - df['E'] = df['C'].astype('int8') - result = df.drop_duplicates('E') - expected = df.iloc[[0, 2]] - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates('E', keep='last') - expected = df.iloc[[-2, -1]] - tm.assert_frame_equal(result, expected) - - # GH 11376 - df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0], - 'y': [0, 6, 5, 5, 9, 1, 2]}) - expected = df.loc[df.index != 3] - tm.assert_frame_equal(df.drop_duplicates(), expected) - - df = pd.DataFrame([[1, 0], [0, 2]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - df = pd.DataFrame([[-2, 0], [0, -4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - x = np.iinfo(np.int64).max / 3 * 2 - df = pd.DataFrame([[-x, x], [0, x + 4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - df = pd.DataFrame([[-x, x], [x, x + 4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - # GH 11864 - df = pd.DataFrame([i] * 9 for i in range(16)) - df = df.append([[1] + [0] * 8], ignore_index=True) - - for keep in ['first', 'last', False]: - assert df.duplicated(keep=keep).sum() == 0 - - @pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']]) - def test_duplicated_with_misspelled_column_name(self, subset): - # GH 19730 - df = pd.DataFrame({'A': [0, 0, 1], - 'B': [0, 0, 1], - 'C': [0, 0, 1]}) - - with pytest.raises(KeyError): - df.duplicated(subset) - - with pytest.raises(KeyError): - df.drop_duplicates(subset) - - @pytest.mark.slow - def test_duplicated_do_not_fail_on_wide_dataframes(self): - # gh-21524 - # Given the wide dataframe with a lot of columns - # with different (important!) 
values - data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000) - for i in range(100)} - df = pd.DataFrame(data).T - result = df.duplicated() - - # Then duplicates produce the bool pd.Series as a result - # and don't fail during calculation. - # Actual values doesn't matter here, though usually - # it's all False in this case - assert isinstance(result, pd.Series) - assert result.dtype == np.bool - - def test_drop_duplicates_with_duplicate_column_names(self): - # GH17836 - df = DataFrame([ - [1, 2, 5], - [3, 4, 6], - [3, 4, 7] - ], columns=['a', 'a', 'b']) - - result0 = df.drop_duplicates() - tm.assert_frame_equal(result0, df) - - result1 = df.drop_duplicates('a') - expected1 = df[:2] - tm.assert_frame_equal(result1, expected1) - - def test_drop_duplicates_for_take_all(self): - df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', - 'foo', 'bar', 'qux', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('AAA') - expected = df.iloc[[0, 1, 2, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep='last') - expected = df.iloc[[2, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep=False) - expected = df.iloc[[2, 6]] - tm.assert_frame_equal(result, expected) - - # multiple columns - result = df.drop_duplicates(['AAA', 'B']) - expected = df.iloc[[0, 1, 2, 3, 4, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['AAA', 'B'], keep='last') - expected = df.iloc[[0, 1, 2, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['AAA', 'B'], keep=False) - expected = df.iloc[[0, 1, 2, 6]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_tuple(self): - df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 
'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates(('AA', 'AB')) - expected = df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AA', 'AB'), keep='last') - expected = df.loc[[6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AA', 'AB'), keep=False) - expected = df.loc[[]] # empty df - assert len(result) == 0 - tm.assert_frame_equal(result, expected) - - # multi column - expected = df.loc[[0, 1, 2, 3]] - result = df.drop_duplicates((('AA', 'AB'), 'B')) - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_NA(self): - # none - df = DataFrame({'A': [None, None, 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('A') - expected = df.loc[[0, 2, 3]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep='last') - expected = df.loc[[1, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep=False) - expected = df.loc[[]] # empty df - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - result = df.drop_duplicates(['A', 'B']) - expected = df.loc[[0, 2, 3, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['A', 'B'], keep='last') - expected = df.loc[[1, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['A', 'B'], keep=False) - expected = df.loc[[6]] - tm.assert_frame_equal(result, expected) - - # nan - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('C') - expected = 
df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep='last') - expected = df.loc[[3, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep=False) - expected = df.loc[[]] # empty df - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - result = df.drop_duplicates(['C', 'B']) - expected = df.loc[[0, 1, 2, 4]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['C', 'B'], keep='last') - expected = df.loc[[1, 3, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['C', 'B'], keep=False) - expected = df.loc[[1]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_NA_for_take_all(self): - # none - df = DataFrame({'A': [None, None, 'foo', 'bar', - 'foo', 'baz', 'bar', 'qux'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]}) - - # single column - result = df.drop_duplicates('A') - expected = df.iloc[[0, 2, 3, 5, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep='last') - expected = df.iloc[[1, 4, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep=False) - expected = df.iloc[[5, 7]] - tm.assert_frame_equal(result, expected) - - # nan - - # single column - result = df.drop_duplicates('C') - expected = df.iloc[[0, 1, 5, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep='last') - expected = df.iloc[[3, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep=False) - expected = df.iloc[[5, 6]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_inplace(self): - orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - df = orig.copy() - df.drop_duplicates('A', 
inplace=True) - expected = orig[:2] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates('A', keep='last', inplace=True) - expected = orig.loc[[6, 7]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates('A', keep=False, inplace=True) - expected = orig.loc[[]] - result = df - tm.assert_frame_equal(result, expected) - assert len(df) == 0 - - # multi column - df = orig.copy() - df.drop_duplicates(['A', 'B'], inplace=True) - expected = orig.loc[[0, 1, 2, 3]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates(['A', 'B'], keep='last', inplace=True) - expected = orig.loc[[0, 5, 6, 7]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates(['A', 'B'], keep=False, inplace=True) - expected = orig.loc[[0]] - result = df - tm.assert_frame_equal(result, expected) - - # consider everything - orig2 = orig.loc[:, ['A', 'B', 'C']].copy() - - df2 = orig2.copy() - df2.drop_duplicates(inplace=True) - # in this case only - expected = orig2.drop_duplicates(['A', 'B']) - result = df2 - tm.assert_frame_equal(result, expected) - - df2 = orig2.copy() - df2.drop_duplicates(keep='last', inplace=True) - expected = orig2.drop_duplicates(['A', 'B'], keep='last') - result = df2 - tm.assert_frame_equal(result, expected) - - df2 = orig2.copy() - df2.drop_duplicates(keep=False, inplace=True) - expected = orig2.drop_duplicates(['A', 'B'], keep=False) - result = df2 - tm.assert_frame_equal(result, expected) - # Rounding def test_round(self): # GH 2665 diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py new file mode 100644 index 0000000000000..289170527dea7 --- /dev/null +++ b/pandas/tests/frame/test_duplicates.py @@ -0,0 +1,439 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function + +import pytest + +import numpy as np + +from pandas.compat import lrange, string_types 
+from pandas import DataFrame, Series + +import pandas.util.testing as tm + + +@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']]) +def test_duplicated_with_misspelled_column_name(subset): + # GH 19730 + df = DataFrame({'A': [0, 0, 1], + 'B': [0, 0, 1], + 'C': [0, 0, 1]}) + + with pytest.raises(KeyError): + df.duplicated(subset) + + with pytest.raises(KeyError): + df.drop_duplicates(subset) + + +@pytest.mark.slow +def test_duplicated_do_not_fail_on_wide_dataframes(): + # gh-21524 + # Given the wide dataframe with a lot of columns + # with different (important!) values + data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000) + for i in range(100)} + df = DataFrame(data).T + result = df.duplicated() + + # Then duplicates produce the bool Series as a result and don't fail during + # calculation. Actual values doesn't matter here, though usually it's all + # False in this case + assert isinstance(result, Series) + assert result.dtype == np.bool + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_keep(keep, expected): + df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']}) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail(reason="GH21720; nan/None falsely considered equal") +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_nan_none(keep, expected): + df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('keep', ['first', 'last', False]) +@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A']) 
+def test_duplicated_subset(subset, keep): + df = DataFrame({'A': [0, 1, 1, 2, 0], + 'B': ['a', 'b', 'b', 'c', 'a'], + 'C': [np.nan, 3, 3, None, np.nan]}) + + if subset is None: + subset = list(df.columns) + elif isinstance(subset, string_types): + # need to have a DataFrame, not a Series + # -> select columns with singleton list, not string + subset = [subset] + + expected = df[subset].duplicated(keep=keep) + result = df.duplicated(keep=keep, subset=subset) + tm.assert_series_equal(result, expected) + + +def test_drop_duplicates(): + df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('AAA') + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep='last') + expected = df.loc[[6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep=False) + expected = df.loc[[]] + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates(np.array(['AAA', 'B'])) + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates(['AAA', 'B']) + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AAA', 'B'), keep='last') + expected = df.loc[[0, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AAA', 'B'), keep=False) + expected = df.loc[[0]] + tm.assert_frame_equal(result, expected) + + # consider everything + df2 = df.loc[:, ['AAA', 'B', 'C']] + + result = df2.drop_duplicates() + # in this case only + expected = df2.drop_duplicates(['AAA', 'B']) + tm.assert_frame_equal(result, expected) + + result = df2.drop_duplicates(keep='last') + expected = df2.drop_duplicates(['AAA', 'B'], keep='last') + tm.assert_frame_equal(result, expected) + + result = 
df2.drop_duplicates(keep=False) + expected = df2.drop_duplicates(['AAA', 'B'], keep=False) + tm.assert_frame_equal(result, expected) + + # integers + result = df.drop_duplicates('C') + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates('C', keep='last') + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + df['E'] = df['C'].astype('int8') + result = df.drop_duplicates('E') + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates('E', keep='last') + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + # GH 11376 + df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0], + 'y': [0, 6, 5, 5, 9, 1, 2]}) + expected = df.loc[df.index != 3] + tm.assert_frame_equal(df.drop_duplicates(), expected) + + df = DataFrame([[1, 0], [0, 2]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-2, 0], [0, -4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + x = np.iinfo(np.int64).max / 3 * 2 + df = DataFrame([[-x, x], [0, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-x, x], [x, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + # GH 11864 + df = DataFrame([i] * 9 for i in range(16)) + df = df.append([[1] + [0] * 8], ignore_index=True) + + for keep in ['first', 'last', False]: + assert df.duplicated(keep=keep).sum() == 0 + + +def test_drop_duplicates_with_duplicate_column_names(): + # GH17836 + df = DataFrame([ + [1, 2, 5], + [3, 4, 6], + [3, 4, 7] + ], columns=['a', 'a', 'b']) + + result0 = df.drop_duplicates() + tm.assert_frame_equal(result0, df) + + result1 = df.drop_duplicates('a') + expected1 = df[:2] + tm.assert_frame_equal(result1, expected1) + + +def test_drop_duplicates_for_take_all(): + df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', + 'foo', 'bar', 'qux', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': 
lrange(8)}) + + # single column + result = df.drop_duplicates('AAA') + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep='last') + expected = df.iloc[[2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep=False) + expected = df.iloc[[2, 6]] + tm.assert_frame_equal(result, expected) + + # multiple columns + result = df.drop_duplicates(['AAA', 'B']) + expected = df.iloc[[0, 1, 2, 3, 4, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['AAA', 'B'], keep='last') + expected = df.iloc[[0, 1, 2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['AAA', 'B'], keep=False) + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_tuple(): + df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates(('AA', 'AB')) + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AA', 'AB'), keep='last') + expected = df.loc[[6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AA', 'AB'), keep=False) + expected = df.loc[[]] # empty df + assert len(result) == 0 + tm.assert_frame_equal(result, expected) + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates((('AA', 'AB'), 'B')) + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_NA(): + # none + df = DataFrame({'A': [None, None, 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('A') + expected = df.loc[[0, 2, 3]] + 
tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep='last') + expected = df.loc[[1, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(['A', 'B']) + expected = df.loc[[0, 2, 3, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['A', 'B'], keep='last') + expected = df.loc[[1, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['A', 'B'], keep=False) + expected = df.loc[[6]] + tm.assert_frame_equal(result, expected) + + # nan + df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('C') + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep='last') + expected = df.loc[[3, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(['C', 'B']) + expected = df.loc[[0, 1, 2, 4]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['C', 'B'], keep='last') + expected = df.loc[[1, 3, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['C', 'B'], keep=False) + expected = df.loc[[1]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_NA_for_take_all(): + # none + df = DataFrame({'A': [None, None, 'foo', 'bar', + 'foo', 'baz', 'bar', 'qux'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]}) + + # single column + result = df.drop_duplicates('A') + expected = df.iloc[[0, 2, 
3, 5, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep='last') + expected = df.iloc[[1, 4, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep=False) + expected = df.iloc[[5, 7]] + tm.assert_frame_equal(result, expected) + + # nan + + # single column + result = df.drop_duplicates('C') + expected = df.iloc[[0, 1, 5, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep='last') + expected = df.iloc[[3, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep=False) + expected = df.iloc[[5, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_inplace(): + orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + df = orig.copy() + df.drop_duplicates('A', inplace=True) + expected = orig[:2] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates('A', keep='last', inplace=True) + expected = orig.loc[[6, 7]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates('A', keep=False, inplace=True) + expected = orig.loc[[]] + result = df + tm.assert_frame_equal(result, expected) + assert len(df) == 0 + + # multi column + df = orig.copy() + df.drop_duplicates(['A', 'B'], inplace=True) + expected = orig.loc[[0, 1, 2, 3]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates(['A', 'B'], keep='last', inplace=True) + expected = orig.loc[[0, 5, 6, 7]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates(['A', 'B'], keep=False, inplace=True) + expected = orig.loc[[0]] + result = df + tm.assert_frame_equal(result, expected) + + # consider everything + orig2 = orig.loc[:, ['A', 
'B', 'C']].copy() + + df2 = orig2.copy() + df2.drop_duplicates(inplace=True) + # in this case only + expected = orig2.drop_duplicates(['A', 'B']) + result = df2 + tm.assert_frame_equal(result, expected) + + df2 = orig2.copy() + df2.drop_duplicates(keep='last', inplace=True) + expected = orig2.drop_duplicates(['A', 'B'], keep='last') + result = df2 + tm.assert_frame_equal(result, expected) + + df2 = orig2.copy() + df2.drop_duplicates(keep=False, inplace=True) + expected = orig2.drop_duplicates(['A', 'B'], keep=False) + result = df2 + tm.assert_frame_equal(result, expected)
Preparation for #21645 Added some parametrised tests for `duplicated`. In addition, I reordered the tests slightly. Currently, the tests in `tests/frame/test_analytics.py` test the following functions in order: ``` drop_duplicates duplicated duplicated drop_duplicates drop_duplicates [...] ``` Since I am (and will be) adding several tests for `duplicated`, I'd like to group them within the code, and moved the two existing tests for `duplicated` to the top of that section.
https://api.github.com/repos/pandas-dev/pandas/pulls/21898
2018-07-13T20:50:59Z
2018-07-16T10:54:15Z
2018-07-16T10:54:15Z
2018-07-17T07:18:07Z
DOC: move feature to correct whatsnew section; typos
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..0d2a22f3880b4 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -16,7 +16,7 @@ New features ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison -operators. (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: +operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: 1. Define each of the operators on your ``ExtensionArray`` subclass. 2. Use an operator implementation from pandas that depends on operators that are already defined @@ -80,7 +80,7 @@ Other Enhancements <https://pandas-gbq.readthedocs.io/en/latest/changelog.html#changelog-0-5-0>`__. (:issue:`21627`) - New method :meth:`HDFStore.walk` will recursively walk the group hierarchy of an HDF5 file (:issue:`10932`) -- :func:`read_html` copies cell data across ``colspan``s and ``rowspan``s, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) +- :func:`read_html` copies cell data across ``colspan`` and ``rowspan``, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. 
This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) @@ -319,7 +319,7 @@ Timezones - Bug in :class:`Timestamp` when passing different string date formats with a timezone offset would produce different timezone offsets (:issue:`12064`) - Bug when comparing a tz-naive :class:`Timestamp` to a tz-aware :class:`DatetimeIndex` which would coerce the :class:`DatetimeIndex` to tz-naive (:issue:`12601`) - Bug in :meth:`Series.truncate` with a tz-aware :class:`DatetimeIndex` which would cause a core dump (:issue:`9243`) -- Bug in :class:`Series` constructor which would coerce tz-aware and tz-naive :class:`Timestamp`s to tz-aware (:issue:`13051`) +- Bug in :class:`Series` constructor which would coerce tz-aware and tz-naive :class:`Timestamp` to tz-aware (:issue:`13051`) - Bug in :class:`Index` with ``datetime64[ns, tz]`` dtype that did not localize integer data correctly (:issue:`20964`) - Bug in :class:`DatetimeIndex` where constructing with an integer and tz would not localize correctly (:issue:`12619`) - Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`)
Splitting up #21645. Motivation is easy: when I started working on #21645, there was nothing under "New Features" (usually having a descriptive section each) except this one-liner. To me, this belongs to "Other Enhancements". Plus found some typos and stuff where the `rst` breaks (cannot have something directly after closing backticks).
https://api.github.com/repos/pandas-dev/pandas/pulls/21897
2018-07-13T20:48:19Z
2018-07-14T15:29:02Z
2018-07-14T15:29:02Z
2018-07-17T07:19:32Z
DEPR: Deprecate Series.to_csv signature
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 5c15c7b6a742f..730a4895055c6 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -477,6 +477,7 @@ Deprecations - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) - :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) +- The signature of :meth:`Series.to_csv` has been uniformed to that of doc:meth:`DataFrame.to_csv`: the name of the first argument is now 'path_or_buf', the order of subsequent arguments has changed, the 'header' argument now defaults to True. (:issue:`19715`) - :meth:`Categorical.from_codes` has deprecated providing float values for the ``codes`` argument. (:issue:`21767`) - :func:`pandas.read_table` is deprecated. Instead, use :func:`pandas.read_csv` passing ``sep='\t'`` if necessary (:issue:`21948`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cb251d4648925..f2766f45bee2b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1714,107 +1714,6 @@ def to_panel(self): return self._constructor_expanddim(new_mgr) - def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, - columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression='infer', quoting=None, - quotechar='"', line_terminator='\n', chunksize=None, - tupleize_cols=None, date_format=None, doublequote=True, - escapechar=None, decimal='.'): - r"""Write DataFrame to a comma-separated values (csv) file - - Parameters - ---------- - path_or_buf : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. - sep : character, default ',' - Field delimiter for the output file. 
- na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - columns : sequence, optional - Columns to write - header : boolean or list of string, default True - Write out the column names. If a list of strings is given it is - assumed to be aliases for the column names - index : boolean, default True - Write row names (index) - index_label : string or sequence, or False, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. If - False do not print fields for index names. Use index_label=False - for easier importing in R - mode : str - Python write mode, default 'w' - encoding : string, optional - A string representing the encoding to use in the output file, - defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, - default 'infer' - If 'infer' and `path_or_buf` is path-like, then detect compression - from the following extensions: '.gz', '.bz2', '.zip' or '.xz' - (otherwise no compression). - - .. versionchanged:: 0.24.0 - 'infer' option added and set to default - line_terminator : string, default ``'\n'`` - The newline character or character sequence to use in the output - file - quoting : optional constant from csv module - defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` - then floats are converted to strings and thus csv.QUOTE_NONNUMERIC - will treat them as non-numeric - quotechar : string (length 1), default '\"' - character used to quote fields - doublequote : boolean, default True - Control quoting of `quotechar` inside a field - escapechar : string (length 1), default None - character used to escape `sep` and `quotechar` when appropriate - chunksize : int or None - rows to write at a time - tupleize_cols : boolean, default False - .. 
deprecated:: 0.21.0 - This argument will be removed and will always write each row - of the multi-index as a separate row in the CSV file. - - Write MultiIndex columns as a list of tuples (if True) or in - the new, expanded format, where each MultiIndex column is a row - in the CSV (if False). - date_format : string, default None - Format string for datetime objects - decimal: string, default '.' - Character recognized as decimal separator. E.g. use ',' for - European data - - """ - - if tupleize_cols is not None: - warnings.warn("The 'tupleize_cols' parameter is deprecated and " - "will be removed in a future version", - FutureWarning, stacklevel=2) - else: - tupleize_cols = False - - from pandas.io.formats.csvs import CSVFormatter - formatter = CSVFormatter(self, path_or_buf, - line_terminator=line_terminator, sep=sep, - encoding=encoding, - compression=compression, quoting=quoting, - na_rep=na_rep, float_format=float_format, - cols=columns, header=header, index=index, - index_label=index_label, mode=mode, - chunksize=chunksize, quotechar=quotechar, - tupleize_cols=tupleize_cols, - date_format=date_format, - doublequote=doublequote, - escapechar=escapechar, decimal=decimal) - formatter.save() - - if path_or_buf is None: - return formatter.path_or_buf.getvalue() - @Appender(_shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f62605c342702..52b3f79abf5e8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9270,6 +9270,115 @@ def first_valid_index(self): def last_valid_index(self): return self._find_valid_index('last') + def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, + columns=None, header=True, index=True, index_label=None, + mode='w', encoding=None, compression='infer', quoting=None, + quotechar='"', line_terminator='\n', chunksize=None, + 
tupleize_cols=None, date_format=None, doublequote=True, + escapechar=None, decimal='.'): + r"""Write object to a comma-separated values (csv) file + + Parameters + ---------- + path_or_buf : string or file handle, default None + File path or object, if None is provided the result is returned as + a string. + .. versionchanged:: 0.24.0 + Was previously named "path" for Series. + sep : character, default ',' + Field delimiter for the output file. + na_rep : string, default '' + Missing data representation + float_format : string, default None + Format string for floating point numbers + columns : sequence, optional + Columns to write + header : boolean or list of string, default True + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names + .. versionchanged:: 0.24.0 + Previously defaulted to False for Series. + index : boolean, default True + Write row names (index) + index_label : string or sequence, or False, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the object uses MultiIndex. If + False do not print fields for index names. Use index_label=False + for easier importing in R + mode : str + Python write mode, default 'w' + encoding : string, optional + A string representing the encoding to use in the output file, + defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' + If 'infer' and `path_or_buf` is path-like, then detect compression + from the following extensions: '.gz', '.bz2', '.zip' or '.xz' + (otherwise no compression). + + .. versionchanged:: 0.24.0 + 'infer' option added and set to default + line_terminator : string, default ``'\n'`` + The newline character or character sequence to use in the output + file + quoting : optional constant from csv module + defaults to csv.QUOTE_MINIMAL. 
If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric + quotechar : string (length 1), default '\"' + character used to quote fields + doublequote : boolean, default True + Control quoting of `quotechar` inside a field + escapechar : string (length 1), default None + character used to escape `sep` and `quotechar` when appropriate + chunksize : int or None + rows to write at a time + tupleize_cols : boolean, default False + .. deprecated:: 0.21.0 + This argument will be removed and will always write each row + of the multi-index as a separate row in the CSV file. + + Write MultiIndex columns as a list of tuples (if True) or in + the new, expanded format, where each MultiIndex column is a row + in the CSV (if False). + date_format : string, default None + Format string for datetime objects + decimal: string, default '.' + Character recognized as decimal separator. E.g. use ',' for + European data + + .. versionchanged:: 0.24.0 + The order of arguments for Series was changed. 
+ """ + + df = self if isinstance(self, ABCDataFrame) else self.to_frame() + + if tupleize_cols is not None: + warnings.warn("The 'tupleize_cols' parameter is deprecated and " + "will be removed in a future version", + FutureWarning, stacklevel=2) + else: + tupleize_cols = False + + from pandas.io.formats.csvs import CSVFormatter + formatter = CSVFormatter(df, path_or_buf, + line_terminator=line_terminator, sep=sep, + encoding=encoding, + compression=compression, quoting=quoting, + na_rep=na_rep, float_format=float_format, + cols=columns, header=header, index=index, + index_label=index_label, mode=mode, + chunksize=chunksize, quotechar=quotechar, + tupleize_cols=tupleize_cols, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, decimal=decimal) + formatter.save() + + if path_or_buf is None: + return formatter.path_or_buf.getvalue() + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py b/pandas/core/series.py index 21dea15772cc0..bfba6367616e2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -17,6 +17,7 @@ from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.common import ( is_categorical_dtype, + is_string_like, is_bool, is_integer, is_integer_dtype, is_float_dtype, @@ -3765,59 +3766,62 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, return result - def to_csv(self, path=None, index=True, sep=",", na_rep='', - float_format=None, header=False, index_label=None, - mode='w', encoding=None, compression='infer', date_format=None, - decimal='.'): - """ - Write Series to a comma-separated values (csv) file - - Parameters - ---------- - path : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. 
- na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - header : boolean, default False - Write out series name - index : boolean, default True - Write row names (index) - index_label : string or sequence, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. - mode : Python write mode, default 'w' - sep : character, default "," - Field delimiter for the output file. - encoding : string, optional - a string representing the encoding to use if the contents are - non-ascii, for python versions prior to 3 - compression : None or string, default 'infer' - A string representing the compression to use in the output file. - Allowed values are None, 'gzip', 'bz2', 'zip', 'xz', and 'infer'. - This input is only used when the first argument is a filename. - - .. versionchanged:: 0.24.0 - 'infer' option added and set to default - date_format: string, default None - Format string for datetime objects. - decimal: string, default '.' - Character recognized as decimal separator. E.g. 
use ',' for - European data - """ - from pandas.core.frame import DataFrame - df = DataFrame(self) - # result is only a string if no path provided, otherwise None - result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, - float_format=float_format, header=header, - index_label=index_label, mode=mode, - encoding=encoding, compression=compression, - date_format=date_format, decimal=decimal) - if path is None: - return result + @Appender(generic.NDFrame.to_csv.__doc__) + def to_csv(self, *args, **kwargs): + + names = ["path_or_buf", "sep", "na_rep", "float_format", "columns", + "header", "index", "index_label", "mode", "encoding", + "compression", "quoting", "quotechar", "line_terminator", + "chunksize", "tupleize_cols", "date_format", "doublequote", + "escapechar", "decimal"] + + old_names = ["path_or_buf", "index", "sep", "na_rep", "float_format", + "header", "index_label", "mode", "encoding", + "compression", "date_format", "decimal"] + + if "path" in kwargs: + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`, and argument " + "'path' will be renamed to 'path_or_buf'.", + FutureWarning, stacklevel=2) + kwargs["path_or_buf"] = kwargs.pop("path") + + if len(args) > 1: + # Either "index" (old signature) or "sep" (new signature) is being + # passed as second argument (while the first is the same) + maybe_sep = args[1] + + if not (is_string_like(maybe_sep) and len(maybe_sep) == 1): + # old signature + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`. Note that the " + "order of arguments changed, and the new one " + "has 'sep' in first place, for which \"{}\" is " + "not a valid value. The old order will cease to " + "be supported in a future version. 
Please refer " + "to the documentation for `DataFrame.to_csv` " + "when updating your function " + "calls.".format(maybe_sep), + FutureWarning, stacklevel=2) + names = old_names + + pos_args = dict(zip(names[:len(args)], args)) + + for key in pos_args: + if key in kwargs: + raise ValueError("Argument given by name ('{}') and position " + "({})".format(key, names.index(key))) + kwargs[key] = pos_args[key] + + if kwargs.get("header", None) is None: + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`, and argument " + "'header' will change its default value from False " + "to True: please pass an explicit value to suppress " + "this warning.", FutureWarning, + stacklevel=2) + kwargs["header"] = False # Backwards compatibility. + return self.to_frame().to_csv(**kwargs) @Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 9e3b606f31973..e1c3c29ef2846 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -893,22 +893,27 @@ def test_to_csv_line_terminators(self): def test_to_csv_from_csv_categorical(self): - # CSV with categoricals should result in the same output as when one - # would add a "normal" Series/DataFrame. - s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) - s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) + # CSV with categoricals should result in the same output + # as when one would add a "normal" Series/DataFrame. 
+ s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])) + s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"]) res = StringIO() - s.to_csv(res) + + s.to_csv(res, header=False) exp = StringIO() - s2.to_csv(exp) + + s2.to_csv(exp, header=False) assert res.getvalue() == exp.getvalue() df = DataFrame({"s": s}) df2 = DataFrame({"s": s2}) + res = StringIO() df.to_csv(res) + exp = StringIO() df2.to_csv(exp) + assert res.getvalue() == exp.getvalue() def test_to_csv_path_is_none(self): diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 76788ced44e84..1806ddd2bbcc6 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,4 +1,5 @@ import os +import warnings import pytest @@ -7,6 +8,14 @@ import pandas.util.testing as tm +def catch_to_csv_depr(): + # Catching warnings because Series.to_csv has + # been deprecated. Remove this context when + # Series.to_csv has been aligned. + + return warnings.catch_warnings(record=True) + + @pytest.mark.parametrize('obj', [ pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], @@ -15,11 +24,12 @@ @pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) def test_compression_size(obj, method, compression_only): with tm.ensure_clean() as path: - getattr(obj, method)(path, compression=compression_only) - compressed_size = os.path.getsize(path) - getattr(obj, method)(path, compression=None) - uncompressed_size = os.path.getsize(path) - assert uncompressed_size > compressed_size + with catch_to_csv_depr(): + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size @pytest.mark.parametrize('obj', [ @@ -31,16 +41,18 @@ def test_compression_size(obj, method, compression_only): def test_compression_size_fh(obj, method, 
compression_only): with tm.ensure_clean() as path: f, handles = icom._get_handle(path, 'w', compression=compression_only) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - compressed_size = os.path.getsize(path) + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + compressed_size = os.path.getsize(path) with tm.ensure_clean() as path: f, handles = icom._get_handle(path, 'w', compression=None) - with f: - getattr(obj, method)(f) - assert not f.closed + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed assert f.closed uncompressed_size = os.path.getsize(path) assert uncompressed_size > compressed_size diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 814d794d45c18..cbf9bff06ad34 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -37,7 +37,7 @@ def read_csv(self, path, **kwargs): def test_from_csv_deprecation(self): # see gh-17812 with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -45,10 +45,28 @@ def test_from_csv_deprecation(self): depr_ts = Series.from_csv(path) assert_series_equal(depr_ts, ts) + @pytest.mark.parametrize("arg", ["path", "header", "both"]) + def test_to_csv_deprecation(self, arg): + # see gh-19715 + with ensure_clean() as path: + if arg == "path": + kwargs = dict(path=path, header=False) + elif arg == "header": + kwargs = dict(path_or_buf=path) + else: # Both discrepancies match. + kwargs = dict(path=path) + + with tm.assert_produces_warning(FutureWarning): + self.ts.to_csv(**kwargs) + + # Make sure roundtrip still works. 
+ ts = self.read_csv(path) + assert_series_equal(self.ts, ts, check_names=False) + def test_from_csv(self): with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) ts = self.read_csv(path) assert_series_equal(self.ts, ts, check_names=False) @@ -65,7 +83,7 @@ def test_from_csv(self): ts_h = self.read_csv(path, header=0) assert ts_h.name == "ts" - self.series.to_csv(path) + self.series.to_csv(path, header=False) series = self.read_csv(path) assert_series_equal(self.series, series, check_names=False) @@ -92,13 +110,13 @@ def test_to_csv(self): import io with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with io.open(path, newline=None) as f: lines = f.readlines() assert (lines[1] != '\n') - self.ts.to_csv(path, index=False) + self.ts.to_csv(path, index=False, header=False) arr = np.loadtxt(path) assert_almost_equal(arr, self.ts.values) @@ -106,7 +124,7 @@ def test_to_csv_unicode_index(self): buf = StringIO() s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")]) - s.to_csv(buf, encoding="UTF-8") + s.to_csv(buf, encoding="UTF-8", header=False) buf.seek(0) s2 = self.read_csv(buf, index_col=0, encoding="UTF-8") @@ -116,7 +134,7 @@ def test_to_csv_float_format(self): with ensure_clean() as filename: ser = Series([0.123456, 0.234567, 0.567567]) - ser.to_csv(filename, float_format="%.2f") + ser.to_csv(filename, float_format="%.2f", header=False) rs = self.read_csv(filename) xp = Series([0.12, 0.23, 0.57]) @@ -128,14 +146,14 @@ def test_to_csv_list_entries(self): split = s.str.split(r'\s+and\s+') buf = StringIO() - split.to_csv(buf) + split.to_csv(buf, header=False) def test_to_csv_path_is_none(self): # GH 8215 # Series.to_csv() was returning None, inconsistent with # DataFrame.to_csv() which returned string s = Series([1, 2, 3]) - csv_str = s.to_csv(path=None) + csv_str = s.to_csv(path_or_buf=None, header=False) assert isinstance(csv_str, str) @pytest.mark.parametrize('s,encoding', [
- [x] closes #19715 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry <strike>Just a proof of concept for discussion (misses docs, whatsnew, new tests). Based on #21868</strike> @dahlbaek @gfyoung
https://api.github.com/repos/pandas-dev/pandas/pulls/21896
2018-07-13T20:22:08Z
2018-08-13T13:37:53Z
2018-08-13T13:37:53Z
2018-08-13T13:43:01Z
use npy_datetimestruct instead of pandas_datetimestruct
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 11e1e6522ef3b..1ad8c780ba7a4 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -28,9 +28,9 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt #define PyInt_AsLong PyLong_AsLong #endif -const pandas_datetimestruct _NS_MIN_DTS = { +const npy_datetimestruct _NS_MIN_DTS = { 1677, 9, 21, 0, 12, 43, 145225, 0, 0}; -const pandas_datetimestruct _NS_MAX_DTS = { +const npy_datetimestruct _NS_MAX_DTS = { 2262, 4, 11, 23, 47, 16, 854775, 807000, 0}; @@ -62,7 +62,7 @@ int dayofweek(int y, int m, int d) { * Adjusts a datetimestruct based on a minutes offset. Assumes * the current values are valid.g */ -void add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes) { +void add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes) { int isleap; /* MINUTES */ @@ -111,7 +111,7 @@ void add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes) { /* * Calculates the days offset from the 1970 epoch. */ -npy_int64 get_datetimestruct_days(const pandas_datetimestruct *dts) { +npy_int64 get_datetimestruct_days(const npy_datetimestruct *dts) { int i, month; npy_int64 year, days = 0; const int *month_lengths; @@ -211,7 +211,7 @@ static npy_int64 days_to_yearsdays(npy_int64 *days_) { * Adjusts a datetimestruct based on a seconds offset. Assumes * the current values are valid. */ -NPY_NO_EXPORT void add_seconds_to_datetimestruct(pandas_datetimestruct *dts, +NPY_NO_EXPORT void add_seconds_to_datetimestruct(npy_datetimestruct *dts, int seconds) { int minutes; @@ -236,7 +236,7 @@ NPY_NO_EXPORT void add_seconds_to_datetimestruct(pandas_datetimestruct *dts, * offset from 1970. 
*/ static void set_datetimestruct_days(npy_int64 days, - pandas_datetimestruct *dts) { + npy_datetimestruct *dts) { const int *month_lengths; int i; @@ -255,10 +255,10 @@ static void set_datetimestruct_days(npy_int64 days, } /* - * Compares two pandas_datetimestruct objects chronologically + * Compares two npy_datetimestruct objects chronologically */ -int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, - const pandas_datetimestruct *b) { +int cmp_npy_datetimestruct(const npy_datetimestruct *a, + const npy_datetimestruct *b) { if (a->year > b->year) { return 1; } else if (a->year < b->year) { @@ -319,7 +319,7 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, /* * * Tests for and converts a Python datetime.datetime or datetime.date - * object into a NumPy pandas_datetimestruct. Uses tzinfo (if present) + * object into a NumPy npy_datetimestruct. Uses tzinfo (if present) * to convert to UTC time. * * While the C API has PyDate_* and PyDateTime_* functions, the following @@ -331,12 +331,12 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, * if obj doesn't have the needed date or datetime attributes. 
*/ int convert_pydatetime_to_datetimestruct(PyObject *obj, - pandas_datetimestruct *out) { + npy_datetimestruct *out) { PyObject *tmp; int isleap; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; @@ -512,8 +512,8 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return -1; } -npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d) { +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) { npy_datetime result = NPY_DATETIME_NAT; convert_datetimestruct_to_datetime(fr, d, &result); @@ -521,7 +521,7 @@ npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, } void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) { + npy_datetimestruct *result) { convert_datetime_to_datetimestruct(fr, val, result); } @@ -539,7 +539,7 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta val, * Returns 0 on success, -1 on failure. */ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, - const pandas_datetimestruct *dts, + const npy_datetimestruct *dts, npy_datetime *out) { npy_datetime ret; @@ -643,11 +643,11 @@ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, */ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, - pandas_datetimestruct *out) { + npy_datetimestruct *out) { npy_int64 perday; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->year = 1970; out->month = 1; out->day = 1; diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index 5644ac036f198..f5c48036c16f8 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -19,30 +19,25 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #include <numpy/ndarraytypes.h> -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} pandas_datetimestruct; - typedef struct { npy_int64 days; npy_int32 hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds; } pandas_timedeltastruct; -extern const pandas_datetimestruct _NS_MIN_DTS; -extern const pandas_datetimestruct _NS_MAX_DTS; +extern const npy_datetimestruct _NS_MIN_DTS; +extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- int convert_pydatetime_to_datetimestruct(PyObject *obj, - pandas_datetimestruct *out); + npy_datetimestruct *out); -npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d); +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d); void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result); + npy_datetimestruct *result); void pandas_timedelta_to_timedeltastruct(npy_timedelta val, NPY_DATETIMEUNIT fr, @@ -61,14 +56,14 @@ int is_leapyear(npy_int64 year); * Calculates the days offset from the 1970 epoch. */ npy_int64 -get_datetimestruct_days(const pandas_datetimestruct *dts); +get_datetimestruct_days(const npy_datetimestruct *dts); /* - * Compares two pandas_datetimestruct objects chronologically + * Compares two npy_datetimestruct objects chronologically */ -int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, - const pandas_datetimestruct *b); +int cmp_npy_datetimestruct(const npy_datetimestruct *a, + const npy_datetimestruct *b); /* @@ -76,12 +71,12 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, * the current values are valid. 
*/ void -add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes); +add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes); int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, - pandas_datetimestruct *out); + npy_datetimestruct *out); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c index b1852094c301e..fa96cce1756c8 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/src/datetime/np_datetime_strings.c @@ -63,7 +63,7 @@ This file implements string parsing and creation for NumPy datetime. * Returns 0 on success, -1 on failure. */ int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, + npy_datetimestruct *out, int *out_local, int *out_tzoffset) { int year_leap = 0; int i, numdigits; @@ -86,7 +86,7 @@ int parse_iso_8601_datetime(char *str, int len, int hour_was_2_digits = 0; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; @@ -567,7 +567,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { /* - * Converts an pandas_datetimestruct to an (almost) ISO 8601 + * Converts an npy_datetimestruct to an (almost) ISO 8601 * NULL-terminated string using timezone Z (UTC). If the string fits in * the space exactly, it leaves out the NULL terminator and returns success. * @@ -580,7 +580,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { * Returns 0 on success, -1 on failure (for example if the output * string was too short). 
*/ -int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, +int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, NPY_DATETIMEUNIT base) { char *substr = outstr, sublen = outlen; int tmplen; diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h index ff1d26e5168b5..821bb79b345bd 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/src/datetime/np_datetime_strings.h @@ -51,9 +51,9 @@ This file implements string parsing and creation for NumPy datetime. */ int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, - int *out_local, - int *out_tzoffset); + npy_datetimestruct *out, + int *out_local, + int *out_tzoffset); /* * Provides a string length to use for converting datetime @@ -63,7 +63,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); /* - * Converts an pandas_datetimestruct to an (almost) ISO 8601 + * Converts an npy_datetimestruct to an (almost) ISO 8601 * NULL-terminated string using timezone Z (UTC). * * 'base' restricts the output to that unit. Set 'base' to @@ -73,7 +73,7 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); * string was too short). 
*/ int -make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, NPY_DATETIMEUNIT base); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_ diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 5011d33d189c2..7dab77131c1a0 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -47,14 +47,14 @@ static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } * Assumes GREGORIAN_CALENDAR */ npy_int64 unix_date_from_ymd(int year, int month, int day) { /* Calculate the absolute date */ - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date; - memset(&dts, 0, sizeof(pandas_datetimestruct)); + memset(&dts, 0, sizeof(npy_datetimestruct)); dts.year = year; dts.month = month; dts.day = day; - unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, &dts); + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, &dts); return unix_date; } @@ -127,7 +127,7 @@ static npy_int64 DtoB_weekday(npy_int64 unix_date) { return floordiv(unix_date + 4, 7) * 5 + mod_compat(unix_date + 4, 7) - 4; } -static npy_int64 DtoB(pandas_datetimestruct *dts, +static npy_int64 DtoB(npy_datetimestruct *dts, int roll_back, npy_int64 unix_date) { int day_of_week = dayofweek(dts->year, dts->month, dts->day); @@ -149,7 +149,7 @@ static npy_int64 DtoB(pandas_datetimestruct *dts, //************ FROM DAILY *************** static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { - pandas_datetimestruct dts; + npy_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (dts.month > af_info->to_end) { @@ -160,7 +160,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { } static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) { - pandas_datetimestruct dts; + npy_datetimestruct dts; int quarter; 
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); @@ -188,7 +188,7 @@ static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) { } static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { - pandas_datetimestruct dts; + npy_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); @@ -203,7 +203,7 @@ static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = downsample_daytime(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -262,7 +262,7 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -302,7 +302,7 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -357,7 +357,7 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -414,7 +414,7 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = 
asfreq_AtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index eaa4eca44c15b..c9b0143ffc6ca 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -442,7 +442,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, return PyString_AS_STRING(newObj); } -static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, +static void *PandasDateTimeStructToJSON(npy_datetimestruct *dts, JSONTypeContext *tc, void *outValue, size_t *_outLen) { NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; @@ -471,14 +471,14 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, } } else { PRINTMARK(); - *((JSINT64 *)outValue) = pandas_datetimestruct_to_datetime(base, dts); + *((JSINT64 *)outValue) = npy_datetimestruct_to_datetime(base, dts); return NULL; } } static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *)_obj; PRINTMARK(); @@ -489,7 +489,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PyObject *obj = (PyObject *)_obj; PRINTMARK(); @@ -509,7 +509,7 @@ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PRINTMARK(); pandas_datetime_to_datetimestruct((npy_datetime)GET_TC(tc)->longValue, diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 281e497945c5f..f8ce346b16317 100644 --- a/pandas/_libs/tslib.pyx 
+++ b/pandas/_libs/tslib.pyx @@ -21,7 +21,7 @@ PyDateTime_IMPORT from tslibs.np_datetime cimport (check_dts_bounds, - pandas_datetimestruct, + npy_datetimestruct, _string_to_dts, dt64_to_dtstruct, dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64, @@ -58,20 +58,20 @@ cdef bint PY2 = str == bytes cdef inline object create_datetime_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.datetime from its parts """ return datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz) cdef inline object create_date_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.date from its parts """ return date(dts.year, dts.month, dts.day) cdef inline object create_time_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.time from its parts """ return time(dts.hour, dts.min, dts.sec, dts.us) @@ -103,11 +103,11 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, cdef: Py_ssize_t i, n = len(arr) ndarray[int64_t] trans, deltas - pandas_datetimestruct dts + npy_datetimestruct dts object dt int64_t value ndarray[object] result = np.empty(n, dtype=object) - object (*func_create)(int64_t, pandas_datetimestruct, object, object) + object (*func_create)(int64_t, npy_datetimestruct, object, object) if box == "date": assert (tz is None), "tz should be None when converting to date" @@ -230,7 +230,7 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, bint show_ms = 0, show_us = 0, show_ns = 0, basic_format = 0 ndarray[object] result = np.empty(N, dtype=object) object ts, res - pandas_datetimestruct dts + npy_datetimestruct dts if na_rep is None: na_rep = 'NaT' @@ -454,7 +454,7 @@ cpdef 
array_to_datetime(ndarray[object] values, errors='raise', object val, py_dt ndarray[int64_t] iresult ndarray[object] oresult - pandas_datetimestruct dts + npy_datetimestruct dts bint utc_convert = bool(utc) bint seen_integer = 0 bint seen_string = 0 diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 448dbd27e8278..96e4676fe91c0 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -5,12 +5,12 @@ from cpython.datetime cimport datetime, tzinfo from numpy cimport int64_t, int32_t -from np_datetime cimport pandas_datetimestruct +from np_datetime cimport npy_datetimestruct cdef class _TSObject: cdef: - pandas_datetimestruct dts # pandas_datetimestruct + npy_datetimestruct dts # npy_datetimestruct int64_t value # numpy dt64 object tzinfo diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index b948be606645d..d0090852fa5af 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -19,7 +19,7 @@ from cpython.datetime cimport (datetime, tzinfo, PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, - pandas_datetimestruct, + npy_datetimestruct, pandas_datetime_to_datetimestruct, _string_to_dts, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64, @@ -60,7 +60,7 @@ cdef inline int64_t get_datetime64_nanos(object val) except? -1: value to nanoseconds if necessary. 
""" cdef: - pandas_datetimestruct dts + npy_datetimestruct dts NPY_DATETIMEUNIT unit npy_datetime ival @@ -93,7 +93,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): Py_ssize_t i, n = arr.size ndarray[int64_t] ivalues, iresult NPY_DATETIMEUNIT unit - pandas_datetimestruct dts + npy_datetimestruct dts shape = (<object> arr).shape @@ -157,7 +157,7 @@ def datetime_to_datetime64(ndarray[object] values): Py_ssize_t i, n = len(values) object val, inferred_tz = None ndarray[int64_t] iresult - pandas_datetimestruct dts + npy_datetimestruct dts _TSObject _ts result = np.empty(n, dtype='M8[ns]') @@ -203,7 +203,7 @@ cdef inline maybe_datetimelike_to_i8(object val): val : int64 timestamp or original input """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts try: return val.value except AttributeError: @@ -220,7 +220,7 @@ cdef inline maybe_datetimelike_to_i8(object val): # lightweight C object to hold datetime & int64 pair cdef class _TSObject: # cdef: - # pandas_datetimestruct dts # pandas_datetimestruct + # npy_datetimestruct dts # npy_datetimestruct # int64_t value # numpy dt64 # object tzinfo @@ -682,7 +682,7 @@ cdef inline int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, result : int64_t """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int64_t result, delta datetime dt @@ -730,7 +730,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): ndarray[int64_t] trans, deltas Py_ssize_t pos int64_t v, offset, utc_date - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] arr # TODO: Is there a lighter-weight way to do this? 
# See GH#17734 We should always be converting either from UTC or to UTC @@ -784,7 +784,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): Py_ssize_t i, j, pos, n = len(vals) ndarray[Py_ssize_t] posn int64_t v, offset, delta - pandas_datetimestruct dts + npy_datetimestruct dts if len(vals) == 0: return np.array([], dtype=np.int64) @@ -849,7 +849,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, int64_t *tdata int64_t v, left, right ndarray[int64_t] result, result_a, result_b, dst_hours - pandas_datetimestruct dts + npy_datetimestruct dts bint infer_dst = False, is_dst = False, fill = False bint is_coerce = errors == 'coerce', is_raise = errors == 'raise' @@ -1086,7 +1086,7 @@ def normalize_i8_timestamps(ndarray[int64_t] stamps, tz=None): """ cdef: Py_ssize_t i, n = len(stamps) - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] result = np.empty(n, dtype=np.int64) if tz is not None: @@ -1125,7 +1125,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts if is_utc(tz): with nogil: @@ -1168,13 +1168,13 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): return result -cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil: +cdef inline int64_t _normalized_stamp(npy_datetimestruct *dts) nogil: """ Normalize the given datetimestruct to midnight, then convert to int64_t. 
Parameters ---------- - *dts : pointer to pandas_datetimestruct + *dts : pointer to npy_datetimestruct Returns ------- @@ -1206,7 +1206,7 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans, deltas - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if tz is None or is_utc(tz): diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index ccf67e765e079..a298f521ef853 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -16,7 +16,7 @@ cnp.import_array() from ccalendar import get_locale_names, MONTHS_FULL, DAYS_FULL from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, get_week_of_year, get_day_of_year) -from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct, +from np_datetime cimport (npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, td64_to_tdstruct) from nattype cimport NPY_NAT @@ -47,7 +47,7 @@ def build_field_sarray(ndarray[int64_t] dtindex): """ cdef: Py_ssize_t i, count = 0 - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int32_t] years, months, days, hours, minutes, seconds, mus count = len(dtindex) @@ -94,7 +94,7 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field, cdef: Py_ssize_t i, count = 0 ndarray[object] out, names - pandas_datetimestruct dts + npy_datetimestruct dts int dow count = len(dtindex) @@ -150,7 +150,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, ndarray[int8_t] out ndarray[int32_t, ndim=2] _month_offset bint isleap - pandas_datetimestruct dts + npy_datetimestruct dts int mo_off, dom, doy, dow, ldom _month_offset = np.array( @@ -389,7 +389,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field): cdef: Py_ssize_t i, count = 0 ndarray[int32_t] out - pandas_datetimestruct dts + npy_datetimestruct dts count = len(dtindex) out = np.empty(count, dtype='i4') diff --git a/pandas/_libs/tslibs/np_datetime.pxd 
b/pandas/_libs/tslibs/np_datetime.pxd index 7c91c5551dc47..a585259286a58 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -25,15 +25,11 @@ cdef extern from "numpy/arrayscalars.h": npy_timedelta obval PyArray_DatetimeMetaData obmeta -cdef extern from "../src/datetime/np_datetime.h": - ctypedef struct pandas_datetimestruct: +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_datetimestruct: int64_t year int32_t month, day, hour, min, sec, us, ps, as - ctypedef struct pandas_timedeltastruct: - int64_t days - int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds - ctypedef enum NPY_DATETIMEUNIT: NPY_FR_Y NPY_FR_M @@ -50,27 +46,32 @@ cdef extern from "../src/datetime/np_datetime.h": NPY_FR_fs NPY_FR_as +cdef extern from "../src/datetime/np_datetime.h": + ctypedef struct pandas_timedeltastruct: + int64_t days + int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds + void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) nogil + npy_datetimestruct *result) nogil cdef int reverse_ops[6] cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1 -cdef check_dts_bounds(pandas_datetimestruct *dts) +cdef check_dts_bounds(npy_datetimestruct *dts) -cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil -cdef void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil +cdef int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil +cdef void dt64_to_dtstruct(int64_t dt64, npy_datetimestruct* out) nogil cdef void td64_to_tdstruct(int64_t td64, pandas_timedeltastruct* out) nogil -cdef int64_t pydatetime_to_dt64(datetime val, pandas_datetimestruct *dts) -cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) +cdef int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts) +cdef int64_t pydate_to_dt64(date val, npy_datetimestruct *dts) cdef npy_datetime get_datetime64_value(object obj) nogil cdef 
npy_timedelta get_timedelta64_value(object obj) nogil cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil -cdef int _string_to_dts(object val, pandas_datetimestruct* dts, +cdef int _string_to_dts(object val, npy_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index e58ec0702adcc..3c0fe98ee7b7d 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -16,27 +16,26 @@ PyDateTime_IMPORT from numpy cimport int64_t cdef extern from "../src/datetime/np_datetime.h": - int cmp_pandas_datetimestruct(pandas_datetimestruct *a, - pandas_datetimestruct *b) + int cmp_npy_datetimestruct(npy_datetimestruct *a, + npy_datetimestruct *b) - npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d - ) nogil + npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) nogil void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) nogil + npy_datetimestruct *result) nogil void pandas_timedelta_to_timedeltastruct(npy_timedelta val, NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result ) nogil - pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS + npy_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS cdef extern from "../src/datetime/np_datetime_strings.h": int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, + npy_datetimestruct *out, int *out_local, int *out_tzoffset) # ---------------------------------------------------------------------- @@ -101,17 +100,17 @@ class OutOfBoundsDatetime(ValueError): pass -cdef inline check_dts_bounds(pandas_datetimestruct *dts): +cdef inline check_dts_bounds(npy_datetimestruct *dts): """Raises OutOfBoundsDatetime if the given date is outside the range that can be represented by nanosecond-resolution 64-bit integers.""" cdef: bint error = False if (dts.year <= 1677 and - 
cmp_pandas_datetimestruct(dts, &_NS_MIN_DTS) == -1): + cmp_npy_datetimestruct(dts, &_NS_MIN_DTS) == -1): error = True elif (dts.year >= 2262 and - cmp_pandas_datetimestruct(dts, &_NS_MAX_DTS) == 1): + cmp_npy_datetimestruct(dts, &_NS_MAX_DTS) == 1): error = True if error: @@ -125,14 +124,14 @@ cdef inline check_dts_bounds(pandas_datetimestruct *dts): # ---------------------------------------------------------------------- # Conversion -cdef inline int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil: - """Convenience function to call pandas_datetimestruct_to_datetime +cdef inline int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil: + """Convenience function to call npy_datetimestruct_to_datetime with the by-far-most-common frequency NPY_FR_ns""" - return pandas_datetimestruct_to_datetime(NPY_FR_ns, dts) + return npy_datetimestruct_to_datetime(NPY_FR_ns, dts) cdef inline void dt64_to_dtstruct(int64_t dt64, - pandas_datetimestruct* out) nogil: + npy_datetimestruct* out) nogil: """Convenience function to call pandas_datetime_to_datetimestruct with the by-far-most-common frequency NPY_FR_ns""" pandas_datetime_to_datetimestruct(dt64, NPY_FR_ns, out) @@ -147,7 +146,7 @@ cdef inline void td64_to_tdstruct(int64_t td64, cdef inline int64_t pydatetime_to_dt64(datetime val, - pandas_datetimestruct *dts): + npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -160,7 +159,7 @@ cdef inline int64_t pydatetime_to_dt64(datetime val, cdef inline int64_t pydate_to_dt64(date val, - pandas_datetimestruct *dts): + npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -169,7 +168,7 @@ cdef inline int64_t pydate_to_dt64(date val, return dtstruct_to_dt64(dts) -cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, +cdef inline int _string_to_dts(object val, npy_datetimestruct* dts, int* out_local, 
int* out_tzoffset) except? -1: cdef: int result @@ -187,7 +186,7 @@ cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, cdef inline int _cstring_to_dts(char *val, int length, - pandas_datetimestruct* dts, + npy_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1: # Note: without this "extra layer" between _string_to_dts # and parse_iso_8601_datetime, calling _string_to_dts raises diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 27b7f03358a3a..1efcfaa5b9741 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -24,7 +24,7 @@ from ccalendar import MONTHS, DAYS from ccalendar cimport get_days_in_month, dayofweek from conversion cimport tz_convert_single, pydt_to_i8, localize_pydatetime from nattype cimport NPY_NAT -from np_datetime cimport (pandas_datetimestruct, +from np_datetime cimport (npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct) # --------------------------------------------------------------------- @@ -548,14 +548,14 @@ cpdef datetime shift_day(datetime other, int days): return localize_pydatetime(shifted, tz) -cdef inline int year_add_months(pandas_datetimestruct dts, int months) nogil: - """new year number after shifting pandas_datetimestruct number of months""" +cdef inline int year_add_months(npy_datetimestruct dts, int months) nogil: + """new year number after shifting npy_datetimestruct number of months""" return dts.year + (dts.month + months - 1) / 12 -cdef inline int month_add_months(pandas_datetimestruct dts, int months) nogil: +cdef inline int month_add_months(npy_datetimestruct dts, int months) nogil: """ - New month number after shifting pandas_datetimestruct + New month number after shifting npy_datetimestruct number of months. 
""" cdef int new_month = (dts.month + months) % 12 @@ -584,7 +584,7 @@ def shift_quarters(int64_t[:] dtindex, int quarters, """ cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts int count = len(dtindex) int months_to_roll, months_since, n, compare_day bint roll_check @@ -726,7 +726,7 @@ def shift_months(int64_t[:] dtindex, int months, object day=None): """ cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts int count = len(dtindex) int months_to_roll bint roll_check diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..3d38320e31533 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -22,15 +22,14 @@ from cpython.datetime cimport (PyDateTime_Check, PyDelta_Check, # import datetime C API PyDateTime_IMPORT -from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, +from np_datetime cimport (npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct, pandas_datetime_to_datetimestruct, NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "../src/datetime/np_datetime.h": - int64_t pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d - ) nogil + int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) nogil cimport util from util cimport is_period_object, is_string_object, INT32_MIN @@ -87,14 +86,14 @@ cdef extern from "period_helper.h": @cython.cdivision -cdef char* c_strftime(pandas_datetimestruct *dts, char *fmt): +cdef char* c_strftime(npy_datetimestruct *dts, char *fmt): """ Generate a nice string representation of the period object, originally from DateObject_strftime Parameters ---------- - dts : pandas_datetimestruct* + dts : npy_datetimestruct* fmt : char* Returns @@ -124,7 +123,7 @@ cdef char* c_strftime(pandas_datetimestruct *dts, char *fmt): # ---------------------------------------------------------------------- -# Conversion between date_info and pandas_datetimestruct +# Conversion 
between date_info and npy_datetimestruct cdef inline int get_freq_group(int freq) nogil: return (freq // 1000) * 1000 @@ -137,13 +136,13 @@ cdef inline int get_freq_group_index(int freq) nogil: # specifically _dont_ use cdvision or else ordinals near -1 are assigned to # incorrect dates GH#19643 @cython.cdivision(False) -cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: +cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil: """ Generate an ordinal in period space Parameters ---------- - dts: pandas_datetimestruct* + dts: npy_datetimestruct* freq : int Returns @@ -187,7 +186,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: elif freq == FR_MTH: return (dts.year - 1970) * 12 + dts.month - 1 - unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, dts) + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts) if freq >= FR_SEC: seconds = unix_date * 86400 + dts.hour * 3600 + dts.min * 60 + dts.sec @@ -238,7 +237,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: cdef void get_date_info(int64_t ordinal, int freq, - pandas_datetimestruct *dts) nogil: + npy_datetimestruct *dts) nogil: cdef: int64_t unix_date double abstime @@ -286,7 +285,7 @@ cdef int64_t get_unix_date(int64_t period_ordinal, int freq) nogil: @cython.cdivision -cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, +cdef void date_info_from_days_and_time(npy_datetimestruct *dts, int64_t unix_date, double abstime) nogil: """ @@ -294,7 +293,7 @@ cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, Parameters ---------- - dts : pandas_datetimestruct* + dts : npy_datetimestruct* unix_date : int64_t days elapsed since datetime(1970, 1, 1) abstime : double @@ -397,7 +396,7 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): cdef int DtoQ_yq(int64_t unix_date, asfreq_info *af_info, int *year): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int 
quarter date_info_from_days_and_time(&dts, unix_date, 0) @@ -432,7 +431,7 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None): cdef: ndarray[int64_t] out Py_ssize_t i, l - pandas_datetimestruct dts + npy_datetimestruct dts l = len(dtarr) @@ -610,7 +609,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, ordinal : int64_t """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts dts.year = y dts.month = m dts.day = d @@ -624,7 +623,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: cdef: - pandas_datetimestruct dts + npy_datetimestruct dts if ordinal == NPY_NAT: return NPY_NAT @@ -687,7 +686,7 @@ cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^", cdef object _period_strftime(int64_t value, int freq, object fmt): cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts char *formatted object pat, repl, result list found_pat = [False] * len(extra_fmts) @@ -743,7 +742,7 @@ ctypedef int (*accessor)(int64_t ordinal, int freq) except INT32_MIN cdef int pyear(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.year @@ -765,63 +764,63 @@ cdef int pquarter(int64_t ordinal, int freq): cdef int pmonth(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.month cdef int pday(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.day cdef int pweekday(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dayofweek(dts.year, dts.month, dts.day) cdef int pday_of_year(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return get_day_of_year(dts.year, 
dts.month, dts.day) cdef int pweek(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return ccalendar.get_week_of_year(dts.year, dts.month, dts.day) cdef int phour(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.hour cdef int pminute(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.min cdef int psecond(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return <int>dts.sec cdef int pdays_in_month(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return ccalendar.get_days_in_month(dts.year, dts.month) @@ -936,7 +935,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if is_utc(tz): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a53d794b48cfa..10e730763175d 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -15,7 +15,7 @@ from pandas._libs.khash cimport (khiter_t, kh_init_int64, kh_int64_t, kh_resize_int64, kh_get_int64) -from np_datetime cimport pandas_datetimestruct, dt64_to_dtstruct +from np_datetime cimport npy_datetimestruct, dt64_to_dtstruct from frequencies cimport get_freq_code from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info) @@ -53,7 +53,7 @@ _ONE_DAY = <int64_t>(24 * _ONE_HOUR) cpdef resolution(ndarray[int64_t] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) - pandas_datetimestruct dts + npy_datetimestruct dts int reso = RESO_DAY, curr_reso if tz is not None: 
@@ -75,7 +75,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): Py_ssize_t n = len(stamps) int reso = RESO_DAY, curr_reso ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if is_utc(tz): @@ -122,7 +122,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): return reso -cdef inline int _reso_stamp(pandas_datetimestruct *dts): +cdef inline int _reso_stamp(npy_datetimestruct *dts): if dts.us != 0: if dts.us % 1000 == 0: return RESO_MS diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 5b3d4399a6e10..a843a8e2b5612 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -33,7 +33,7 @@ from numpy cimport ndarray, int64_t from datetime import date as datetime_date from np_datetime cimport (check_dts_bounds, - dtstruct_to_dt64, pandas_datetimestruct) + dtstruct_to_dt64, npy_datetimestruct) from util cimport is_string_object @@ -77,7 +77,7 @@ def array_strptime(ndarray[object] values, object fmt, cdef: Py_ssize_t i, n = len(values) - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] iresult ndarray[object] result_timezone int year, month, day, minute, hour, second, weekday, julian diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 8e7380b37209e..e9e484c715f9a 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -2,10 +2,10 @@ # cython: profile=False from numpy cimport int64_t -from np_datetime cimport pandas_datetimestruct +from np_datetime cimport npy_datetimestruct cdef object create_timestamp_from_ts(int64_t value, - pandas_datetimestruct dts, + npy_datetimestruct dts, object tz, object freq) cdef int64_t _NS_UPPER_BOUND, _NS_LOWER_BOUND diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 864950ff03eae..be988e7247e59 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ 
b/pandas/_libs/tslibs/timestamps.pyx @@ -29,7 +29,7 @@ from nattype import NaT from nattype cimport NPY_NAT from np_datetime import OutOfBoundsDatetime from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds, - pandas_datetimestruct, dt64_to_dtstruct) + npy_datetimestruct, dt64_to_dtstruct) from offsets cimport to_offset from timedeltas import Timedelta from timedeltas cimport delta_to_nanoseconds @@ -45,7 +45,7 @@ _no_input = object() cdef inline object create_timestamp_from_ts(int64_t value, - pandas_datetimestruct dts, + npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a Timestamp from its parts """ cdef _Timestamp ts_base @@ -973,7 +973,7 @@ class Timestamp(_Timestamp): """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int64_t value, value_tz, offset object _tzinfo, result, k, v datetime ts_input
Follow-on to #21863
https://api.github.com/repos/pandas-dev/pandas/pulls/21886
2018-07-13T02:51:08Z
2018-07-14T14:54:30Z
2018-07-14T14:54:30Z
2018-07-14T15:38:11Z
TST: Parametrize tests in tests/util/test_hashing.py
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index 82b870c156cc8..0c14dcb49c56f 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -13,17 +13,17 @@ class TestHashing(object): - def setup_method(self, method): - self.df = DataFrame( - {'i32': np.array([1, 2, 3] * 3, dtype='int32'), - 'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'), - 'cat': Series(['a', 'b', 'c'] * 3).astype('category'), - 'obj': Series(['d', 'e', 'f'] * 3), - 'bool': np.array([True, False, True] * 3), - 'dt': Series(pd.date_range('20130101', periods=9)), - 'dt_tz': Series(pd.date_range('20130101', periods=9, - tz='US/Eastern')), - 'td': Series(pd.timedelta_range('2000', periods=9))}) + @pytest.fixture(params=[ + Series([1, 2, 3] * 3, dtype='int32'), + Series([None, 2.5, 3.5] * 3, dtype='float32'), + Series(['a', 'b', 'c'] * 3, dtype='category'), + Series(['d', 'e', 'f'] * 3), + Series([True, False, True] * 3), + Series(pd.date_range('20130101', periods=9)), + Series(pd.date_range('20130101', periods=9, tz='US/Eastern')), + Series(pd.timedelta_range('2000', periods=9))]) + def series(self, request): + return request.param def test_consistency(self): # check that our hash doesn't change because of a mistake @@ -34,10 +34,9 @@ def test_consistency(self): index=['foo', 'bar', 'baz']) tm.assert_series_equal(result, expected) - def test_hash_array(self): - for name, s in self.df.iteritems(): - a = s.values - tm.assert_numpy_array_equal(hash_array(a), hash_array(a)) + def test_hash_array(self, series): + a = series.values + tm.assert_numpy_array_equal(hash_array(a), hash_array(a)) def test_hash_array_mixed(self): result1 = hash_array(np.array([3, 4, 'All'])) @@ -46,10 +45,11 @@ def test_hash_array_mixed(self): tm.assert_numpy_array_equal(result1, result2) tm.assert_numpy_array_equal(result1, result3) - def test_hash_array_errors(self): - - for val in [5, 'foo', pd.Timestamp('20130101')]: - pytest.raises(TypeError, 
hash_array, val) + @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) + def test_hash_array_errors(self, val): + msg = 'must pass a ndarray-like' + with tm.assert_raises_regex(TypeError, msg): + hash_array(val) def check_equal(self, obj, **kwargs): a = hash_pandas_object(obj, **kwargs) @@ -80,31 +80,33 @@ def test_hash_tuples(self): result = hash_tuples(tups[0]) assert result == expected[0] - def test_hash_tuple(self): + @pytest.mark.parametrize('tup', [ + (1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'), + ('A', pd.Timestamp("2012-01-01"))]) + def test_hash_tuple(self, tup): # test equivalence between hash_tuples and hash_tuple - for tup in [(1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'), - ('A', pd.Timestamp("2012-01-01"))]: - result = hash_tuple(tup) - expected = hash_tuples([tup])[0] - assert result == expected - - def test_hash_scalar(self): - for val in [1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"), - pd.Timestamp("2012-01-01", tz='Europe/Brussels'), - datetime.datetime(2012, 1, 1), - pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(), - pd.Timedelta('1 days'), datetime.timedelta(1), - pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1), - np.nan, pd.NaT, None]: - result = _hash_scalar(val) - expected = hash_array(np.array([val], dtype=object), - categorize=True) - assert result[0] == expected[0] - - def test_hash_tuples_err(self): - - for val in [5, 'foo', pd.Timestamp('20130101')]: - pytest.raises(TypeError, hash_tuples, val) + result = hash_tuple(tup) + expected = hash_tuples([tup])[0] + assert result == expected + + @pytest.mark.parametrize('val', [ + 1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-01", tz='Europe/Brussels'), + datetime.datetime(2012, 1, 1), + pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(), + pd.Timedelta('1 days'), datetime.timedelta(1), + pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1), + np.nan, pd.NaT, None]) + def test_hash_scalar(self, val): + result = _hash_scalar(val) + 
expected = hash_array(np.array([val], dtype=object), categorize=True) + assert result[0] == expected[0] + + @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) + def test_hash_tuples_err(self, val): + msg = 'must be convertible to a list-of-tuples' + with tm.assert_raises_regex(TypeError, msg): + hash_tuples(val) def test_multiindex_unique(self): mi = MultiIndex.from_tuples([(118, 472), (236, 118), @@ -172,36 +174,35 @@ def test_hash_pandas_object(self, obj): self.check_equal(obj) self.check_not_equal_with_index(obj) - def test_hash_pandas_object2(self): - for name, s in self.df.iteritems(): - self.check_equal(s) - self.check_not_equal_with_index(s) - - def test_hash_pandas_empty_object(self): - for obj in [Series([], dtype='float64'), - Series([], dtype='object'), - Index([])]: - self.check_equal(obj) + def test_hash_pandas_object2(self, series): + self.check_equal(series) + self.check_not_equal_with_index(series) - # these are by-definition the same with - # or w/o the index as the data is empty + @pytest.mark.parametrize('obj', [ + Series([], dtype='float64'), Series([], dtype='object'), Index([])]) + def test_hash_pandas_empty_object(self, obj): + # these are by-definition the same with + # or w/o the index as the data is empty + self.check_equal(obj) - def test_categorical_consistency(self): + @pytest.mark.parametrize('s1', [ + Series(['a', 'b', 'c', 'd']), + Series([1000, 2000, 3000, 4000]), + Series(pd.date_range(0, periods=4))]) + @pytest.mark.parametrize('categorize', [True, False]) + def test_categorical_consistency(self, s1, categorize): # GH15143 # Check that categoricals hash consistent with their values, not codes # This should work for categoricals of any dtype - for s1 in [Series(['a', 'b', 'c', 'd']), - Series([1000, 2000, 3000, 4000]), - Series(pd.date_range(0, periods=4))]: - s2 = s1.astype('category').cat.set_categories(s1) - s3 = s2.cat.set_categories(list(reversed(s1))) - for categorize in [True, False]: - # These should all 
hash identically - h1 = hash_pandas_object(s1, categorize=categorize) - h2 = hash_pandas_object(s2, categorize=categorize) - h3 = hash_pandas_object(s3, categorize=categorize) - tm.assert_series_equal(h1, h2) - tm.assert_series_equal(h1, h3) + s2 = s1.astype('category').cat.set_categories(s1) + s3 = s2.cat.set_categories(list(reversed(s1))) + + # These should all hash identically + h1 = hash_pandas_object(s1, categorize=categorize) + h2 = hash_pandas_object(s2, categorize=categorize) + h3 = hash_pandas_object(s3, categorize=categorize) + tm.assert_series_equal(h1, h2) + tm.assert_series_equal(h1, h3) def test_categorical_with_nan_consistency(self): c = pd.Categorical.from_codes( @@ -216,13 +217,12 @@ def test_categorical_with_nan_consistency(self): assert result[1] in expected def test_pandas_errors(self): - - for obj in [pd.Timestamp('20130101')]: - with pytest.raises(TypeError): - hash_pandas_object(obj) + with pytest.raises(TypeError): + hash_pandas_object(pd.Timestamp('20130101')) with catch_warnings(record=True): obj = tm.makePanel() + with pytest.raises(TypeError): hash_pandas_object(obj) @@ -238,9 +238,9 @@ def test_hash_keys(self): def test_invalid_key(self): # this only matters for object dtypes - def f(): + msg = 'key should be a 16-byte string encoded' + with tm.assert_raises_regex(ValueError, msg): hash_pandas_object(Series(list('abc')), hash_key='foo') - pytest.raises(ValueError, f) def test_alread_encoded(self): # if already encoded then ok @@ -253,19 +253,13 @@ def test_alternate_encoding(self): obj = Series(list('abc')) self.check_equal(obj, encoding='ascii') - def test_same_len_hash_collisions(self): - - for l in range(8): - length = 2**(l + 8) + 1 - s = tm.rands_array(length, 2) - result = hash_array(s, 'utf8') - assert not result[0] == result[1] - - for l in range(8): - length = 2**(l + 8) - s = tm.rands_array(length, 2) - result = hash_array(s, 'utf8') - assert not result[0] == result[1] + @pytest.mark.parametrize('l_exp', range(8)) + 
@pytest.mark.parametrize('l_add', [0, 1]) + def test_same_len_hash_collisions(self, l_exp, l_add): + length = 2**(l_exp + 8) + l_add + s = tm.rands_array(length, 2) + result = hash_array(s, 'utf8') + assert not result[0] == result[1] def test_hash_collisions(self):
Noticed that these tests could be parametrized when investigating a comment in the `IntervalArray` PR, and originally started doing to work there. The `IntervalArray` PR is big enough already, and this is unrelated, so creating a separate PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/21883
2018-07-13T00:58:55Z
2018-07-14T14:32:45Z
2018-07-14T14:32:45Z
2018-09-24T17:22:31Z
[BLD] enable cython coverage, use cythonize
diff --git a/.coveragerc b/.coveragerc index 3f630aa6cf8f5..f5c8b701a79a8 100644 --- a/.coveragerc +++ b/.coveragerc @@ -2,6 +2,7 @@ [run] branch = False omit = */tests/* +plugins = Cython.Coverage [report] # Regexes for lines to exclude from consideration @@ -22,6 +23,7 @@ exclude_lines = if __name__ == .__main__.: ignore_errors = False +show_missing = True [html] directory = coverage_html_report diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 76dadb4ec3e23..cd3ce5c1a8f09 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1655,8 +1655,8 @@ cdef class _Period(object): return value def __setstate__(self, state): - self.freq=state[1] - self.ordinal=state[2] + self.freq = state[1] + self.ordinal = state[2] def __reduce__(self): object_state = None, self.freq, self.ordinal diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/tslibs/util.pxd similarity index 97% rename from pandas/_libs/src/util.pxd rename to pandas/_libs/tslibs/util.pxd index 7ce2181f32553..305c4f8f908e0 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -1,4 +1,4 @@ -from numpy cimport ndarray, NPY_C_CONTIGUOUS, NPY_F_CONTIGUOUS +from numpy cimport ndarray cimport numpy as cnp cnp.import_array() @@ -64,7 +64,7 @@ cdef inline bint is_datetime64_object(object obj) nogil: # -------------------------------------------------------------------- -cdef extern from "numpy_helper.h": +cdef extern from "../src/numpy_helper.h": void set_array_not_contiguous(ndarray ao) int assign_value_1d(ndarray, Py_ssize_t, object) except -1 @@ -87,7 +87,7 @@ ctypedef fused numeric: cnp.float32_t cnp.float64_t -cdef extern from "headers/stdint.h": +cdef extern from "../src/headers/stdint.h": enum: UINT8_MAX enum: UINT16_MAX enum: UINT32_MAX diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd new file mode 100644 index 0000000000000..0b7e66902cbb1 --- /dev/null +++ b/pandas/_libs/util.pxd @@ -0,0 +1 @@ +from tslibs.util cimport 
* diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index cea77e2c88b1b..b9dd46a10dfda 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -13,7 +13,7 @@ from numpy cimport ndarray, double_t, int64_t, float64_t cnp.import_array() -cdef extern from "../src/headers/cmath" namespace "std": +cdef extern from "src/headers/cmath" namespace "std": int signbit(double) nogil double sqrt(double x) nogil diff --git a/setup.py b/setup.py index 4910fcf292ca6..85c5970af018f 100755 --- a/setup.py +++ b/setup.py @@ -40,9 +40,11 @@ def is_platform_windows(): try: import Cython ver = Cython.__version__ + from Cython.Build import cythonize _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) except ImportError: _CYTHON_INSTALLED = False + cythonize = lambda x, *args, **kwargs: x # dummy func # The import of Extension must be after the import of Cython, otherwise # we do not get the appropriately patched class. @@ -419,11 +421,66 @@ def get_tag(self): cmdclass['build_src'] = DummyBuildSrc cmdclass['build_ext'] = CheckingBuildExt +# ---------------------------------------------------------------------- +# Preparation of compiler arguments + if sys.byteorder == 'big': endian_macro = [('__BIG_ENDIAN__', '1')] else: endian_macro = [('__LITTLE_ENDIAN__', '1')] + +if is_platform_windows(): + extra_compile_args = [] +else: + # args to ignore warnings + extra_compile_args = ['-Wno-unused-function'] + + +# enable coverage by building cython files by setting the environment variable +# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) +linetrace = os.environ.get('PANDAS_CYTHON_COVERAGE', False) +CYTHON_TRACE = str(int(bool(linetrace))) + +# Note: if not using `cythonize`, coverage can be enabled by +# pinning `ext.cython_directives = directives` to each ext in extensions. 
+# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy +directives = {'linetrace': False} +macros = [] +if linetrace: + # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py + directives['linetrace'] = True + macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] + + +# ---------------------------------------------------------------------- +# Specification of Dependencies + +# TODO: Need to check to see if e.g. `linetrace` has changed and possibly +# re-compile. +def maybe_cythonize(extensions, *args, **kwargs): + """ + Render tempita templates before calling cythonize + """ + if len(sys.argv) > 1 and 'clean' in sys.argv: + # Avoid running cythonize on `python setup.py clean` + # See https://github.com/cython/cython/issues/1495 + return extensions + + numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') + # TODO: Is this really necessary here? + for ext in extensions: + if (hasattr(ext, 'include_dirs') and + numpy_incl not in ext.include_dirs): + ext.include_dirs.append(numpy_incl) + + if cython: + build_ext.render_templates(_pxifiles) + return cythonize(extensions, *args, **kwargs) + else: + return extensions + + lib_depends = ['inference'] @@ -434,23 +491,13 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): if suffix == '.pyx': lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src') for f in lib_depends] - lib_depends.append('pandas/_libs/src/util.pxd') + lib_depends.append('pandas/_libs/util.pxd') else: lib_depends = [] common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] -def pxd(name): - return pjoin('pandas', name + '.pxd') - - -if is_platform_windows(): - extra_compile_args = [] -else: - # args to ignore warnings - extra_compile_args = ['-Wno-unused-function'] - lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h', 'pandas/_libs/src/parse_helper.h', 'pandas/_libs/src/compat_helper.h'] @@ -466,22 +513,18 @@ def pxd(name): ext_data = { '_libs.algos': { 'pyxfile': 
'_libs/algos', - 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'], 'depends': _pxi_dep['algos']}, '_libs.groupby': { 'pyxfile': '_libs/groupby', - 'pxdfiles': ['_libs/src/util', '_libs/algos'], 'depends': _pxi_dep['groupby']}, '_libs.hashing': { 'pyxfile': '_libs/hashing'}, '_libs.hashtable': { 'pyxfile': '_libs/hashtable', - 'pxdfiles': ['_libs/hashtable', '_libs/missing', '_libs/khash'], 'depends': (['pandas/_libs/src/klib/khash_python.h'] + _pxi_dep['hashtable'])}, '_libs.index': { 'pyxfile': '_libs/index', - 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['index'], 'sources': np_datetime_sources}, '_libs.indexing': { @@ -490,21 +533,15 @@ def pxd(name): 'pyxfile': '_libs/internals'}, '_libs.interval': { 'pyxfile': '_libs/interval', - 'pxdfiles': ['_libs/hashtable'], 'depends': _pxi_dep['interval']}, '_libs.join': { 'pyxfile': '_libs/join', - 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['join']}, '_libs.lib': { 'pyxfile': '_libs/lib', - 'pxdfiles': ['_libs/src/util', - '_libs/missing', - '_libs/tslibs/conversion'], 'depends': lib_depends + tseries_depends}, '_libs.missing': { 'pyxfile': '_libs/missing', - 'pxdfiles': ['_libs/src/util'], 'depends': tseries_depends}, '_libs.parsers': { 'pyxfile': '_libs/parsers', @@ -514,12 +551,9 @@ def pxd(name): 'sources': ['pandas/_libs/src/parser/tokenizer.c', 'pandas/_libs/src/parser/io.c']}, '_libs.reduction': { - 'pyxfile': '_libs/reduction', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/reduction'}, '_libs.ops': { - 'pyxfile': '_libs/ops', - 'pxdfiles': ['_libs/src/util', - '_libs/missing']}, + 'pyxfile': '_libs/ops'}, '_libs.properties': { 'pyxfile': '_libs/properties', 'include': []}, @@ -534,113 +568,66 @@ def pxd(name): 'depends': _pxi_dep['sparse']}, '_libs.tslib': { 'pyxfile': '_libs/tslib', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/conversion', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timestamps', - '_libs/tslibs/timezones', - 
'_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.ccalendar': { 'pyxfile': '_libs/tslibs/ccalendar'}, '_libs.tslibs.conversion': { 'pyxfile': '_libs/tslibs/conversion', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype', - '_libs/tslibs/timezones', - '_libs/tslibs/timedeltas'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.fields': { 'pyxfile': '_libs/tslibs/fields', - 'pxdfiles': ['_libs/tslibs/ccalendar', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.frequencies': { - 'pyxfile': '_libs/tslibs/frequencies', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/frequencies'}, '_libs.tslibs.nattype': { - 'pyxfile': '_libs/tslibs/nattype', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/nattype'}, '_libs.tslibs.np_datetime': { 'pyxfile': '_libs/tslibs/np_datetime', 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.offsets': { 'pyxfile': '_libs/tslibs/offsets', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/conversion', - '_libs/tslibs/frequencies', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.parsing': { - 'pyxfile': '_libs/tslibs/parsing', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/parsing'}, '_libs.tslibs.period': { 'pyxfile': '_libs/tslibs/period', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timezones', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, '_libs.tslibs.resolution': { 'pyxfile': '_libs/tslibs/resolution', - 'pxdfiles': ['_libs/src/util', - '_libs/khash', - '_libs/tslibs/ccalendar', - '_libs/tslibs/frequencies', - 
'_libs/tslibs/timezones'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.strptime': { 'pyxfile': '_libs/tslibs/strptime', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.timedeltas': { 'pyxfile': '_libs/tslibs/timedeltas', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.timestamps': { 'pyxfile': '_libs/tslibs/timestamps', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/conversion', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timezones'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.timezones': { - 'pyxfile': '_libs/tslibs/timezones', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/timezones'}, '_libs.testing': { 'pyxfile': '_libs/testing'}, '_libs.window': { 'pyxfile': '_libs/window', - 'pxdfiles': ['_libs/skiplist', '_libs/src/util'], 'language': 'c++', 'suffix': '.cpp'}, '_libs.writers': { - 'pyxfile': '_libs/writers', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/writers'}, 'io.sas._sas': { 'pyxfile': 'io/sas/sas'}, 'io.msgpack._packer': { - 'macros': endian_macro, + 'macros': endian_macro + macros, 'depends': ['pandas/_libs/src/msgpack/pack.h', 'pandas/_libs/src/msgpack/pack_template.h'], 'include': ['pandas/_libs/src/msgpack'] + common_include, @@ -652,7 +639,7 @@ def pxd(name): 'depends': ['pandas/_libs/src/msgpack/unpack.h', 'pandas/_libs/src/msgpack/unpack_define.h', 'pandas/_libs/src/msgpack/unpack_template.h'], - 'macros': endian_macro, + 'macros': endian_macro + macros, 'include': ['pandas/_libs/src/msgpack'] + common_include, 'language': 'c++', 'suffix': '.cpp', @@ -668,10 +655,6 @@ def pxd(name): sources = [srcpath(data['pyxfile'], suffix=source_suffix, subdir='')] - pxds = [pxd(x) for x in 
data.get('pxdfiles', [])] - if suffix == '.pyx' and pxds: - sources.extend(pxds) - sources.extend(data.get('sources', [])) include = data.get('include', common_include) @@ -681,7 +664,7 @@ def pxd(name): depends=data.get('depends', []), include_dirs=include, language=data.get('language', 'c'), - define_macros=data.get('macros', []), + define_macros=data.get('macros', macros), extra_compile_args=extra_compile_args) extensions.append(obj) @@ -708,7 +691,8 @@ def pxd(name): 'pandas/_libs/src/ujson/lib', 'pandas/_libs/src/datetime'], extra_compile_args=(['-D_GNU_SOURCE'] + - extra_compile_args)) + extra_compile_args), + define_macros=macros) extensions.append(ujson_ext) @@ -718,7 +702,8 @@ def pxd(name): # extension for pseudo-safely moving bytes into mutable buffers _move_ext = Extension('pandas.util._move', depends=[], - sources=['pandas/util/move.c']) + sources=['pandas/util/move.c'], + define_macros=macros) extensions.append(_move_ext) # The build cache system does string matching below this point. @@ -729,7 +714,7 @@ def pxd(name): version=versioneer.get_version(), packages=find_packages(include=['pandas', 'pandas.*']), package_data={'': ['templates/*', '_libs/*.dll']}, - ext_modules=extensions, + ext_modules=maybe_cythonize(extensions, compiler_directives=directives), maintainer_email=EMAIL, description=DESCRIPTION, license=LICENSE,
- [x] closes #18089 Doesn't close #12624, but merits a mention. Usage: ``` $ export PANDAS_CYTHON_COVERAGE=TRUE $ python setup.py build_ext --inplace $ coverage erase $ pytest pandas/tests/scalar --cov=pandas/_libs [...] Name Stmts Miss Cover Missing ----------------------------------------------------------------------- pandas/_libs/__init__.py 1 0 100% pandas/_libs/algos.pyx 162 116 28% 43, 45-47, 58, 66, 82-107, 132-145, 149-153, 160-181, 183, 198-206, 209, 212-244, 263-275, 278-284, 288-289, 292-299, 302-310 pandas/_libs/algos_common_helper.pxi 1884 1851 2% 25-1203, 1214-1942, 1958-1962, 1964, 1967, 1974-1976, 1981-1987, 1989-1991, 1999-3171, 3177-3227, 3232-3243 pandas/_libs/groupby.pyx 168 148 12% 29-94, 113-134, 143-163, 165-171, 190-207, 210-213, 224-257, 260-265, 293-303, 306-314, 317-323, 354-380 pandas/_libs/hashing.pyx 97 94 3% 48-190 pandas/_libs/hashtable.pyx 83 75 10% 59-142, 154-177 pandas/_libs/hashtable_class_helper.pxi 846 828 2% 19-805, 810, 813, 818-829, 837-864, 878-1423 pandas/_libs/index.pyx 384 300 22% 34, 36-39, 46, 49-71, 81-89, 95-100, 110, 114, 126, 130, 135-143, 147, 153-202, 204, 211-214, 221, 223, 227, 233-236, 245-258, 261, 277, 280, 289, 293-314, 318-403, 406, 409, 411, 415-429, 433-434, 440-525, 536-542, 546, 549-694 pandas/_libs/index_class_helper.pxi 95 90 5% 14-115, 117, 120, 122-123, 126-165 [...] ``` (I'll post results from a full test run in a bit) Apparently there is an issue with the Cython.Coverage plugin that causes cdef function/class definition lines to not get covered. Not sure if that's going to be fixed or if we need to find a workaround. To make `cythonize` work I had to move `util.pxd` to `tslibs`, then cimport everything into a _libs.util pxd namespace. There may be a way around this that I haven't found. If not, there are parts of `tslibs.util` that are not used in `tslibs`, can be moved to `_libs.util`. 
We _could_ remove a whole bunch more from the `ext_data` dictionary, but I'm saving that until after the first pass of the review process. (I think doing so will lighten the build, not sure) This will have a merge conflict with #21878, but it'll be easy to resolve when the time comes.
https://api.github.com/repos/pandas-dev/pandas/pulls/21879
2018-07-12T18:11:58Z
2018-07-20T20:28:43Z
2018-07-20T20:28:43Z
2018-07-22T14:41:16Z
Cleanup cimports, implement bits of numpy_helper in util.pxd
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index f5f9c06a7e4c2..5e4a431caca00 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1,21 +1,20 @@ # -*- coding: utf-8 -*- # cython: profile=False -cimport numpy as cnp -import numpy as np - cimport cython +from cython cimport Py_ssize_t -cnp.import_array() +from libc.stdlib cimport malloc, free +import numpy as np from numpy cimport (ndarray, double_t, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) -from libc.stdlib cimport malloc, free from util cimport numeric, get_nat + from algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE, TIEBREAK_MIN, TIEBREAK_MAX, TIEBREAK_FIRST, TIEBREAK_DENSE) from algos import take_2d_axis1_float64_float64, groupsort_indexer, tiebreakers @@ -74,8 +73,8 @@ cdef inline float64_t kth_smallest_c(float64_t* a, double_t x, t l = 0 - m = n -1 - while (l<m): + m = n - 1 + while l < m: x = a[k] i = l j = m diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index e408e02b9d5a1..31ef4b7a3e807 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -9,14 +9,12 @@ from cpython.slice cimport PySlice_Check import numpy as np cimport numpy as cnp from numpy cimport (ndarray, float64_t, int32_t, - int64_t, uint8_t, uint64_t, intp_t) + int64_t, uint8_t, uint64_t, intp_t, + # Note: NPY_DATETIME, NPY_TIMEDELTA are only available + # for cimport in cython>=0.27.3 + NPY_DATETIME, NPY_TIMEDELTA) cnp.import_array() -cdef extern from "numpy/arrayobject.h": - # These can be cimported directly from numpy in cython>=0.27.3 - cdef enum NPY_TYPES: - NPY_DATETIME - NPY_TIMEDELTA cimport util diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 4129132251682..b0d8ce9e4b237 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -1,17 +1,22 @@ -cimport numpy as cnp -import numpy as np +# -*- coding: utf-8 -*- +import numbers + +from cpython.object cimport (Py_EQ, Py_NE, 
Py_GT, Py_LT, Py_GE, Py_LE, + PyObject_RichCompare) -cimport util cimport cython -import cython +from cython cimport Py_ssize_t + +import numpy as np from numpy cimport ndarray + + +cimport util + from tslibs import Timestamp from tslibs.timezones cimport tz_compare -from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE, - PyObject_RichCompare) -import numbers _VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 23aebc85e6300..172117f7d8059 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -753,4 +753,4 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys, return result -include "inference.pyx" +include "src/inference.pyx" diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 5cfa51dc8a0be..f409fec44890d 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -30,24 +30,6 @@ PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) { return PyArray_Scalar(item, PyArray_DESCR(ap), (PyObject*)ap); } -// returns ASCII or UTF8 (py3) view on python str -// python object owns memory, should not be freed -PANDAS_INLINE const char* get_c_string(PyObject* obj) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_AsUTF8(obj); -#else - return PyString_AsString(obj); -#endif -} - -PANDAS_INLINE PyObject* char_to_string(const char* data) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(data); -#else - return PyString_FromString(data); -#endif -} - void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index a8cd78016665f..728eb63dc836c 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -4,6 +4,9 @@ cnp.import_array() cimport cpython from cpython cimport PyTypeObject +from cpython.string cimport PyString_FromString, PyString_AsString 
+ +DEF PY3 = bytes != str cdef extern from "Python.h": # Note: importing extern-style allows us to declare these as nogil @@ -14,6 +17,8 @@ cdef extern from "Python.h": bint PyFloat_Check(object obj) nogil bint PyComplex_Check(object obj) nogil bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil + char* PyUnicode_AsUTF8(object unicode) + object PyUnicode_FromString(const char* u) nogil cdef extern from "numpy/arrayobject.h": @@ -69,8 +74,6 @@ cdef extern from "numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) - char *get_c_string(object) except NULL - object char_to_string(char*) ctypedef fused numeric: cnp.int8_t @@ -101,6 +104,26 @@ cdef extern from "headers/stdint.h": enum: INT64_MIN +cdef inline const char* get_c_string(object obj) except NULL: + """ + returns ASCII or UTF8 (py3) view on python str + python object owns memory, should not be freed + """ + # TODO: this docstring is copied verbatim from version that was + # directly in numpy_helper.C; is it still accurate? 
+ IF PY3: + return PyUnicode_AsUTF8(obj) + ELSE: + return PyString_AsString(obj) + + +cdef inline object char_to_string(const char* data): + IF PY3: + return PyUnicode_FromString(data) + ELSE: + return PyString_FromString(data) + + cdef inline object get_value_at(ndarray arr, object loc): cdef: Py_ssize_t i, sz diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 281e497945c5f..1d44af6b81992 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,24 +1,29 @@ # -*- coding: utf-8 -*- # cython: profile=False +cimport cython +from cython cimport Py_ssize_t + +from cpython cimport PyFloat_Check, PyUnicode_Check + +from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, + PyDateTime_CheckExact, + PyDateTime_IMPORT, + timedelta, datetime, date, time) +# import datetime C API +PyDateTime_IMPORT + cimport numpy as cnp from numpy cimport int64_t, ndarray, float64_t import numpy as np cnp.import_array() +import pytz -from cpython cimport PyFloat_Check, PyUnicode_Check from util cimport (is_integer_object, is_float_object, is_string_object, is_datetime64_object) -from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, - PyDateTime_CheckExact, - PyDateTime_IMPORT, - timedelta, datetime, date, time) -# import datetime C API -PyDateTime_IMPORT - from tslibs.np_datetime cimport (check_dts_bounds, pandas_datetimestruct, @@ -30,13 +35,6 @@ from tslibs.np_datetime import OutOfBoundsDatetime from tslibs.parsing import parse_datetime_string -cimport cython -from cython cimport Py_ssize_t - - -import pytz - - from tslibs.timedeltas cimport cast_from_unit from tslibs.timezones cimport (is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_pytz, get_dst_info) @@ -54,7 +52,8 @@ from tslibs.timestamps cimport (create_timestamp_from_ts, _NS_UPPER_BOUND, _NS_LOWER_BOUND) from tslibs.timestamps import Timestamp -cdef bint PY2 = str == bytes + +DEF PY2 = str == bytes cdef inline object create_datetime_from_ts( @@ -556,8 +555,9 @@ cpdef 
array_to_datetime(ndarray[object] values, errors='raise', if len(val) == 0 or val in nat_strings: iresult[i] = NPY_NAT continue - if PyUnicode_Check(val) and PY2: - val = val.encode('utf-8') + if PY2: + if PyUnicode_Check(val): + val = val.encode('utf-8') try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 27b7f03358a3a..094a37b210516 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -334,8 +334,6 @@ class _BaseOffset(object): # other is not a DateOffset object return False - return self._params == other._params - def __ne__(self, other): return not self == other diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index f5048d32e826b..580d155f87fa8 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -10,7 +10,7 @@ cimport cython from cython cimport Py_ssize_t -from datetime import datetime +from cpython.datetime cimport datetime import time import numpy as np @@ -37,7 +37,7 @@ from dateutil.parser import DEFAULTPARSER from dateutil.parser import parse as du_parse from ccalendar import MONTH_NUMBERS -from nattype import nat_strings +from nattype import nat_strings, NaT # ---------------------------------------------------------------------- # Constants @@ -54,9 +54,6 @@ cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') cdef set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'} -NAT_SENTINEL = object() -# This allows us to reference NaT without having to import it - # ---------------------------------------------------------------------- @@ -136,9 +133,6 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): res = parse_datetime_string_with_reso(arg, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst) - if res[0] is NAT_SENTINEL: - from pandas._libs.tslib import NaT - res = (NaT,) + res[1:] return res @@ -206,7 +200,7 @@ cdef 
inline object _parse_dateabbr_string(object date_string, object default, # should be NaT??? if date_string in nat_strings: - return NAT_SENTINEL, NAT_SENTINEL, '' + return NaT, NaT, '' date_string = date_string.upper() date_len = len(date_string) @@ -407,7 +401,7 @@ def try_parse_dates(ndarray[object] values, parser=None, # EAFP here try: - for i from 0 <= i < n: + for i in range(n): if values[i] == '': result[i] = np.nan else: @@ -419,7 +413,7 @@ def try_parse_dates(ndarray[object] values, parser=None, parse_date = parser try: - for i from 0 <= i < n: + for i in range(n): if values[i] == '': result[i] = np.nan else: @@ -459,7 +453,7 @@ def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times, else: parse_time = time_parser - for i from 0 <= i < n: + for i in range(n): d = parse_date(str(dates[i])) t = parse_time(str(times[i])) result[i] = datetime(d.year, d.month, d.day, @@ -479,7 +473,7 @@ def try_parse_year_month_day(ndarray[object] years, ndarray[object] months, raise ValueError('Length of years/months/days must all be equal') result = np.empty(n, dtype='O') - for i from 0 <= i < n: + for i in range(n): result[i] = datetime(int(years[i]), int(months[i]), int(days[i])) return result @@ -505,7 +499,7 @@ def try_parse_datetime_components(ndarray[object] years, raise ValueError('Length of all datetime components must be equal') result = np.empty(n, dtype='O') - for i from 0 <= i < n: + for i in range(n): float_secs = float(seconds[i]) secs = int(float_secs) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..2ce1008d0ffb3 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -46,14 +46,14 @@ from conversion cimport tz_convert_utc_to_tzlocal from frequencies cimport (get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str, get_rule_month) -from parsing import parse_time_string, NAT_SENTINEL +from parsing import parse_time_string from resolution import Resolution 
from nattype import nat_strings, NaT, iNaT from nattype cimport _nat_scalar_rules, NPY_NAT, is_null_datetimelike from offsets cimport to_offset from offsets import _Tick -cdef bint PY2 = str == bytes +DEF PY2 = str == bytes cdef extern from "period_helper.h": @@ -729,7 +729,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt): result = result.replace(str_extra_fmts[i], repl) - if PY2: + IF PY2: result = result.decode('utf-8', 'ignore') return result @@ -1820,7 +1820,7 @@ class Period(_Period): value = str(value) value = value.upper() dt, _, reso = parse_time_string(value, freq) - if dt is NAT_SENTINEL: + if dt is NaT: ordinal = iNaT if freq is None: diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a53d794b48cfa..5f085ff135d93 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -339,10 +339,6 @@ class Resolution(object): # ---------------------------------------------------------------------- # Frequency Inference - -# TODO: this is non performant logic here (and duplicative) and this -# simply should call unique_1d directly -# plus no reason to depend on khash directly cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): cdef: Py_ssize_t i, n = len(arr) @@ -367,6 +363,50 @@ cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): return result +cdef object month_position_check(fields, weekdays): + cdef: + int32_t daysinmonth, y, m, d + bint calendar_end = True + bint business_end = True + bint calendar_start = True + bint business_start = True + bint cal + int32_t[:] years + int32_t[:] months + int32_t[:] days + + years = fields['Y'] + months = fields['M'] + days = fields['D'] + + for y, m, d, wd in zip(years, months, days, weekdays): + if calendar_start: + calendar_start &= d == 1 + if business_start: + business_start &= d == 1 or (d <= 3 and wd == 0) + + if calendar_end or business_end: + daysinmonth = get_days_in_month(y, m) + cal = d == 
daysinmonth + if calendar_end: + calendar_end &= cal + if business_end: + business_end &= cal or (daysinmonth - d < 3 and wd == 4) + elif not calendar_start and not business_start: + break + + if calendar_end: + return 'ce' + elif business_end: + return 'be' + elif calendar_start: + return 'cs' + elif business_start: + return 'bs' + else: + return None + + cdef inline bint _is_multiple(int64_t us, int64_t mult): return us % mult == 0 @@ -475,52 +515,8 @@ cdef class _FrequencyInferer(object): def rep_stamp(self): return Timestamp(self.values[0]) - cdef month_position_check(self): - # TODO: cythonize this, very slow - cdef: - int32_t daysinmonth, y, m, d - bint calendar_end = True - bint business_end = True - bint calendar_start = True - bint business_start = True - bint cal - int32_t[:] years - int32_t[:] months - int32_t[:] days - - fields = self.fields - years = fields['Y'] - months = fields['M'] - days = fields['D'] - weekdays = self.index.dayofweek - - for y, m, d, wd in zip(years, months, days, weekdays): - - if calendar_start: - calendar_start &= d == 1 - if business_start: - business_start &= d == 1 or (d <= 3 and wd == 0) - - if calendar_end or business_end: - daysinmonth = get_days_in_month(y, m) - cal = d == daysinmonth - if calendar_end: - calendar_end &= cal - if business_end: - business_end &= cal or (daysinmonth - d < 3 and wd == 4) - elif not calendar_start and not business_start: - break - - if calendar_end: - return 'ce' - elif business_end: - return 'be' - elif calendar_start: - return 'cs' - elif business_start: - return 'bs' - else: - return None + cdef object month_position_check(self): + return month_position_check(self.fields, self.index.dayofweek) @cache_readonly def mdiffs(self):
Removes a few unnecessary uses of `cnp.import_array()` Uses cython's conditional compilation to avoid runtime PY2/PY3 checks Arranges cython imports in stdlib-->3rd party--> internal order. Separates out the one part of `libresolution._FrequencyInferer` that we actually _do_ want to keep in cython Couple of small lintings in groupby.pyx
https://api.github.com/repos/pandas-dev/pandas/pulls/21878
2018-07-12T17:50:20Z
2018-07-14T14:36:11Z
2018-07-14T14:36:11Z
2018-07-14T15:52:16Z
TST: Parameterize more tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index e49b2bedee47b..c1376670ffbf0 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -259,7 +259,10 @@ def string_dtype(request): return request.param -@pytest.fixture(params=[float, "float32", "float64"]) +FLOAT_DTYPES = [float, "float32", "float64"] + + +@pytest.fixture(params=FLOAT_DTYPES) def float_dtype(request): """ Parameterized fixture for float dtypes. @@ -286,6 +289,7 @@ def complex_dtype(request): UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES +ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES @pytest.fixture(params=SIGNED_INT_DTYPES) @@ -334,6 +338,26 @@ def any_int_dtype(request): return request.param +@pytest.fixture(params=ALL_REAL_DTYPES) +def any_real_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtypes. + + * int8 + * uint8 + * int16 + * uint16 + * int32 + * uint32 + * int64 + * uint64 + * float32 + * float64 + """ + + return request.param + + @pytest.fixture def mock(): """ diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 1eeeec0be3b8b..76a50a9ecf5e7 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -9,7 +9,7 @@ import numpy as np from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, Categorical, compat, concat, option_context) -from pandas.compat import u +from pandas.compat import u, PY2 from pandas import _np_version_under1p14 from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype @@ -21,6 +21,11 @@ import pandas as pd +@pytest.fixture(params=[str, compat.text_type]) +def text_dtype(request): + return request.param + + class TestDataFrameDataTypes(TestData): def test_concat_empty_dataframe_dtypes(self): @@ -351,27 +356,23 @@ def test_select_dtypes_datetime_with_tz(self): expected = df3.reindex(columns=[]) 
assert_frame_equal(result, expected) - def test_select_dtypes_str_raises(self): - df = DataFrame({'a': list('abc'), - 'g': list(u('abc')), - 'b': list(range(1, 4)), - 'c': np.arange(3, 6).astype('u1'), - 'd': np.arange(4.0, 7.0, dtype='float64'), - 'e': [True, False, True], - 'f': pd.date_range('now', periods=3).values}) - string_dtypes = set((str, 'str', np.string_, 'S1', - 'unicode', np.unicode_, 'U1')) - try: - string_dtypes.add(unicode) - except NameError: - pass - for dt in string_dtypes: - with tm.assert_raises_regex(TypeError, - 'string dtypes are not allowed'): - df.select_dtypes(include=[dt]) - with tm.assert_raises_regex(TypeError, - 'string dtypes are not allowed'): - df.select_dtypes(exclude=[dt]) + @pytest.mark.parametrize( + "dtype", [str, "str", np.string_, "S1", + "unicode", np.unicode_, "U1"] + ([unicode] if PY2 else [])) + @pytest.mark.parametrize("arg", ["include", "exclude"]) + def test_select_dtypes_str_raises(self, dtype, arg): + df = DataFrame({"a": list("abc"), + "g": list(u("abc")), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values}) + msg = "string dtypes are not allowed" + kwargs = {arg: [dtype]} + + with tm.assert_raises_regex(TypeError, msg): + df.select_dtypes(**kwargs) def test_select_dtypes_bad_arg_raises(self): df = DataFrame({'a': list('abc'), @@ -502,61 +503,59 @@ def test_astype_with_view(self): tf = self.frame.astype(np.float64) casted = tf.astype(np.int64, copy=False) # noqa - def test_astype_cast_nan_inf_int(self): - # GH14265, check nan and inf raise error when converting to int - types = [np.int32, np.int64] - values = [np.nan, np.inf] - msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + @pytest.mark.parametrize("val", [np.nan, np.inf]) + def test_astype_cast_nan_inf_int(self, val, dtype): + # see gh-14265 + # + # Check 
NaN and inf --> raise error when converting to int. + msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" + df = DataFrame([val]) - for this_type in types: - for this_val in values: - df = DataFrame([this_val]) - with tm.assert_raises_regex(ValueError, msg): - df.astype(this_type) + with tm.assert_raises_regex(ValueError, msg): + df.astype(dtype) - def test_astype_str(self): - # GH9757 - a = Series(date_range('2010-01-04', periods=5)) - b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern')) - c = Series([Timedelta(x, unit='d') for x in range(5)]) + def test_astype_str(self, text_dtype): + # see gh-9757 + a = Series(date_range("2010-01-04", periods=5)) + b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern")) + c = Series([Timedelta(x, unit="d") for x in range(5)]) d = Series(range(5)) e = Series([0.0, 0.2, 0.4, 0.6, 0.8]) - df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d, 'e': e}) - - # datetimelike - # Test str and unicode on python 2.x and just str on python 3.x - for tt in set([str, compat.text_type]): - result = df.astype(tt) - - expected = DataFrame({ - 'a': list(map(tt, map(lambda x: Timestamp(x)._date_repr, - a._values))), - 'b': list(map(tt, map(Timestamp, b._values))), - 'c': list(map(tt, map(lambda x: Timedelta(x) - ._repr_base(format='all'), c._values))), - 'd': list(map(tt, d._values)), - 'e': list(map(tt, e._values)), - }) - - assert_frame_equal(result, expected) - - # float/nan - # 11302 - # consistency in astype(str) - for tt in set([str, compat.text_type]): - result = DataFrame([np.NaN]).astype(tt) - expected = DataFrame(['nan']) - assert_frame_equal(result, expected) - - result = DataFrame([1.12345678901234567890]).astype(tt) - if _np_version_under1p14: - # < 1.14 truncates - expected = DataFrame(['1.12345678901']) - else: - # >= 1.14 preserves the full repr - expected = DataFrame(['1.1234567890123457']) - assert_frame_equal(result, expected) + df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e}) + + # 
Datetime-like + # Test str and unicode on Python 2.x and just str on Python 3.x + result = df.astype(text_dtype) + + expected = DataFrame({ + "a": list(map(text_dtype, + map(lambda x: Timestamp(x)._date_repr, a._values))), + "b": list(map(text_dtype, map(Timestamp, b._values))), + "c": list(map(text_dtype, + map(lambda x: Timedelta(x)._repr_base(format="all"), + c._values))), + "d": list(map(text_dtype, d._values)), + "e": list(map(text_dtype, e._values)), + }) + + assert_frame_equal(result, expected) + + def test_astype_str_float(self, text_dtype): + # see gh-11302 + result = DataFrame([np.NaN]).astype(text_dtype) + expected = DataFrame(["nan"]) + + assert_frame_equal(result, expected) + result = DataFrame([1.12345678901234567890]).astype(text_dtype) + + # < 1.14 truncates + # >= 1.14 preserves the full repr + val = ("1.12345678901" if _np_version_under1p14 + else "1.1234567890123457") + expected = DataFrame([val]) + assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series]) def test_astype_dict_like(self, dtype_class): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 59b53cd23010e..d5df9d3820fdc 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -892,77 +892,64 @@ def test_on_float(self): assert_frame_equal(result, expected) - def test_on_specialized_type(self): - # GH13936 - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.float16, np.float32, np.float64]: - df1 = pd.DataFrame({ - 'value': [5, 2, 25, 100, 78, 120, 79], - 'symbol': list("ABCDEFG")}, - columns=['symbol', 'value']) - df1.value = dtype(df1.value) - - df2 = pd.DataFrame({ - 'value': [0, 80, 120, 125], - 'result': list('xyzw')}, - columns=['value', 'result']) - df2.value = dtype(df2.value) - - df1 = df1.sort_values('value').reset_index(drop=True) - - if dtype == np.float16: - with 
pytest.raises(MergeError): - pd.merge_asof(df1, df2, on='value') - continue - - result = pd.merge_asof(df1, df2, on='value') - - expected = pd.DataFrame( - {'symbol': list("BACEGDF"), - 'value': [2, 5, 25, 78, 79, 100, 120], - 'result': list('xxxxxyz') - }, columns=['symbol', 'value', 'result']) - expected.value = dtype(expected.value) - - assert_frame_equal(result, expected) - - def test_on_specialized_type_by_int(self): - # GH13936 - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.float16, np.float32, np.float64]: - df1 = pd.DataFrame({ - 'value': [5, 2, 25, 100, 78, 120, 79], - 'key': [1, 2, 3, 2, 3, 1, 2], - 'symbol': list("ABCDEFG")}, - columns=['symbol', 'key', 'value']) - df1.value = dtype(df1.value) - - df2 = pd.DataFrame({ - 'value': [0, 80, 120, 125], - 'key': [1, 2, 2, 3], - 'result': list('xyzw')}, - columns=['value', 'key', 'result']) - df2.value = dtype(df2.value) - - df1 = df1.sort_values('value').reset_index(drop=True) - - if dtype == np.float16: - with pytest.raises(MergeError): - pd.merge_asof(df1, df2, on='value', by='key') - else: - result = pd.merge_asof(df1, df2, on='value', by='key') - - expected = pd.DataFrame({ - 'symbol': list("BACEGDF"), - 'key': [2, 1, 3, 3, 2, 2, 1], - 'value': [2, 5, 25, 78, 79, 100, 120], - 'result': [np.nan, 'x', np.nan, np.nan, np.nan, 'y', 'x']}, - columns=['symbol', 'key', 'value', 'result']) - expected.value = dtype(expected.value) - - assert_frame_equal(result, expected) + def test_on_specialized_type(self, any_real_dtype): + # see gh-13936 + dtype = np.dtype(any_real_dtype).type + + df1 = pd.DataFrame({ + "value": [5, 2, 25, 100, 78, 120, 79], + "symbol": list("ABCDEFG")}, + columns=["symbol", "value"]) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame({ + "value": [0, 80, 120, 125], + "result": list("xyzw")}, + columns=["value", "result"]) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = 
pd.merge_asof(df1, df2, on="value") + + expected = pd.DataFrame( + {"symbol": list("BACEGDF"), + "value": [2, 5, 25, 78, 79, 100, 120], + "result": list("xxxxxyz") + }, columns=["symbol", "value", "result"]) + expected.value = dtype(expected.value) + + assert_frame_equal(result, expected) + + def test_on_specialized_type_by_int(self, any_real_dtype): + # see gh-13936 + dtype = np.dtype(any_real_dtype).type + + df1 = pd.DataFrame({ + "value": [5, 2, 25, 100, 78, 120, 79], + "key": [1, 2, 3, 2, 3, 1, 2], + "symbol": list("ABCDEFG")}, + columns=["symbol", "key", "value"]) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame({ + "value": [0, 80, 120, 125], + "key": [1, 2, 2, 3], + "result": list("xyzw")}, + columns=["value", "key", "result"]) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = pd.merge_asof(df1, df2, on="value", by="key") + + expected = pd.DataFrame({ + "symbol": list("BACEGDF"), + "key": [2, 1, 3, 3, 2, 2, 1], + "value": [2, 5, 25, 78, 79, 100, 120], + "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"]}, + columns=["symbol", "key", "value", "result"]) + expected.value = dtype(expected.value) + + assert_frame_equal(result, expected) def test_on_float_by_int(self): # type specialize both "by" and "on" parameters diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 5cfb9b1ff4292..bd54d5f57d12d 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -137,44 +137,63 @@ def test_get_set_boolean_different_order(test_data): assert_series_equal(sel, exp) +def test_where_unsafe_int(sint_dtype): + s = Series(np.arange(10), dtype=sint_dtype) + mask = s < 5 + + s[mask] = lrange(2, 7) + expected = Series(lrange(2, 7) + lrange(5, 10), dtype=sint_dtype) + + assert_series_equal(s, expected) + + +def test_where_unsafe_float(float_dtype): + s = Series(np.arange(10), dtype=float_dtype) + mask = s < 5 + + 
s[mask] = lrange(2, 7) + expected = Series(lrange(2, 7) + lrange(5, 10), dtype=float_dtype) + + assert_series_equal(s, expected) + + +@pytest.mark.parametrize("dtype", [np.int64, np.float64]) +def test_where_unsafe_upcast(dtype): + s = Series(np.arange(10), dtype=dtype) + values = [2.5, 3.5, 4.5, 5.5, 6.5] + + mask = s < 5 + expected = Series(values + lrange(5, 10), dtype="float64") + + s[mask] = values + assert_series_equal(s, expected) + + +@pytest.mark.parametrize("dtype", [ + np.int8, np.int16, np.int32, np.float32 +]) +def test_where_unsafe_itemsize_fail(dtype): + # Can't do these, as we are forced to change the + # item size of the input to something we cannot. + s = Series(np.arange(10), dtype=dtype) + mask = s < 5 + + values = [2.5, 3.5, 4.5, 5.5, 6.5] + pytest.raises(Exception, s.__setitem__, tuple(mask), values) + + def test_where_unsafe(): - # unsafe dtype changes - for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, - np.float32, np.float64]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - s[mask] = lrange(2, 7) - expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype) - assert_series_equal(s, expected) - assert s.dtype == expected.dtype - - # these are allowed operations, but are upcasted - for dtype in [np.int64, np.float64]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - values = [2.5, 3.5, 4.5, 5.5, 6.5] - s[mask] = values - expected = Series(values + lrange(5, 10), dtype='float64') - assert_series_equal(s, expected) - assert s.dtype == expected.dtype - - # GH 9731 - s = Series(np.arange(10), dtype='int64') - mask = s > 5 + # see gh-9731 + s = Series(np.arange(10), dtype="int64") values = [2.5, 3.5, 4.5, 5.5] + + mask = s > 5 + expected = Series(lrange(6) + values, dtype="float64") + s[mask] = values - expected = Series(lrange(6) + values, dtype='float64') assert_series_equal(s, expected) - # can't do these as we are forced to change the itemsize of the input - # to something we cannot - for dtype in [np.int8, 
np.int16, np.int32, np.float16, np.float32]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - values = [2.5, 3.5, 4.5, 5.5, 6.5] - pytest.raises(Exception, s.__setitem__, tuple(mask), values) - - # GH3235 + # see gh-3235 s = Series(np.arange(10), dtype='int64') mask = s < 5 s[mask] = lrange(2, 7)
Add parameterization to the following tests: * `frame/test_dtypes.py` * `series/indexing/test_boolean.py` * `reshape/merge/test_merge_asof.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/21873
2018-07-12T05:20:06Z
2018-07-13T04:55:27Z
2018-07-13T04:55:27Z
2018-07-13T04:55:55Z
[REF] Move comparison methods to EAMixins, share code
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 5cfa51dc8a0be..3573a561945d2 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -50,7 +50,7 @@ PANDAS_INLINE PyObject* char_to_string(const char* data) { void set_array_not_contiguous(PyArrayObject* ao) { - ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); + ao->flags &= ~(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS); } #endif // PANDAS__LIBS_SRC_NUMPY_HELPER_H_ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..e4350ee8ded53 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1859,21 +1859,40 @@ cdef int64_t _ordinal_from_fields(year, month, quarter, day, hour, minute, second, freq): base, mult = get_freq_code(freq) if quarter is not None: - year, month = _quarter_to_myear(year, quarter, freq) + year, month = quarter_to_myear(year, quarter, freq) return period_ordinal(year, month, day, hour, minute, second, 0, 0, base) -def _quarter_to_myear(year, quarter, freq): - if quarter is not None: - if quarter <= 0 or quarter > 4: - raise ValueError('Quarter must be 1 <= q <= 4') +def quarter_to_myear(int year, int quarter, freq): + """ + A quarterly frequency defines a "year" which may not coincide with + the calendar-year. Find the calendar-year and calendar-month associated + with the given year and quarter under the `freq`-derived calendar. 
+ + Parameters + ---------- + year : int + quarter : int + freq : DateOffset + + Returns + ------- + year : int + month : int + + See Also + -------- + Period.qyear + """ + if quarter <= 0 or quarter > 4: + raise ValueError('Quarter must be 1 <= q <= 4') - mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1 - month = (mnum + (quarter - 1) * 3) % 12 + 1 - if month > mnum: - year -= 1 + mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1 + month = (mnum + (quarter - 1) * 3) % 12 + 1 + if month > mnum: + year -= 1 return year, month diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 1b8a43d4293a5..6ccbb872bf50e 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -3,4 +3,4 @@ from .categorical import Categorical # noqa from .datetimes import DatetimeArrayMixin # noqa from .period import PeriodArrayMixin # noqa -from .timedelta import TimedeltaArrayMixin # noqa +from .timedeltas import TimedeltaArrayMixin # noqa diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 65f34b847f8d0..ec430e4bf17b1 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -10,19 +10,53 @@ DIFFERENT_FREQ_INDEX, IncompatibleFrequency) from pandas.errors import NullFrequencyError, PerformanceWarning +from pandas import compat from pandas.tseries import frequencies from pandas.tseries.offsets import Tick from pandas.core.dtypes.common import ( + needs_i8_conversion, + is_list_like, + is_bool_dtype, is_period_dtype, is_timedelta64_dtype, is_object_dtype) +from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame, ABCIndexClass import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr +def _make_comparison_op(op, cls): + # TODO: share code with indexes.base version? Main difference is that + # the block for MultiIndex was removed here. 
+ def cmp_method(self, other): + if isinstance(other, ABCDataFrame): + return NotImplemented + + if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)): + if other.ndim > 0 and len(self) != len(other): + raise ValueError('Lengths must match to compare') + + if needs_i8_conversion(self) and needs_i8_conversion(other): + # we may need to directly compare underlying + # representations + return self._evaluate_compare(other, op) + + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(record=True): + with np.errstate(all='ignore'): + result = op(self.values, np.asarray(other)) + + return result + + name = '__{name}__'.format(name=op.__name__) + # TODO: docstring? + return compat.set_function_name(cmp_method, name, cls) + + class AttributesMixin(object): @property @@ -435,3 +469,85 @@ def _addsub_offset_array(self, other, op): if not is_period_dtype(self): kwargs['freq'] = 'infer' return type(self)(res_values, **kwargs) + + # -------------------------------------------------------------- + # Comparison Methods + + def _evaluate_compare(self, other, op): + """ + We have been called because a comparison between + 8 aware arrays. 
numpy >= 1.11 will + now warn about NaT comparisons + """ + # Called by comparison methods when comparing datetimelike + # with datetimelike + + if not isinstance(other, type(self)): + # coerce to a similar object + if not is_list_like(other): + # scalar + other = [other] + elif lib.is_scalar(lib.item_from_zerodim(other)): + # ndarray scalar + other = [other.item()] + other = type(self)(other) + + # compare + result = op(self.asi8, other.asi8) + + # technically we could support bool dtyped Index + # for now just return the indexing array directly + mask = (self._isnan) | (other._isnan) + + filler = iNaT + if is_bool_dtype(result): + filler = False + + result[mask] = filler + return result + + # TODO: get this from ExtensionOpsMixin + @classmethod + def _add_comparison_methods(cls): + """ add in comparison methods """ + # DatetimeArray and TimedeltaArray comparison methods will + # call these as their super(...) methods + cls.__eq__ = _make_comparison_op(operator.eq, cls) + cls.__ne__ = _make_comparison_op(operator.ne, cls) + cls.__lt__ = _make_comparison_op(operator.lt, cls) + cls.__gt__ = _make_comparison_op(operator.gt, cls) + cls.__le__ = _make_comparison_op(operator.le, cls) + cls.__ge__ = _make_comparison_op(operator.ge, cls) + + +DatetimeLikeArrayMixin._add_comparison_methods() + + +# ------------------------------------------------------------------- +# Shared Constructor Helpers + +def validate_periods(periods): + """ + If a `periods` argument is passed to the Datetime/Timedelta Array/Index + constructor, cast it to an integer. 
+ + Parameters + ---------- + periods : None, float, int + + Returns + ------- + periods : None or int + + Raises + ------ + TypeError + if periods is None, float, or int + """ + if periods is not None: + if lib.is_float(periods): + periods = int(periods) + elif not lib.is_integer(periods): + raise TypeError('periods must be a number, got {periods}' + .format(periods=periods)) + return periods diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d7dfa73c53d8d..5835090e25de1 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -13,21 +13,37 @@ from pandas.util._decorators import cache_readonly from pandas.errors import PerformanceWarning +from pandas import compat from pandas.core.dtypes.common import ( _NS_DTYPE, + is_datetimelike, is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, _ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.frequencies import to_offset, DateOffset from pandas.tseries.offsets import Tick -from .datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays import datetimelike as dtl + + +def _to_m8(key, tz=None): + """ + Timestamp-like => dt64 + """ + if not isinstance(key, Timestamp): + # this also converts strings + key = Timestamp(key, tz=tz) + + return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) def _field_accessor(name, field, docstring=None): @@ -68,7 +84,58 @@ def f(self): return property(f) -class DatetimeArrayMixin(DatetimeLikeArrayMixin): +def _dt_array_cmp(opname, cls): + """ + Wrap comparison operations to convert datetime-like to datetime64 + """ + nat_result = True if opname == '__ne__' else False + + def wrapper(self, other): + meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + + if 
isinstance(other, (datetime, np.datetime64, compat.string_types)): + if isinstance(other, datetime): + # GH#18435 strings get a pass from tzawareness compat + self._assert_tzawareness_compat(other) + + other = _to_m8(other, tz=self.tz) + result = meth(self, other) + if isna(other): + result.fill(nat_result) + else: + if isinstance(other, list): + other = type(self)(other) + elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)): + # Following Timestamp convention, __eq__ is all-False + # and __ne__ is all True, others raise TypeError. + if opname == '__eq__': + return np.zeros(shape=self.shape, dtype=bool) + elif opname == '__ne__': + return np.ones(shape=self.shape, dtype=bool) + raise TypeError('%s type object %s' % + (type(other), str(other))) + + if is_datetimelike(other): + self._assert_tzawareness_compat(other) + + result = meth(self, np.asarray(other)) + result = com._values_from_object(result) + + # Make sure to pass an array to result[...]; indexing with + # Series breaks with older version of numpy + o_mask = np.array(isna(other)) + if o_mask.any(): + result[o_mask] = nat_result + + if self.hasnans: + result[self._isnan] = nat_result + + return result + + return compat.set_function_name(wrapper, opname, cls) + + +class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin): """ Assumes that subclass __new__/__init__ defines: tz @@ -222,6 +289,18 @@ def __iter__(self): # ----------------------------------------------------------------- # Comparison Methods + @classmethod + def _add_comparison_methods(cls): + """add in comparison methods""" + cls.__eq__ = _dt_array_cmp('__eq__', cls) + cls.__ne__ = _dt_array_cmp('__ne__', cls) + cls.__lt__ = _dt_array_cmp('__lt__', cls) + cls.__gt__ = _dt_array_cmp('__gt__', cls) + cls.__le__ = _dt_array_cmp('__le__', cls) + cls.__ge__ = _dt_array_cmp('__ge__', cls) + # TODO: Some classes pass __eq__ while others pass operator.eq; + # standardize this. 
+ def _has_same_tz(self, other): zzone = self._timezone @@ -335,7 +414,7 @@ def _add_delta(self, delta): The result's name is set outside of _add_delta by the calling method (__add__ or __sub__) """ - from pandas.core.arrays.timedelta import TimedeltaArrayMixin + from pandas.core.arrays.timedeltas import TimedeltaArrayMixin if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) @@ -1021,3 +1100,6 @@ def to_julian_date(self): self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0) + + +DatetimeArrayMixin._add_comparison_methods() diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 000775361061e..66b1fb8db25c0 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -8,7 +8,7 @@ from pandas._libs.tslib import NaT, iNaT from pandas._libs.tslibs.period import ( Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - get_period_field_arr, period_asfreq_arr, _quarter_to_myear) + get_period_field_arr, period_asfreq_arr) from pandas._libs.tslibs import period as libperiod from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.fields import isleapyear_arr @@ -26,7 +26,7 @@ from pandas.tseries import frequencies from pandas.tseries.offsets import Tick, DateOffset -from .datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin def _field_accessor(name, alias, docstring=None): @@ -466,7 +466,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, year, quarter = _make_field_arrays(year, quarter) for y, q in compat.zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) + y, m = libperiod.quarter_to_myear(y, q, freq) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: diff --git a/pandas/core/arrays/timedelta.py b/pandas/core/arrays/timedeltas.py similarity index 81% rename from pandas/core/arrays/timedelta.py rename to 
pandas/core/arrays/timedeltas.py index dbd481aae4f37..f027b84506164 100644 --- a/pandas/core/arrays/timedelta.py +++ b/pandas/core/arrays/timedeltas.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import tslibs, lib +from pandas._libs import tslibs from pandas._libs.tslibs import Timedelta, NaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -11,7 +11,7 @@ from pandas import compat from pandas.core.dtypes.common import ( - _TD_DTYPE, _ensure_int64, is_timedelta64_dtype) + _TD_DTYPE, _ensure_int64, is_timedelta64_dtype, is_list_like) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna @@ -20,7 +20,19 @@ from pandas.tseries.offsets import Tick, DateOffset from pandas.tseries.frequencies import to_offset -from .datetimelike import DatetimeLikeArrayMixin +from . import datetimelike as dtl + + +def _to_m8(key): + """ + Timedelta-like => dt64 + """ + if not isinstance(key, Timedelta): + # this also converts strings + key = Timedelta(key) + + # return an type that can be compared + return np.int64(key.value).view(_TD_DTYPE) def _is_convertible_to_td(key): @@ -42,7 +54,47 @@ def f(self): return property(f) -class TimedeltaArrayMixin(DatetimeLikeArrayMixin): +def _td_array_cmp(opname, cls): + """ + Wrap comparison operations to convert timedelta-like to timedelta64 + """ + nat_result = True if opname == '__ne__' else False + + def wrapper(self, other): + msg = "cannot compare a {cls} with type {typ}" + meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + if _is_convertible_to_td(other) or other is NaT: + try: + other = _to_m8(other) + except ValueError: + # failed to parse as timedelta + raise TypeError(msg.format(cls=type(self).__name__, + typ=type(other).__name__)) + result = meth(self, other) + if isna(other): + result.fill(nat_result) + + elif not is_list_like(other): + raise TypeError(msg.format(cls=type(self).__name__, + 
typ=type(other).__name__)) + else: + other = type(self)(other).values + result = meth(self, other) + result = com._values_from_object(result) + + o_mask = np.array(isna(other)) + if o_mask.any(): + result[o_mask] = nat_result + + if self.hasnans: + result[self._isnan] = nat_result + + return result + + return compat.set_function_name(wrapper, opname, cls) + + +class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin): @property def _box_func(self): return lambda x: Timedelta(x, unit='ns') @@ -78,20 +130,15 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, freq != 'infer'): freq = to_offset(freq) - if periods is not None: - if lib.is_float(periods): - periods = int(periods) - elif not lib.is_integer(periods): - raise TypeError('`periods` must be a number, got {periods}' - .format(periods=periods)) + periods = dtl.validate_periods(periods) if values is None: if freq is None and com._any_none(periods, start, end): raise ValueError('Must provide freq argument if no data is ' 'supplied') else: - return cls._generate(start, end, periods, freq, - closed=closed) + return cls._generate_range(start, end, periods, freq, + closed=closed) result = cls._simple_new(values, freq=freq) if freq == 'infer': @@ -102,7 +149,7 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, return result @classmethod - def _generate(cls, start, end, periods, freq, closed=None, **kwargs): + def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # **kwargs are for compat with TimedeltaIndex, which includes `name` if com._count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' @@ -219,6 +266,19 @@ def _evaluate_with_timedelta_like(self, other, op): return NotImplemented + # ---------------------------------------------------------------- + # Comparison Methods + + @classmethod + def _add_comparison_methods(cls): + """add in comparison methods""" + cls.__eq__ = 
_td_array_cmp('__eq__', cls) + cls.__ne__ = _td_array_cmp('__ne__', cls) + cls.__lt__ = _td_array_cmp('__lt__', cls) + cls.__gt__ = _td_array_cmp('__gt__', cls) + cls.__le__ = _td_array_cmp('__le__', cls) + cls.__ge__ = _td_array_cmp('__ge__', cls) + # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timedelta methods @@ -332,6 +392,9 @@ def f(x): return result +TimedeltaArrayMixin._add_comparison_methods() + + # --------------------------------------------------------------------- # Constructor Helpers diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index a0456630c9a0f..ed416c3ef857d 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -285,7 +285,9 @@ def is_list_like(obj): """ return (isinstance(obj, Iterable) and + # we do not count strings/unicode/bytes as list-like not isinstance(obj, string_and_binary_types) and + # exclude zero-dimensional numpy arrays, effectively scalars not (isinstance(obj, np.ndarray) and obj.ndim == 0)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 78fa6f8217157..419e543ae8044 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -45,7 +45,6 @@ is_datetime64tz_dtype, is_timedelta64_dtype, is_hashable, - needs_i8_conversion, is_iterator, is_list_like, is_scalar) @@ -87,11 +86,6 @@ def cmp_method(self, other): if other.ndim > 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') - # we may need to directly compare underlying - # representations - if needs_i8_conversion(self) and needs_i8_conversion(other): - return self._evaluate_compare(other, op) - from .multi import MultiIndex if is_object_dtype(self) and not isinstance(self, MultiIndex): # don't pass MultiIndex @@ -4628,9 +4622,6 @@ def _evaluate_with_timedelta_like(self, other, op): def _evaluate_with_datetime_like(self, other, op): raise TypeError("can only perform ops with 
datetime like values") - def _evaluate_compare(self, other, op): - raise com.AbstractMethodError(self) - @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 37e20496aafce..3f0bdf18f7230 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -223,7 +223,7 @@ def _validate_frequency(cls, index, freq, **kwargs): if index.empty or inferred == freq.freqstr: return None - on_freq = cls._generate( + on_freq = cls._generate_range( index[0], None, len(index), None, freq, **kwargs) if not np.array_equal(index.asi8, on_freq.asi8): msg = ('Inferred frequency {infer} from passed values does not ' @@ -290,34 +290,11 @@ def wrapper(left, right): return wrapper + @Appender(DatetimeLikeArrayMixin._evaluate_compare.__doc__) def _evaluate_compare(self, other, op): - """ - We have been called because a comparison between - 8 aware arrays. numpy >= 1.11 will - now warn about NaT comparisons - """ - - # coerce to a similar object - if not isinstance(other, type(self)): - if not is_list_like(other): - # scalar - other = [other] - elif is_scalar(lib.item_from_zerodim(other)): - # ndarray scalar - other = [other.item()] - other = type(self)(other) - - # compare - result = op(self.asi8, other.asi8) - - # technically we could support bool dtyped Index - # for now just return the indexing array directly - mask = (self._isnan) | (other._isnan) + result = DatetimeLikeArrayMixin._evaluate_compare(self, other, op) if is_bool_dtype(result): - result[mask] = False return result - - result[mask] = iNaT try: return Index(result) except TypeError: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4931610e652b6..4732178d552be 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -18,7 +18,7 @@ is_integer, is_float, is_integer_dtype, - is_datetime64_ns_dtype, is_datetimelike, 
+ is_datetime64_ns_dtype, is_period_dtype, is_bool_dtype, is_string_like, @@ -31,7 +31,8 @@ from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat -from pandas.core.arrays.datetimes import DatetimeArrayMixin +from pandas.core.arrays.datetimes import DatetimeArrayMixin, _to_m8 +from pandas.core.arrays import datetimelike as dtl from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index, Float64Index @@ -87,49 +88,8 @@ def _dt_index_cmp(opname, cls): """ Wrap comparison operations to convert datetime-like to datetime64 """ - nat_result = True if opname == '__ne__' else False - def wrapper(self, other): - func = getattr(super(DatetimeIndex, self), opname) - - if isinstance(other, (datetime, np.datetime64, compat.string_types)): - if isinstance(other, datetime): - # GH#18435 strings get a pass from tzawareness compat - self._assert_tzawareness_compat(other) - - other = _to_m8(other, tz=self.tz) - result = func(other) - if isna(other): - result.fill(nat_result) - else: - if isinstance(other, list): - other = DatetimeIndex(other) - elif not isinstance(other, (np.ndarray, Index, ABCSeries)): - # Following Timestamp convention, __eq__ is all-False - # and __ne__ is all True, others raise TypeError. 
- if opname == '__eq__': - return np.zeros(shape=self.shape, dtype=bool) - elif opname == '__ne__': - return np.ones(shape=self.shape, dtype=bool) - raise TypeError('%s type object %s' % - (type(other), str(other))) - - if is_datetimelike(other): - self._assert_tzawareness_compat(other) - - result = func(np.asarray(other)) - result = com._values_from_object(result) - - # Make sure to pass an array to result[...]; indexing with - # Series breaks with older version of numpy - o_mask = np.array(isna(other)) - if o_mask.any(): - result[o_mask] = nat_result - - if self.hasnans: - result[self._isnan] = nat_result - - # support of bool dtype indexers + result = getattr(DatetimeArrayMixin, opname)(self, other) if is_bool_dtype(result): return result return Index(result) @@ -339,12 +299,7 @@ def __new__(cls, data=None, freq_infer = True freq = None - if periods is not None: - if is_float(periods): - periods = int(periods) - elif not is_integer(periods): - msg = 'periods must be a number, got {periods}' - raise TypeError(msg.format(periods=periods)) + periods = dtl.validate_periods(periods) # if dtype has an embedded tz, capture it if dtype is not None: @@ -364,9 +319,9 @@ def __new__(cls, data=None, msg = 'Must provide freq argument if no data is supplied' raise ValueError(msg) else: - return cls._generate(start, end, periods, name, freq, tz=tz, - normalize=normalize, closed=closed, - ambiguous=ambiguous) + return cls._generate_range(start, end, periods, name, freq, + tz=tz, normalize=normalize, + closed=closed, ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -438,8 +393,8 @@ def __new__(cls, data=None, return subarr._deepcopy_if_needed(ref_to_data, copy) @classmethod - def _generate(cls, start, end, periods, name, freq, - tz=None, normalize=False, ambiguous='raise', closed=None): + def _generate_range(cls, start, end, periods, name, freq, tz=None, + normalize=False, ambiguous='raise', closed=None): if 
com._count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') @@ -521,7 +476,7 @@ def _generate(cls, start, end, periods, name, freq, index = cls._cached_range(start, end, periods=periods, freq=freq, name=name) else: - index = _generate_regular_range(start, end, periods, freq) + index = _generate_regular_range(cls, start, end, periods, freq) else: @@ -545,14 +500,15 @@ def _generate(cls, start, end, periods, name, freq, index = cls._cached_range(start, end, periods=periods, freq=freq, name=name) else: - index = _generate_regular_range(start, end, periods, freq) + index = _generate_regular_range(cls, start, end, + periods, freq) if tz is not None and getattr(index, 'tz', None) is None: arr = conversion.tz_localize_to_utc(_ensure_int64(index), tz, ambiguous=ambiguous) - index = DatetimeIndex(arr) + index = cls(arr) # index is localized datetime64 array -> have to convert # start/end as well to compare @@ -1764,7 +1720,7 @@ def to_julian_date(self): DatetimeIndex._add_datetimelike_methods() -def _generate_regular_range(start, end, periods, freq): +def _generate_regular_range(cls, start, end, periods, freq): if isinstance(freq, Tick): stride = freq.nanos if periods is None: @@ -1788,7 +1744,8 @@ def _generate_regular_range(start, end, periods, freq): "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) - data = DatetimeIndex._simple_new(data.view(_NS_DTYPE), None, tz=tz) + # TODO: Do we need to use _simple_new here? just return data.view? 
+ data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) else: if isinstance(start, Timestamp): start = start.to_pydatetime() @@ -2088,17 +2045,6 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, closed=closed, **kwargs) -def _to_m8(key, tz=None): - """ - Timestamp-like => dt64 - """ - if not isinstance(key, Timestamp): - # this also converts strings - key = Timestamp(key, tz=tz) - - return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) - - _CACHE_START = Timestamp(datetime(1950, 1, 1)) _CACHE_END = Timestamp(datetime(2030, 1, 1)) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index eb1171c45b1e5..1ed6145f01a44 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -15,8 +15,10 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.arrays.timedelta import ( - TimedeltaArrayMixin, _is_convertible_to_td) +from pandas.core.arrays.timedeltas import ( + TimedeltaArrayMixin, _is_convertible_to_td, _to_m8) +from pandas.core.arrays import datetimelike as dtl + from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat @@ -53,39 +55,10 @@ def _td_index_cmp(opname, cls): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ - nat_result = True if opname == '__ne__' else False - def wrapper(self, other): - msg = "cannot compare a {cls} with type {typ}" - func = getattr(super(TimedeltaIndex, self), opname) - if _is_convertible_to_td(other) or other is NaT: - try: - other = _to_m8(other) - except ValueError: - # failed to parse as timedelta - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) - result = func(other) - if isna(other): - result.fill(nat_result) - - elif not is_list_like(other): - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) - else: - other = 
TimedeltaIndex(other).values - result = func(other) - result = com._values_from_object(result) - - o_mask = np.array(isna(other)) - if o_mask.any(): - result[o_mask] = nat_result - - if self.hasnans: - result[self._isnan] = nat_result - - # support of bool dtype indexers + result = getattr(TimedeltaArrayMixin, opname)(self, other) if is_bool_dtype(result): + # support of bool dtype indexers return result return Index(result) @@ -218,20 +191,15 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, freq_infer = True freq = None - if periods is not None: - if is_float(periods): - periods = int(periods) - elif not is_integer(periods): - msg = 'periods must be a number, got {periods}' - raise TypeError(msg.format(periods=periods)) + periods = dtl.validate_periods(periods) if data is None: if freq is None and com._any_none(periods, start, end): msg = 'Must provide freq argument if no data is supplied' raise ValueError(msg) else: - return cls._generate(start, end, periods, name, freq, - closed=closed) + return cls._generate_range(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -248,30 +216,28 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, elif copy: data = np.array(data, copy=True) + subarr = cls._simple_new(data, name=name, freq=freq) # check that we are matching freqs - if verify_integrity and len(data) > 0: + if verify_integrity and len(subarr) > 0: if freq is not None and not freq_infer: - index = cls._simple_new(data, name=name) - cls._validate_frequency(index, freq) - index.freq = freq - return index + cls._validate_frequency(subarr, freq) if freq_infer: - index = cls._simple_new(data, name=name) - inferred = index.inferred_freq + inferred = subarr.inferred_freq if inferred: - index.freq = to_offset(inferred) - return index + subarr.freq = to_offset(inferred) + return subarr - return cls._simple_new(data, name=name, freq=freq) + return subarr 
@classmethod - def _generate(cls, start, end, periods, name, freq, closed=None): + def _generate_range(cls, start, end, periods, name, freq, closed=None): # TimedeltaArray gets `name` via **kwargs, so we need to explicitly # override it if name is passed as a positional argument - return super(TimedeltaIndex, cls)._generate(start, end, - periods, freq, - name=name, closed=closed) + return super(TimedeltaIndex, cls)._generate_range(start, end, + periods, freq, + name=name, + closed=closed) @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): @@ -797,18 +763,6 @@ def _is_convertible_to_index(other): return False -def _to_m8(key): - """ - Timedelta-like => dt64 - """ - if not isinstance(key, Timedelta): - # this also converts strings - key = Timedelta(key) - - # return an type that can be compared - return np.int64(key.value).view(_TD_DTYPE) - - def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """ diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index d116b3bcff86a..69e802fbaa3f0 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -4,7 +4,7 @@ import pandas as pd from pandas.core.arrays.datetimes import DatetimeArrayMixin -from pandas.core.arrays.timedelta import TimedeltaArrayMixin +from pandas.core.arrays.timedeltas import TimedeltaArrayMixin from pandas.core.arrays.period import PeriodArrayMixin diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index d4ad2e4eeb2e6..387a70fe37253 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -208,8 +208,8 @@ def get_offset(name): raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) # cache _offset_map[name] = offset - # do not return cache because it's mutable - return _offset_map[name].copy() + + return _offset_map[name] getOffset = get_offset
Changes an old usage numpy's C API that is deprecated. This won't get rid of the warnings because cython hasn't changed it, but still. Also stops making copies of offsets since they are now immutable. Handles a handful of changes requested in the last pass: de-privatizes _quarter_to_myear (plus bonus docstring), renames _generate --> _generate_range, comments in is_list_like renames arrays.timedelta --> arrays.timedeltas to match core.indexes Implements comparison methods in DatetimeArray and TimedeltaArray, cleans up some Index code that is no longer needed as a result. Makes some progress on sharing code between TDI and DTI constructors (most of which we want to move up to the array classes)
https://api.github.com/repos/pandas-dev/pandas/pulls/21872
2018-07-12T03:17:44Z
2018-07-14T14:38:01Z
2018-07-14T14:38:01Z
2020-04-05T17:42:01Z
API: Add DataFrame.droplevel
diff --git a/doc/source/api.rst b/doc/source/api.rst index fff944651588e..9faac4c616477 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -444,6 +444,7 @@ Reindexing / Selection / Label manipulation Series.align Series.drop + Series.droplevel Series.drop_duplicates Series.duplicated Series.equals @@ -1063,6 +1064,7 @@ Reshaping, sorting, transposing .. autosummary:: :toctree: generated/ + DataFrame.droplevel DataFrame.pivot DataFrame.pivot_table DataFrame.reorder_levels diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..d300c2b273906 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -74,6 +74,7 @@ Other Enhancements - :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether NaN/NaT values should be considered (:issue:`17534`) - :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`) - :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with :class:`MultiIndex` (:issue:`21115`) +- :meth:`Series.droplevel` and :meth:`DataFrame.droplevel` are now implemented (:issue:`20342`) - Added support for reading from Google Cloud Storage via the ``gcsfs`` library (:issue:`19454`) - :func:`to_gbq` and :func:`read_gbq` signature and documentation updated to reflect changes from the `Pandas-GBQ library version 0.5.0 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..608eebd079eef 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -716,6 +716,66 @@ def swapaxes(self, axis1, axis2, copy=True): return self._constructor(new_values, *new_axes).__finalize__(self) + def droplevel(self, level, axis=0): + """Return DataFrame with requested index / column level(s) removed. + + .. 
versionadded:: 0.24.0 + + Parameters + ---------- + level : int, str, or list-like + If a string is given, must be the name of a level + If list-like, elements must be names or positional indexes + of levels. + + axis : {0 or 'index', 1 or 'columns'}, default 0 + + + Returns + ------- + DataFrame.droplevel() + + Examples + -------- + >>> df = pd.DataFrame([ + ...: [1, 2, 3, 4], + ...: [5, 6, 7, 8], + ...: [9, 10, 11, 12] + ...: ]).set_index([0, 1]).rename_axis(['a', 'b']) + + >>> df.columns = pd.MultiIndex.from_tuples([ + ...: ('c', 'e'), ('d', 'f') + ...:], names=['level_1', 'level_2']) + + >>> df + level_1 c d + level_2 e f + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + + >>> df.droplevel('a') + level_1 c d + level_2 e f + b + 2 3 4 + 6 7 8 + 10 11 12 + + >>> df.droplevel('level2', axis=1) + level_1 c d + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + + """ + labels = self._get_axis(axis) + new_labels = labels.droplevel(level) + result = self.set_axis(new_labels, axis=axis, inplace=False) + return result + def pop(self, item): """ Return item and drop from frame. Raise KeyError if not found. 
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 21961906c39bb..4f95eb3fe7b47 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1056,6 +1056,28 @@ def test_reindex_signature(self): "limit", "copy", "level", "method", "fill_value", "tolerance"} + def test_droplevel(self): + # GH20342 + df = pd.DataFrame([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12] + ]) + df = df.set_index([0, 1]).rename_axis(['a', 'b']) + df.columns = pd.MultiIndex.from_tuples([('c', 'e'), ('d', 'f')], + names=['level_1', 'level_2']) + + # test that dropping of a level in index works + expected = df.reset_index('a', drop=True) + result = df.droplevel('a', axis='index') + assert_frame_equal(result, expected) + + # test that dropping of a level in columns works + expected = df.copy() + expected.columns = pd.Index(['c', 'd'], name='level_1') + result = df.droplevel('level_2', axis='columns') + assert_frame_equal(result, expected) + class TestIntervalIndex(object): diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 859082a7e722d..840c80d6775a5 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -295,3 +295,15 @@ def test_reset_index_drop_errors(self): s = pd.Series(range(4), index=pd.MultiIndex.from_product([[1, 2]] * 2)) with tm.assert_raises_regex(KeyError, 'not found'): s.reset_index('wrong', drop=True) + + def test_droplevel(self): + # GH20342 + ser = pd.Series([1, 2, 3, 4]) + ser.index = pd.MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)], + names=['a', 'b']) + expected = ser.reset_index('b', drop=True) + result = ser.droplevel('b', axis='index') + assert_series_equal(result, expected) + # test that droplevel raises ValueError on axis != 0 + with pytest.raises(ValueError): + ser.droplevel(1, axis='columns')
- [x] closes #20342 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21871
2018-07-12T01:50:50Z
2018-07-20T13:11:27Z
2018-07-20T13:11:27Z
2018-07-20T15:14:34Z
[CLN] De-privatize commonly-used functions
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 172117f7d8059..4cc119a700ca0 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -485,7 +485,7 @@ cpdef ndarray[object] astype_str(ndarray arr): def clean_index_list(list obj): """ - Utility used in pandas.core.index._ensure_index + Utility used in pandas.core.index.ensure_index """ cdef: Py_ssize_t i, n = len(obj) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6e49e8044ff25..78c9113ce60de 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -27,9 +27,9 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_datetimelike, is_interval_dtype, is_scalar, is_list_like, - _ensure_platform_int, _ensure_object, - _ensure_float64, _ensure_uint64, - _ensure_int64) + ensure_platform_int, ensure_object, + ensure_float64, ensure_uint64, + ensure_int64) from pandas.compat.numpy import _np_version_under1p10 from pandas.core.dtypes.missing import isna, na_value_for_dtype @@ -73,32 +73,32 @@ def _ensure_data(values, dtype=None): # we check some simple dtypes first try: if is_object_dtype(dtype): - return _ensure_object(np.asarray(values)), 'object', 'object' + return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): - return _ensure_int64(values), 'int64', 'int64' + return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): - return _ensure_uint64(values), 'uint64', 'uint64' + return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): - return _ensure_float64(values), 'float64', 'float64' + return ensure_float64(values), 'float64', 'float64' elif 
is_object_dtype(values) and dtype is None: - return _ensure_object(np.asarray(values)), 'object', 'object' + return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(record=True): - values = _ensure_float64(values) + values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here - return _ensure_object(values), 'object', 'object' + return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or @@ -129,13 +129,13 @@ def _ensure_data(values, dtype=None): # we are actually coercing to int64 # until our algos support int* directly (not all do) - values = _ensure_int64(values) + values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values) - return _ensure_object(values), 'object', 'object' + return ensure_object(values), 'object', 'object' def _reconstruct_data(values, dtype, original): @@ -475,7 +475,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, labels = table.get_labels(values, uniques, 0, na_sentinel, na_value=na_value) - labels = _ensure_platform_int(labels) + labels = ensure_platform_int(labels) uniques = uniques.to_array() return labels, uniques @@ -1309,7 +1309,7 @@ def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info): if arr.dtype != out.dtype: arr = arr.astype(out.dtype) if arr.shape[axis] > 0: - arr.take(_ensure_platform_int(indexer), axis=axis, out=out) + arr.take(ensure_platform_int(indexer), axis=axis, out=out) if needs_masking: outindexer = [slice(None)] * arr.ndim outindexer[axis] = mask @@ -1450,7 +1450,7 @@ def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None): return func def func(arr, indexer, out, fill_value=np.nan): - 
indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info) @@ -1609,7 +1609,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, indexer = np.arange(arr.shape[axis], dtype=np.int64) dtype, fill_value = arr.dtype, arr.dtype.type() else: - indexer = _ensure_int64(indexer, copy=False) + indexer = ensure_int64(indexer, copy=False) if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False @@ -1687,11 +1687,11 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: - row_idx = _ensure_int64(row_idx) + row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: - col_idx = _ensure_int64(col_idx) + col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 7a6253dffe235..973a8af76bb07 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -17,9 +17,9 @@ coerce_indexer_dtype) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, - _ensure_platform_int, + ensure_int64, + ensure_object, + ensure_platform_int, is_extension_array_dtype, is_dtype_equal, is_datetimelike, @@ -1221,7 +1221,7 @@ def shift(self, periods): if codes.ndim > 1: raise NotImplementedError("Categorical with ndim > 1.") if np.prod(codes.shape) and (periods != 0): - codes = np.roll(codes, _ensure_platform_int(periods), axis=0) + codes = np.roll(codes, ensure_platform_int(periods), axis=0) if periods > 0: codes[:periods] = -1 else: @@ -2137,7 +2137,7 @@ def mode(self, dropna=True): if dropna: good = self._codes != -1 values = self._codes[good] 
- values = sorted(htable.mode_int64(_ensure_int64(values), dropna)) + values = sorted(htable.mode_int64(ensure_int64(values), dropna)) result = self._constructor(values=values, categories=self.categories, ordered=self.ordered, fastpath=True) return result @@ -2431,8 +2431,8 @@ def _get_codes_for_values(values, categories): from pandas.core.algorithms import _get_data_algo, _hashtables if not is_dtype_equal(values.dtype, categories.dtype): - values = _ensure_object(values) - categories = _ensure_object(categories) + values = ensure_object(values) + categories = ensure_object(categories) (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) (_, _), cats = _get_data_algo(categories, _hashtables) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5835090e25de1..c5e85cb5892f4 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -21,7 +21,7 @@ is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries @@ -167,7 +167,7 @@ def _simple_new(cls, values, freq=None, tz=None, **kwargs): values = np.array(values, copy=False) if not is_datetime64_dtype(values): - values = _ensure_int64(values).view(_NS_DTYPE) + values = ensure_int64(values).view(_NS_DTYPE) result = object.__new__(cls) result._data = values diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4ad53e16bc439..c915b272aee8b 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -19,7 +19,7 @@ ABCSeries, ABCIntervalIndex, ABCInterval) from pandas.core.dtypes.missing import isna, notna -from pandas.core.indexes.base import Index, _ensure_index +from pandas.core.indexes.base import Index, ensure_index from pandas.util._decorators import Appender from pandas.util._doctools import 
_WritableDoc @@ -145,8 +145,8 @@ def _simple_new(cls, left, right, closed=None, result = IntervalMixin.__new__(cls) closed = closed or 'right' - left = _ensure_index(left, copy=copy) - right = _ensure_index(right, copy=copy) + left = ensure_index(left, copy=copy) + right = ensure_index(right, copy=copy) if dtype is not None: # GH 19262: dtype must be an IntervalDtype to override inferred diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f027b84506164..a28f7fc9c32fa 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -11,7 +11,7 @@ from pandas import compat from pandas.core.dtypes.common import ( - _TD_DTYPE, _ensure_int64, is_timedelta64_dtype, is_list_like) + _TD_DTYPE, ensure_int64, is_timedelta64_dtype, is_list_like) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna @@ -117,7 +117,7 @@ def _simple_new(cls, values, freq=None, **kwargs): # non-nano unit values = values.astype(_TD_DTYPE) else: - values = _ensure_int64(values).view(_TD_DTYPE) + values = ensure_int64(values).view(_TD_DTYPE) result = object.__new__(cls) result._data = values diff --git a/pandas/core/common.py b/pandas/core/common.py index 0a33873630d27..0ca776b6bfa77 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -13,7 +13,7 @@ from pandas import compat from pandas.compat import long, zip, iteritems, PY36, OrderedDict from pandas.core.config import get_option -from pandas.core.dtypes.generic import ABCSeries, ABCIndex +from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa @@ -120,11 +120,6 @@ def is_bool_indexer(key): return False -def _default_index(n): - from pandas.core.index import RangeIndex - return RangeIndex(0, n, name=None) - - def _mut_exclusive(**kwargs): item1, 
item2 = kwargs.items() label1, val1 = item1 @@ -299,11 +294,10 @@ def intersection(*seqs): def _asarray_tuplesafe(values, dtype=None): - from pandas.core.index import Index if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) - elif isinstance(values, Index): + elif isinstance(values, ABCIndexClass): return values.values if isinstance(values, list) and dtype in [np.object_, object]: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 0bc6ad8499934..8675d3be06287 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -8,7 +8,7 @@ from pandas._libs import tslib, lib, tslibs from pandas._libs.tslibs import iNaT from pandas.compat import string_types, text_type, PY3 -from .common import (_ensure_object, is_bool, is_integer, is_float, +from .common import (ensure_object, is_bool, is_integer, is_float, is_complex, is_datetimetz, is_categorical_dtype, is_datetimelike, is_extension_type, @@ -25,8 +25,8 @@ is_bool_dtype, is_scalar, is_string_dtype, _string_dtypes, pandas_dtype, - _ensure_int8, _ensure_int16, - _ensure_int32, _ensure_int64, + ensure_int8, ensure_int16, + ensure_int32, ensure_int64, _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE, _POSSIBLY_CAST_DTYPES) from .dtypes import (ExtensionDtype, PandasExtensionDtype, DatetimeTZDtype, @@ -85,7 +85,7 @@ def trans(x): if isinstance(dtype, string_types): if dtype == 'infer': - inferred_type = lib.infer_dtype(_ensure_object(result.ravel())) + inferred_type = lib.infer_dtype(ensure_object(result.ravel())) if inferred_type == 'boolean': dtype = 'bool' elif inferred_type == 'integer': @@ -602,12 +602,12 @@ def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ length = len(categories) if length < _int8_max: - return _ensure_int8(indexer) + return ensure_int8(indexer) elif length < _int16_max: - return _ensure_int16(indexer) + return ensure_int16(indexer) elif length < _int32_max: - 
return _ensure_int32(indexer) - return _ensure_int64(indexer) + return ensure_int32(indexer) + return ensure_int64(indexer) def coerce_to_dtypes(result, dtypes): @@ -948,7 +948,7 @@ def try_timedelta(v): except Exception: return v.reshape(shape) - inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) + inferred_type = lib.infer_datetimelike_array(ensure_object(v)) if inferred_type == 'date' and convert_dates: value = try_datetime(v) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index ef4f36dc6df33..5a2f91d775fb2 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -32,14 +32,14 @@ # oh the troubles to reduce import time _is_scipy_sparse = None -_ensure_float64 = algos.ensure_float64 -_ensure_float32 = algos.ensure_float32 +ensure_float64 = algos.ensure_float64 +ensure_float32 = algos.ensure_float32 _ensure_datetime64ns = conversion.ensure_datetime64ns _ensure_timedelta64ns = conversion.ensure_timedelta64ns -def _ensure_float(arr): +def ensure_float(arr): """ Ensure that an array object has a float dtype if possible. @@ -59,16 +59,16 @@ def _ensure_float(arr): return arr -_ensure_uint64 = algos.ensure_uint64 -_ensure_int64 = algos.ensure_int64 -_ensure_int32 = algos.ensure_int32 -_ensure_int16 = algos.ensure_int16 -_ensure_int8 = algos.ensure_int8 -_ensure_platform_int = algos.ensure_platform_int -_ensure_object = algos.ensure_object +ensure_uint64 = algos.ensure_uint64 +ensure_int64 = algos.ensure_int64 +ensure_int32 = algos.ensure_int32 +ensure_int16 = algos.ensure_int16 +ensure_int8 = algos.ensure_int8 +ensure_platform_int = algos.ensure_platform_int +ensure_object = algos.ensure_object -def _ensure_categorical(arr): +def ensure_categorical(arr): """ Ensure that an array-like object is a Categorical (if not already). 
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 7ef4a7674753e..66998aa6866f6 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -16,7 +16,7 @@ is_string_like_dtype, is_bool_dtype, is_integer_dtype, is_dtype_equal, is_extension_array_dtype, - needs_i8_conversion, _ensure_object, + needs_i8_conversion, ensure_object, pandas_dtype, is_scalar, is_object_dtype, @@ -413,7 +413,7 @@ def array_equivalent(left, right, strict_nan=False): if not strict_nan: # isna considers NaN and None to be equivalent. return lib.array_equivalent_object( - _ensure_object(left.ravel()), _ensure_object(right.ravel())) + ensure_object(left.ravel()), ensure_object(right.ravel())) for left_value, right_value in zip(left, right): if left_value is NaT and right_value is not NaT: @@ -470,7 +470,7 @@ def _infer_fill_value(val): if is_datetimelike(val): return np.array('NaT', dtype=val.dtype) elif is_object_dtype(val.dtype): - dtype = lib.infer_dtype(_ensure_object(val)) + dtype = lib.infer_dtype(ensure_object(val)) if dtype in ['datetime', 'datetime64']: return np.array('NaT', dtype=_NS_DTYPE) elif dtype in ['timedelta', 'timedelta64']: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..4578d2ac08199 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -51,9 +51,9 @@ is_dtype_equal, needs_i8_conversion, _get_dtype_from_object, - _ensure_float64, - _ensure_int64, - _ensure_platform_int, + ensure_float64, + ensure_int64, + ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, @@ -64,8 +64,8 @@ from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import (Index, MultiIndex, _ensure_index, - _ensure_index_from_sequences) +from pandas.core.index import (Index, MultiIndex, ensure_index, + ensure_index_from_sequences) from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, @@ 
-88,6 +88,7 @@ from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex +import pandas.core.indexes.base as ibase import pandas.core.common as com import pandas.core.nanops as nanops @@ -397,16 +398,16 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = _to_arrays(data, columns, dtype=dtype) - columns = _ensure_index(columns) + columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): - index = com._default_index(len(data[0])) + index = ibase.default_index(len(data[0])) else: - index = com._default_index(len(data)) + index = ibase.default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) @@ -450,7 +451,7 @@ def _init_dict(self, data, index, columns, dtype=None): # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: - index = _ensure_index(index) + index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): @@ -491,14 +492,14 @@ def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: - index = com._default_index(N) + index = ibase.default_index(N) else: - index = _ensure_index(index) + index = ensure_index(index) if columns is None: - columns = com._default_index(K) + columns = ibase.default_index(K) else: - columns = _ensure_index(columns) + columns = ensure_index(columns) return index, columns # we could have a categorical type passed or coerced to 'category' @@ -1236,7 +1237,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, # Make a copy of the input columns so we can modify it if columns is not None: - columns = _ensure_index(columns) + columns = 
ensure_index(columns) if is_iterator(data): if nrows == 0: @@ -1265,7 +1266,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, if isinstance(data, dict): if columns is None: - columns = arr_columns = _ensure_index(sorted(data)) + columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] @@ -1281,15 +1282,15 @@ def from_records(cls, data, index=None, exclude=None, columns=None, elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = _to_arrays(data, columns, coerce_float=coerce_float) - arr_columns = _ensure_index(arr_columns) + arr_columns = ensure_index(arr_columns) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) else: columns = arr_columns @@ -1312,8 +1313,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None, try: to_remove = [arr_columns.get_loc(field) for field in index] index_data = [arrays[i] for i in to_remove] - result_index = _ensure_index_from_sequences(index_data, - names=index) + result_index = ensure_index_from_sequences(index_data, + names=index) exclude.update(index) except Exception: @@ -1480,18 +1481,18 @@ def from_items(cls, items, columns=None, orient='columns'): if orient == 'columns': if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): - if not columns.equals(_ensure_index(keys)): + if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: - columns = _ensure_index(keys) + columns = ensure_index(keys) arrays = values # GH 17312 @@ -1508,7 +1509,7 @@ def from_items(cls, items, columns=None, 
orient='columns'): if columns is None: raise TypeError("Must pass columns with orient='index'") - keys = _ensure_index(keys) + keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed @@ -4006,7 +4007,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, to_remove.append(col) arrays.append(level) - index = _ensure_index_from_sequences(arrays, names) + index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() @@ -4188,7 +4189,7 @@ def _maybe_casted_values(index, labels=None): values, mask, np.nan) return values - new_index = com._default_index(len(new_obj)) + new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -4509,7 +4510,7 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, keys.append(k) indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort @@ -6749,14 +6750,14 @@ def corr(self, method='pearson', min_periods=1): mat = numeric_df.values if method == 'pearson': - correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods) + correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) elif method == 'spearman': - correl = libalgos.nancorr_spearman(_ensure_float64(mat), + correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) else: if min_periods is None: min_periods = 1 - mat = _ensure_float64(mat).T + mat = ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) @@ -6886,7 +6887,7 @@ def cov(self, min_periods=None): baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: - baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True, + baseCov = 
libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) @@ -7076,7 +7077,7 @@ def _count_level(self, level, axis=0, numeric_only=False): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] - labels = _ensure_int64(count_axis.labels[level]) + labels = ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) @@ -7608,7 +7609,7 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective - axes = [_ensure_index(columns), _ensure_index(index)] + axes = [ensure_index(columns), ensure_index(index)] return create_block_manager_from_arrays(arrays, arr_names, axes) @@ -7660,9 +7661,9 @@ def extract_index(data): (lengths[0], len(index))) raise ValueError(msg) else: - index = com._default_index(lengths[0]) + index = ibase.default_index(lengths[0]) - return _ensure_index(index) + return ensure_index(index) def _prep_ndarray(values, copy=True): @@ -7734,7 +7735,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: - columns = com._default_index(len(data)) + columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): @@ -7758,11 +7759,11 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): if index is None: index = _get_names_from_index(fdata) if index is None: - index = com._default_index(len(data)) - index = _ensure_index(index) + index = ibase.default_index(len(data)) + index = ensure_index(index) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) arrays, arr_columns = _to_arrays(fdata, columns) # fill if needed @@ -7790,8 +7791,8 @@ def 
_reorder_arrays(arrays, arr_columns, columns): # reorder according to the columns if (columns is not None and len(columns) and arr_columns is not None and len(arr_columns)): - indexer = _ensure_index(arr_columns).get_indexer(columns) - arr_columns = _ensure_index([arr_columns[i] for i in indexer]) + indexer = ensure_index(arr_columns).get_indexer(columns) + arr_columns = ensure_index([arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns @@ -7818,7 +7819,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): for s in data: index = getattr(s, 'index', None) if index is None: - index = com._default_index(len(s)) + index = ibase.default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] @@ -7855,7 +7856,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: - columns = com._default_index(len(content)) + columns = ibase.default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
@@ -7878,7 +7879,7 @@ def convert(arr): def _get_names_from_index(data): has_some_name = any(getattr(s, 'name', None) is not None for s in data) if not has_some_name: - return com._default_index(len(data)) + return ibase.default_index(len(data)) index = lrange(len(data)) count = 0 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..7305da4f56506 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12,8 +12,8 @@ from pandas._libs import tslib, properties from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, + ensure_int64, + ensure_object, is_scalar, is_number, is_integer, is_bool, @@ -35,7 +35,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame from pandas.core.base import PandasObject, SelectionMixin -from pandas.core.index import (Index, MultiIndex, _ensure_index, +from pandas.core.index import (Index, MultiIndex, ensure_index, InvalidIndexError, RangeIndex) import pandas.core.indexing as indexing from pandas.core.indexes.datetimes import DatetimeIndex @@ -3235,7 +3235,7 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): # Case for non-unique axis else: - labels = _ensure_object(com._index_labels_to_array(labels)) + labels = ensure_object(com._index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') @@ -3889,9 +3889,9 @@ def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, if index is None: continue - index = _ensure_index(index) + index = ensure_index(index) if indexer is not None: - indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) # TODO: speed up on homogeneous DataFrame objects new_data = new_data.reindex_indexer(index, indexer, axis=baxis, diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 78631bfae9e01..169416d6f8211 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ 
-18,6 +18,7 @@ from pandas.util._decorators import Substitution, Appender from pandas import compat +import pandas.core.indexes.base as ibase import pandas.core.common as com from pandas.core.panel import Panel from pandas.compat import lzip, map @@ -35,8 +36,8 @@ is_numeric_dtype, is_integer_dtype, is_interval_dtype, - _ensure_platform_int, - _ensure_int64) + ensure_platform_int, + ensure_int64) from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algorithms from pandas.core.frame import DataFrame @@ -1165,7 +1166,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, verify_integrity=False) if is_integer_dtype(out): - out = _ensure_int64(out) + out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) # for compat. with libgroupby.value_counts need to ensure every @@ -1196,7 +1197,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, verify_integrity=False) if is_integer_dtype(out): - out = _ensure_int64(out) + out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) def count(self): @@ -1205,7 +1206,7 @@ def count(self): val = self.obj.get_values() mask = (ids != -1) & ~isna(val) - ids = _ensure_platform_int(ids) + ids = ensure_platform_int(ids) out = np.bincount(ids[mask], minlength=ngroups or 0) return Series(out, @@ -1567,7 +1568,7 @@ def groupby_series(obj, col=None): results = concat(results, axis=1) if not self.as_index: - results.index = com._default_index(len(results)) + results.index = ibase.default_index(len(results)) return results boxplot = boxplot_frame_groupby diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ff2ed6970ee76..cb045b08f3629 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -28,7 +28,7 @@ class providing the base-class of operations. 
from pandas.core.dtypes.common import ( is_numeric_dtype, is_scalar, - _ensure_float) + ensure_float) from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.missing import isna, notna @@ -842,7 +842,7 @@ def _python_agg_general(self, func, *args, **kwargs): # since we are masking, make sure that we have a float object values = result if is_numeric_dtype(values.dtype): - values = _ensure_float(values) + values = ensure_float(values) output[name] = self._try_cast(values[mask], result) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index d5c4c2946a632..a1511b726c705 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -16,7 +16,7 @@ from pandas.core.index import ( Index, MultiIndex, CategoricalIndex) from pandas.core.dtypes.common import ( - _ensure_categorical, + ensure_categorical, is_hashable, is_list_like, is_timedelta64_dtype, @@ -360,7 +360,7 @@ def indices(self): if isinstance(self.grouper, BaseGrouper): return self.grouper.indices - values = _ensure_categorical(self.grouper) + values = ensure_categorical(self.grouper) return values._reverse_indexer() @property diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 65b9144c0ddc9..f2c55a56b119d 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -18,12 +18,12 @@ from pandas.core.base import SelectionMixin from pandas.core.dtypes.missing import isna, _maybe_fill from pandas.core.index import ( - Index, MultiIndex, _ensure_index) + Index, MultiIndex, ensure_index) from pandas.core.dtypes.common import ( - _ensure_float64, - _ensure_platform_int, - _ensure_int64, - _ensure_object, + ensure_float64, + ensure_platform_int, + ensure_int64, + ensure_object, needs_i8_conversion, is_integer_dtype, is_complex_dtype, @@ -231,7 +231,7 @@ def size(self): """ ids, _, ngroup = self.group_info - ids = _ensure_platform_int(ids) + ids = ensure_platform_int(ids) if ngroup: out = np.bincount(ids[ids 
!= -1], minlength=ngroup) else: @@ -260,7 +260,7 @@ def group_info(self): comp_ids, obs_group_ids = self._get_compressed_labels() ngroups = len(obs_group_ids) - comp_ids = _ensure_int64(comp_ids) + comp_ids = ensure_int64(comp_ids) return comp_ids, obs_group_ids, ngroups @cache_readonly @@ -312,7 +312,7 @@ def get_group_levels(self): name_list = [] for ping, labels in zip(self.groupings, self.recons_labels): - labels = _ensure_platform_int(labels) + labels = ensure_platform_int(labels) levels = ping.result_index.take(labels) name_list.append(levels) @@ -464,16 +464,16 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, values = values.view('int64') is_numeric = True elif is_bool_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif is_integer_dtype(values): # we use iNaT for the missing value on ints # so pre-convert to guard this condition if (values == iNaT).any(): - values = _ensure_float64(values) + values = ensure_float64(values) else: values = values.astype('int64', copy=False) elif is_numeric and not is_complex_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) else: values = values.astype(object) @@ -482,7 +482,7 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, kind, how, values, is_numeric) except NotImplementedError: if is_numeric: - values = _ensure_float64(values) + values = ensure_float64(values) func = self._get_cython_function( kind, how, values, is_numeric) else: @@ -528,7 +528,7 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, result, (counts > 0).view(np.uint8)) except ValueError: result = lib.row_bool_subset_object( - _ensure_object(result), + ensure_object(result), (counts > 0).view(np.uint8)) else: result = result[counts > 0] @@ -671,8 +671,8 @@ class BinGrouper(BaseGrouper): def __init__(self, bins, binlabels, filter_empty=False, mutated=False, indexer=None): - self.bins = _ensure_int64(bins) - self.binlabels = 
_ensure_index(binlabels) + self.bins = ensure_int64(bins) + self.binlabels = ensure_index(binlabels) self._filter_empty_groups = filter_empty self.mutated = mutated self.indexer = indexer @@ -737,7 +737,7 @@ def group_info(self): obs_group_ids = np.arange(ngroups) rep = np.diff(np.r_[0, self.bins]) - rep = _ensure_platform_int(rep) + rep = ensure_platform_int(rep) if ngroups == len(self.bins): comp_ids = np.repeat(np.arange(ngroups), rep) else: @@ -808,7 +808,7 @@ class DataSplitter(object): def __init__(self, data, labels, ngroups, axis=0): self.data = data - self.labels = _ensure_int64(labels) + self.labels = ensure_int64(labels) self.ngroups = ngroups self.axis = axis diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 2286033e97d85..b409d695a73e8 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -3,8 +3,8 @@ from pandas.core.indexes.base import (Index, _new_Index, - _ensure_index, - _ensure_index_from_sequences, + ensure_index, + ensure_index_from_sequences, InvalidIndexError) # noqa from pandas.core.indexes.category import CategoricalIndex # noqa from pandas.core.indexes.multi import MultiIndex # noqa @@ -36,7 +36,7 @@ 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', - '_ensure_index', '_ensure_index_from_sequences', + 'ensure_index', 'ensure_index_from_sequences', '_get_combined_index', '_get_objs_combined_axis', '_union_indexes', '_get_consensus_names', @@ -66,7 +66,7 @@ def _get_combined_index(indexes, intersect=False, sort=False): index = index.intersection(other) else: index = _union_indexes(indexes, sort=sort) - index = _ensure_index(index) + index = ensure_index(index) if sort: try: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b9639fc804a36..83b70baf4065b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -23,10 +23,10 @@ from pandas.core.dtypes.missing import isna, array_equivalent from 
pandas.core.dtypes.cast import maybe_cast_to_integer_array from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, - _ensure_categorical, - _ensure_platform_int, + ensure_int64, + ensure_object, + ensure_categorical, + ensure_platform_int, is_integer, is_float, is_dtype_equal, @@ -1867,7 +1867,7 @@ def is_type_compatible(self, kind): def is_all_dates(self): if self._data is None: return False - return is_datetime_array(_ensure_object(self.values)) + return is_datetime_array(ensure_object(self.values)) def __reduce__(self): d = dict(data=self._data) @@ -2071,7 +2071,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): if kwargs: nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) if self._can_hold_na: taken = self._assert_take_fillable(self.values, indices, allow_fill=allow_fill, @@ -2087,7 +2087,7 @@ def take(self, indices, axis=0, allow_fill=True, def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take """ - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: @@ -2679,7 +2679,7 @@ def union(self, other): """ self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_consensus_name(other) @@ -2779,7 +2779,7 @@ def intersection(self, other): """ self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if self.equals(other): return self._get_consensus_name(other) @@ -3234,7 +3234,7 @@ def droplevel(self, level=0): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) - target = 
_ensure_index(target) + target = ensure_index(target) if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) @@ -3242,7 +3242,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): # this fix False and True would be treated as 0 and 1 respectively. # (GH #16877) if target.is_boolean() and self.is_numeric(): - return _ensure_platform_int(np.repeat(-1, target.size)) + return ensure_platform_int(np.repeat(-1, target.size)) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: @@ -3273,7 +3273,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): indexer = self._engine.get_indexer(target._ndarray_values) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) def _convert_tolerance(self, tolerance, target): # override this method on subclasses @@ -3375,7 +3375,7 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance): @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): - target = _ensure_index(target) + target = ensure_index(target) if is_categorical(target): target = target.astype(target.dtype.categories.dtype) pself, ptarget = self._maybe_promote(target) @@ -3389,7 +3389,7 @@ def get_indexer_non_unique(self, target): tgt_values = target._ndarray_values indexer, missing = self._engine.get_indexer_non_unique(tgt_values) - return _ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), missing def get_indexer_for(self, target, **kwargs): """ @@ -3431,7 +3431,7 @@ def groupby(self, values): from .multi import MultiIndex if isinstance(values, MultiIndex): values = values.values - values = _ensure_categorical(values) + values = ensure_categorical(values) result = values._reverse_indexer() # map to the label @@ -3619,7 +3619,7 @@ def reindex(self, target, method=None, level=None, limit=None, attrs.pop('freq', None) # don't preserve freq target = 
self._simple_new(None, dtype=self.dtype, **attrs) else: - target = _ensure_index(target) + target = ensure_index(target) if level is not None: if method is not None: @@ -3667,7 +3667,7 @@ def _reindex_non_unique(self, target): """ - target = _ensure_index(target) + target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) @@ -3676,11 +3676,11 @@ def _reindex_non_unique(self, target): if len(missing): length = np.arange(len(indexer)) - missing = _ensure_platform_int(missing) + missing = ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(length[~check]) + missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(length[check]) + cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels @@ -3754,7 +3754,7 @@ def join(self, other, how='left', level=None, return_indexers=False, return self._join_level(other, level, how=how, return_indexers=return_indexers) - other = _ensure_index(other) + other = ensure_index(other) if len(other) == 0 and how in ('left', 'outer'): join_index = self._shallow_copy() @@ -3881,8 +3881,8 @@ def _join_non_unique(self, other, how='left', return_indexers=False): how=how, sort=True) - left_idx = _ensure_platform_int(left_idx) - right_idx = _ensure_platform_int(right_idx) + left_idx = ensure_platform_int(left_idx) + right_idx = ensure_platform_int(right_idx) join_index = np.asarray(self._ndarray_values.take(left_idx)) mask = left_idx == -1 @@ -3915,7 +3915,7 @@ def _get_leaf_sorter(labels): return np.empty(0, dtype='int64') if len(labels) == 1: - lab = _ensure_int64(labels[0]) + lab = ensure_int64(labels[0]) sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) return sorter @@ -3926,8 +3926,8 @@ def _get_leaf_sorter(labels): tic |= lab[:-1] != lab[1:] starts = 
np.hstack(([True], tic, [True])).nonzero()[0] - lab = _ensure_int64(labels[-1]) - return lib.get_level_sorter(lab, _ensure_int64(starts)) + lab = ensure_int64(labels[-1]) + return lib.get_level_sorter(lab, ensure_int64(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError('Join on level between two MultiIndex objects ' @@ -3959,7 +3959,7 @@ def _get_leaf_sorter(labels): join_index = left[left_indexer] else: - left_lev_indexer = _ensure_int64(left_lev_indexer) + left_lev_indexer = ensure_int64(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) @@ -4018,9 +4018,9 @@ def _get_leaf_sorter(labels): if return_indexers: left_indexer = (None if left_indexer is None - else _ensure_platform_int(left_indexer)) + else ensure_platform_int(left_indexer)) right_indexer = (None if right_indexer is None - else _ensure_platform_int(right_indexer)) + else ensure_platform_int(right_indexer)) return join_index, left_indexer, right_indexer else: return join_index @@ -4064,8 +4064,8 @@ def _join_monotonic(self, other, how='left', return_indexers=False): join_index = self._wrap_joined_index(join_index, other) if return_indexers: - lidx = None if lidx is None else _ensure_platform_int(lidx) - ridx = None if ridx is None else _ensure_platform_int(ridx) + lidx = None if lidx is None else ensure_platform_int(lidx) + ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx else: return join_index @@ -4883,7 +4883,7 @@ def _add_logical_methods_disabled(cls): Index._add_comparison_methods() -def _ensure_index_from_sequences(sequences, names=None): +def ensure_index_from_sequences(sequences, names=None): """Construct an index from sequences of data. A single sequence returns an Index. 
Many sequences returns a @@ -4900,18 +4900,18 @@ def _ensure_index_from_sequences(sequences, names=None): Examples -------- - >>> _ensure_index_from_sequences([[1, 2, 3]], names=['name']) + >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') - >>> _ensure_index_from_sequences([['a', 'a'], ['a', 'b']], - names=['L1', 'L2']) + >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], + names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], labels=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- - _ensure_index + ensure_index """ from .multi import MultiIndex @@ -4923,7 +4923,7 @@ def _ensure_index_from_sequences(sequences, names=None): return MultiIndex.from_arrays(sequences, names=names) -def _ensure_index(index_like, copy=False): +def ensure_index(index_like, copy=False): """ Ensure that we have an index from some index-like object @@ -4939,19 +4939,19 @@ def _ensure_index(index_like, copy=False): Examples -------- - >>> _ensure_index(['a', 'b']) + >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') - >>> _ensure_index([('a', 'a'), ('b', 'c')]) + >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') - >>> _ensure_index([['a', 'a'], ['b', 'c']]) + >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], labels=[[0, 0], [0, 1]]) See Also -------- - _ensure_index_from_sequences + ensure_index_from_sequences """ if isinstance(index_like, Index): if copy: @@ -5009,3 +5009,8 @@ def _trim_front(strings): def _validate_join_method(method): if method not in ['left', 'right', 'inner', 'outer']: raise ValueError('do not recognize join method %s' % method) + + +def default_index(n): + from pandas.core.index import RangeIndex + return RangeIndex(0, n, name=None) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 7c63b3c667c01..a03e478f81caf 100644 --- a/pandas/core/indexes/category.py +++ 
b/pandas/core/indexes/category.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( is_categorical_dtype, - _ensure_platform_int, + ensure_platform_int, is_list_like, is_interval_dtype, is_scalar) @@ -489,7 +489,7 @@ def reindex(self, target, method=None, level=None, limit=None, raise NotImplementedError("argument limit is not implemented for " "CategoricalIndex.reindex") - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if not is_categorical_dtype(target) and not target.is_unique: raise ValueError("cannot reindex with a non-unique indexer") @@ -554,7 +554,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): from pandas.core.arrays.categorical import _recode_for_categories method = missing.clean_reindex_fill_method(method) - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if self.is_unique and self.equals(target): return np.arange(len(self), dtype='intp') @@ -583,23 +583,23 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): codes = self.categories.get_indexer(target) indexer, _ = self._engine.get_indexer_non_unique(codes) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if isinstance(target, CategoricalIndex): # Indexing on codes is more efficient if categories are the same: if target.categories is self.categories: target = target.codes indexer, missing = self._engine.get_indexer_non_unique(target) - return _ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), missing target = target.values codes = self.categories.get_indexer(target) indexer, missing = self._engine.get_indexer_non_unique(codes) - return _ensure_platform_int(indexer), missing + return 
ensure_platform_int(indexer), missing @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): @@ -644,7 +644,7 @@ def _convert_index_indexer(self, keyarr): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.codes, indices, allow_fill=allow_fill, fill_value=fill_value, diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 3f0bdf18f7230..3ae5eb3a8dbf5 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -17,7 +17,7 @@ from pandas._libs.tslibs.timestamps import round_ns from pandas.core.dtypes.common import ( - _ensure_int64, + ensure_int64, is_dtype_equal, is_float, is_integer, @@ -391,7 +391,7 @@ def sort_values(self, return_indexer=False, ascending=True): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_int64(indices) + indices = ensure_int64(indices) maybe_slice = lib.maybe_indices_to_slice(indices, len(self)) if isinstance(maybe_slice, slice): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4732178d552be..7257be421c3e1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -25,7 +25,7 @@ is_list_like, is_scalar, pandas_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna @@ -504,7 +504,7 @@ def _generate_range(cls, start, end, periods, name, freq, tz=None, periods, freq) if tz is not None and getattr(index, 'tz', None) is None: - arr = conversion.tz_localize_to_utc(_ensure_int64(index), + arr = conversion.tz_localize_to_utc(ensure_int64(index), tz, ambiguous=ambiguous) @@ 
-563,7 +563,7 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, values = np.array(values, copy=False) if not is_datetime64_dtype(values): - values = _ensure_int64(values).view(_NS_DTYPE) + values = ensure_int64(values).view(_NS_DTYPE) values = getattr(values, 'values', values) @@ -1607,7 +1607,7 @@ def delete(self, loc): else: if is_list_like(loc): loc = lib.maybe_indices_to_slice( - _ensure_int64(np.array(loc)), len(self)) + ensure_int64(np.array(loc)), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if (loc.start in (0, None) or loc.stop in (len(self), None)): freq = self.freq diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9375a60d0964c..e92f980caf3dc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -8,7 +8,7 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_list_like, is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, @@ -21,7 +21,7 @@ is_number, is_integer) from pandas.core.indexes.base import ( - Index, _ensure_index, + Index, ensure_index, default_pprint, _index_shared_docs) from pandas._libs import Timestamp, Timedelta @@ -700,7 +700,7 @@ def get_value(self, series, key): def get_indexer(self, target, method=None, limit=None, tolerance=None): self._check_method(method) - target = _ensure_index(target) + target = ensure_index(target) target = self._maybe_cast_indexed(target) if self.equals(target): @@ -724,7 +724,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): else: indexer = np.concatenate([self.get_loc(i) for i in target]) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) def _get_reindexer(self, target): """ @@ -799,7 +799,7 @@ def _get_reindexer(self, target): @Appender(_index_shared_docs['get_indexer_non_unique'] % 
_index_doc_kwargs) def get_indexer_non_unique(self, target): - target = self._maybe_cast_indexed(_ensure_index(target)) + target = self._maybe_cast_indexed(ensure_index(target)) return super(IntervalIndex, self).get_indexer_non_unique(target) @Appender(_index_shared_docs['where']) @@ -855,7 +855,7 @@ def insert(self, loc, item): def _as_like_interval_index(self, other): self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if not isinstance(other, IntervalIndex): msg = ('the other index needs to be an IntervalIndex too, but ' 'was type {}').format(other.__class__.__name__) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a791ce1d87264..0d4ceb2783bad 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -14,8 +14,8 @@ from pandas.core.dtypes.dtypes import ( ExtensionDtype, PandasExtensionDtype) from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_platform_int, + ensure_int64, + ensure_platform_int, is_categorical_dtype, is_object_dtype, is_hashable, @@ -36,7 +36,7 @@ from pandas.core.config import get_option from pandas.core.indexes.base import ( - Index, _ensure_index, + Index, ensure_index, InvalidIndexError, _index_shared_docs) from pandas.core.indexes.frozen import ( @@ -302,13 +302,13 @@ def _set_levels(self, levels, level=None, copy=False, validate=True, if level is None: new_levels = FrozenList( - _ensure_index(lev, copy=copy)._shallow_copy() + ensure_index(lev, copy=copy)._shallow_copy() for lev in levels) else: level = [self._get_level_number(l) for l in level] new_levels = list(self._levels) for l, v in zip(level, levels): - new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy() + new_levels[l] = ensure_index(v, copy=copy)._shallow_copy() new_levels = FrozenList(new_levels) if verify_integrity: @@ -1227,7 +1227,7 @@ def lexsort_depth(self): else: return 0 - int64_labels = [_ensure_int64(lab) for lab in self.labels] + int64_labels = 
[ensure_int64(lab) for lab in self.labels] for k in range(self.nlevels, 0, -1): if libalgos.is_lexsorted(int64_labels[:k]): return k @@ -1431,7 +1431,7 @@ def _sort_levels_monotonic(self): lev = lev.take(indexer) # indexer to reorder the labels - indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) lab = algos.take_1d(ri, lab) @@ -1594,7 +1594,7 @@ def __getitem__(self, key): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.labels, indices, allow_fill=allow_fill, fill_value=fill_value, @@ -1895,7 +1895,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): if not ascending: indexer = indexer[::-1] - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_labels = [lab.take(indexer) for lab in self.labels] new_index = MultiIndex(labels=new_labels, levels=self.levels, @@ -1940,11 +1940,11 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) - target = _ensure_index(target) + target = ensure_index(target) # empty indexer if is_list_like(target) and not len(target): - return _ensure_platform_int(np.array([])) + return ensure_platform_int(np.array([])) if not isinstance(target, MultiIndex): try: @@ -1973,7 +1973,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): else: indexer = self._engine.get_indexer(target) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): @@ -2010,12 +2010,12 @@ def reindex(self, target, method=None, 
level=None, limit=None, target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: - target = _ensure_index(target) + target = ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: - target = _ensure_index(target) + target = ensure_index(target) if self.equals(target): indexer = None else: @@ -2399,7 +2399,7 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels): # selected from pandas import Series mapper = Series(indexer) - indexer = labels.take(_ensure_platform_int(indexer)) + indexer = labels.take(ensure_platform_int(indexer)) result = Series(Index(indexer).isin(r).nonzero()[0]) m = result.map(mapper)._ndarray_values @@ -2628,7 +2628,7 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - other_vals = com._values_from_object(_ensure_index(other)) + other_vals = com._values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: @@ -2826,7 +2826,7 @@ def insert(self, loc, item): lev_loc = level.get_loc(k) new_levels.append(level) - new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc)) + new_labels.append(np.insert(ensure_int64(labels), loc, lev_loc)) return MultiIndex(levels=new_levels, labels=new_labels, names=self.names, verify_integrity=False) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a8e0c7f1aaa6a..841d1e69485ca 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -15,7 +15,7 @@ is_period_dtype, is_bool_dtype, pandas_dtype, - _ensure_object) + ensure_object) import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -33,7 +33,7 @@ from pandas.core.arrays.period import PeriodArrayMixin from pandas.core.base import _shared_docs -from pandas.core.indexes.base import _index_shared_docs, _ensure_index +from 
pandas.core.indexes.base import _index_shared_docs, ensure_index from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, @@ -255,7 +255,7 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, "floating point in construction") # anything else, likely an array of strings or periods - data = _ensure_object(data) + data = ensure_object(data) freq = freq or period.extract_freq(data) data = period.extract_ordinals(data, freq) return cls._from_ordinals(data, name=name, freq=freq) @@ -567,7 +567,7 @@ def get_value(self, series, key): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): - target = _ensure_index(target) + target = ensure_index(target) if hasattr(target, 'freq') and target.freq != self.freq: msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4e192548a1f2d..939ec0b79ac6b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -81,7 +81,7 @@ def __new__(cls, start=None, stop=None, step=None, **dict(start._get_data_as_items())) # validate the arguments - def _ensure_int(value, field): + def ensure_int(value, field): msg = ("RangeIndex(...) 
must be called with integers," " {value} was passed for {field}") if not is_scalar(value): @@ -102,18 +102,18 @@ def _ensure_int(value, field): elif start is None: start = 0 else: - start = _ensure_int(start, 'start') + start = ensure_int(start, 'start') if stop is None: stop = start start = 0 else: - stop = _ensure_int(stop, 'stop') + stop = ensure_int(stop, 'stop') if step is None: step = 1 elif step == 0: raise ValueError("Step must not be zero") else: - step = _ensure_int(step, 'step') + step = ensure_int(step, 'step') return cls._simple_new(start, stop, step, name) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 1ed6145f01a44..dc26c9cc0c248 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -11,7 +11,7 @@ is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries @@ -736,7 +736,7 @@ def delete(self, loc): else: if is_list_like(loc): loc = lib.maybe_indices_to_slice( - _ensure_int64(np.array(loc)), len(self)) + ensure_int64(np.array(loc)), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if (loc.start in (0, None) or loc.stop in (len(self), None)): freq = self.freq diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ec06099e3bbd2..8ffc7548059b7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -13,7 +13,7 @@ is_iterator, is_scalar, is_sparse, - _ensure_platform_int) + ensure_platform_int) from pandas.core.dtypes.missing import isna, _infer_fill_value from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender @@ -1483,7 +1483,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_arr_indexer(keyarr) if not labels.is_integer(): - keyarr = _ensure_platform_int(keyarr) + keyarr = ensure_platform_int(keyarr) return labels.take(keyarr) return keyarr diff 
--git a/pandas/core/internals.py b/pandas/core/internals.py index 208d7b8bcf8a7..5a5418dcc1e7f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -21,7 +21,7 @@ CategoricalDtype) from pandas.core.dtypes.common import ( _TD_DTYPE, _NS_DTYPE, - _ensure_int64, _ensure_platform_int, + ensure_int64, ensure_platform_int, is_integer, is_dtype_equal, is_timedelta64_dtype, @@ -65,7 +65,7 @@ import pandas.core.common as com import pandas.core.algorithms as algos -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.indexing import maybe_convert_indices, check_setitem_lengths from pandas.core.arrays import Categorical from pandas.core.indexes.datetimes import DatetimeIndex @@ -1297,7 +1297,7 @@ def shift(self, periods, axis=0, mgr=None): axis = new_values.ndim - axis - 1 if np.prod(new_values.shape): - new_values = np.roll(new_values, _ensure_platform_int(periods), + new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis) axis_indexer = [slice(None)] * self.ndim @@ -3271,7 +3271,7 @@ class BlockManager(PandasObject): '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True): - self.axes = [_ensure_index(ax) for ax in axes] + self.axes = [ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) for block in blocks: @@ -3296,8 +3296,8 @@ def __init__(self, blocks, axes, do_integrity_check=True): def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: - axes = [_ensure_index([])] + [_ensure_index(a) - for a in self.axes[1:]] + axes = [ensure_index([])] + [ensure_index(a) + for a in self.axes[1:]] # preserve dtype if possible if self.ndim == 1: @@ -3321,7 +3321,7 @@ def ndim(self): return len(self.axes) def set_axis(self, axis, new_labels): - new_labels = _ensure_index(new_labels) + new_labels = ensure_index(new_labels) old_len = 
len(self.axes[axis]) new_len = len(new_labels) @@ -3444,7 +3444,7 @@ def unpickle_block(values, mgr_locs): if (isinstance(state, tuple) and len(state) >= 4 and '0.14.1' in state[3]): state = state[3]['0.14.1'] - self.axes = [_ensure_index(ax) for ax in state['axes']] + self.axes = [ensure_index(ax) for ax in state['axes']] self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) for b in state['blocks']) else: @@ -3452,7 +3452,7 @@ def unpickle_block(values, mgr_locs): # little while longer ax_arrays, bvalues, bitems = state[:3] - self.axes = [_ensure_index(ax) for ax in ax_arrays] + self.axes = [ensure_index(ax) for ax in ax_arrays] if len(bitems) == 1 and self.axes[0].equals(bitems[0]): # This is a workaround for pre-0.14.1 pickles that didn't @@ -4386,7 +4386,7 @@ def reindex_axis(self, new_index, axis, method=None, limit=None, """ Conform block manager to new index. """ - new_index = _ensure_index(new_index) + new_index = ensure_index(new_index) new_index, indexer = self.axes[axis].reindex(new_index, method=method, limit=limit) @@ -4665,7 +4665,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=False): 'more than 1 block') block = block[0] else: - self.axes = [_ensure_index(axis)] + self.axes = [ensure_index(axis)] # create the block here if isinstance(block, list): @@ -4891,7 +4891,7 @@ def form_blocks(arrays, names, axes): items_dict = defaultdict(list) extra_locs = [] - names_idx = _ensure_index(names) + names_idx = ensure_index(names) if names_idx.equals(axes[0]): names_indexer = np.arange(len(names_idx)) else: @@ -5209,7 +5209,7 @@ def _factor_indexer(shape, labels): expanded label indexer """ mult = np.array(shape)[::-1].cumprod()[::-1] - return _ensure_platform_int( + return ensure_platform_int( np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) @@ -5229,7 +5229,7 @@ def _get_blkno_placements(blknos, blk_count, group=True): """ - blknos = _ensure_int64(blknos) + blknos = ensure_int64(blknos) # FIXME: blk_count is 
unused, but it may avoid the use of dicts in cython for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index e9b9a734ec5f5..16820dcbb55bc 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -18,7 +18,7 @@ is_scalar, is_integer, needs_i8_conversion, - _ensure_float64) + ensure_float64) from pandas.core.dtypes.cast import infer_dtype_from_array from pandas.core.dtypes.missing import isna @@ -480,7 +480,7 @@ def pad_1d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_1d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.pad_inplace_float64 elif values.dtype == np.object_: _method = algos.pad_inplace_object @@ -506,7 +506,7 @@ def backfill_1d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_1d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.backfill_inplace_float64 elif values.dtype == np.object_: _method = algos.backfill_inplace_object @@ -533,7 +533,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_2d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.pad_2d_inplace_float64 elif values.dtype == np.object_: _method = algos.pad_2d_inplace_object @@ -564,7 +564,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_2d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.backfill_2d_inplace_float64 elif values.dtype == np.object_: _method = 
algos.backfill_2d_inplace_object diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 1ddf77cf71a11..bccc5a587bd83 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -34,7 +34,7 @@ is_list_like, is_scalar, is_extension_array_dtype, - _ensure_object) + ensure_object) from pandas.core.dtypes.cast import ( maybe_upcast_putmask, find_common_type, construct_1d_object_array_from_listlike) @@ -1387,8 +1387,8 @@ def na_op(x, y): if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)): result = op(x, y) # when would this be hit? else: - x = _ensure_object(x) - y = _ensure_object(y) + x = ensure_object(x) + y = ensure_object(y) result = libops.vec_binop(x, y, op) else: # let null fall thru diff --git a/pandas/core/panel.py b/pandas/core/panel.py index a1812cb5801b9..16ade3fae90a1 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -17,12 +17,13 @@ import pandas.core.ops as ops import pandas.core.common as com +import pandas.core.indexes.base as ibase from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import (Index, MultiIndex, _ensure_index, +from pandas.core.index import (Index, MultiIndex, ensure_index, _get_objs_combined_axis) from pandas.io.formats.printing import pprint_thing from pandas.core.indexing import maybe_droplevels @@ -198,7 +199,7 @@ def _init_dict(self, data, axes, dtype=None): # prefilter if haxis passed if haxis is not None: - haxis = _ensure_index(haxis) + haxis = ensure_index(haxis) data = OrderedDict((k, v) for k, v in compat.iteritems(data) if k in haxis) @@ -319,9 +320,9 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): fixed_axes = [] for i, ax in enumerate(axes): if ax is None: - ax = com._default_index(shape[i]) + ax = ibase.default_index(shape[i]) else: - ax = _ensure_index(ax) + ax = ensure_index(ax) 
fixed_axes.append(ax) return create_block_manager_from_blocks([values], fixed_axes) @@ -1536,7 +1537,7 @@ def _extract_axis(self, data, axis=0, intersect=False): if index is None: index = Index([]) - return _ensure_index(index) + return ensure_index(index) Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0, diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b36e9b8d900fd..1d6105cb68bf1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -5,12 +5,13 @@ import numpy as np from pandas import compat, DataFrame, Series, Index, MultiIndex from pandas.core.index import (_get_objs_combined_axis, - _ensure_index, _get_consensus_names, + ensure_index, _get_consensus_names, _all_indexes_same) from pandas.core.arrays.categorical import (_factorize_from_iterable, _factorize_from_iterables) from pandas.core.internals import concatenate_block_managers from pandas.core import common as com +import pandas.core.indexes.base as ibase from pandas.core.generic import NDFrame import pandas.core.dtypes.concat as _concat @@ -477,7 +478,7 @@ def _get_concat_axis(self): if self.axis == 0: indexes = [x.index for x in self.objs] elif self.ignore_index: - idx = com._default_index(len(self.objs)) + idx = ibase.default_index(len(self.objs)) return idx elif self.keys is None: names = [None] * len(self.objs) @@ -497,14 +498,14 @@ def _get_concat_axis(self): if has_names: return Index(names) else: - return com._default_index(len(self.objs)) + return ibase.default_index(len(self.objs)) else: - return _ensure_index(self.keys) + return ensure_index(self.keys) else: indexes = [x._data.axes[self.axis] for x in self.objs] if self.ignore_index: - idx = com._default_index(sum(len(i) for i in indexes)) + idx = ibase.default_index(sum(len(i) for i in indexes)) return idx if self.keys is None: @@ -540,16 +541,16 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): if levels is None: _, levels = 
_factorize_from_iterables(zipped) else: - levels = [_ensure_index(x) for x in levels] + levels = [ensure_index(x) for x in levels] else: zipped = [keys] if names is None: names = [None] if levels is None: - levels = [_ensure_index(keys)] + levels = [ensure_index(keys)] else: - levels = [_ensure_index(x) for x in levels] + levels = [ensure_index(x) for x in levels] if not _all_indexes_same(indexes): label_list = [] @@ -608,7 +609,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): # do something a bit more speedy for hlevel, level in zip(zipped, levels): - hlevel = _ensure_index(hlevel) + hlevel = ensure_index(hlevel) mapped = level.get_indexer(hlevel) mask = mapped == -1 diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e38c069b3c3fb..25d8cb4e804a2 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -31,9 +31,9 @@ is_bool_dtype, is_list_like, is_datetimelike, - _ensure_int64, - _ensure_float64, - _ensure_object, + ensure_int64, + ensure_float64, + ensure_object, _get_dtype) from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.internals import (items_overlap_with_suffix, @@ -1212,9 +1212,9 @@ def _asof_by_function(direction, on_type, by_type): _type_casters = { - 'int64_t': _ensure_int64, - 'double': _ensure_float64, - 'object': _ensure_object, + 'int64_t': ensure_int64, + 'double': ensure_float64, + 'object': ensure_object, } _cython_types = { @@ -1490,8 +1490,8 @@ def _get_single_indexer(join_key, index, sort=False): left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) left_indexer, right_indexer = libjoin.left_outer_join( - _ensure_int64(left_key), - _ensure_int64(right_key), + ensure_int64(left_key), + ensure_int64(right_key), count, sort=sort) return left_indexer, right_indexer @@ -1553,16 +1553,16 @@ def _factorize_keys(lk, rk, sort=True): # Same categories in different orders -> recode rk = _recode_for_categories(rk.codes, rk.categories, 
lk.categories) - lk = _ensure_int64(lk.codes) - rk = _ensure_int64(rk) + lk = ensure_int64(lk.codes) + rk = ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer - lk = _ensure_int64(com._values_from_object(lk)) - rk = _ensure_int64(com._values_from_object(rk)) + lk = ensure_int64(com._values_from_object(lk)) + rk = ensure_int64(com._values_from_object(rk)) else: klass = libhashtable.Factorizer - lk = _ensure_object(lk) - rk = _ensure_object(rk) + lk = ensure_object(lk) + rk = ensure_object(rk) rizer = klass(max(len(lk), len(rk))) @@ -1600,7 +1600,7 @@ def _sort_labels(uniques, left, right): labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) - new_labels = _ensure_int64(new_labels) + new_labels = ensure_int64(new_labels) new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d5d2e594b8d6b..2f2dc1264e996 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -8,7 +8,7 @@ import numpy as np from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_list_like, is_bool_dtype, needs_i8_conversion, is_sparse, is_object_dtype) from pandas.core.dtypes.cast import maybe_promote @@ -141,7 +141,7 @@ def _make_sorted_values_labels(self): ngroups = len(obs_ids) indexer = _algos.groupsort_indexer(comp_index, ngroups)[0] - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) self.sorted_values = algos.take_nd(self.values, indexer, axis=0) self.sorted_labels = [l.take(indexer) for l in to_sort] @@ -156,7 +156,7 @@ def _make_selectors(self): comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) ngroups = len(obs_ids) - comp_index = _ensure_platform_int(comp_index) + comp_index = ensure_platform_int(comp_index) stride = 
self.index.levshape[self.level] + self.lift self.full_shape = ngroups, stride diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index bbdce762feee3..031c94c06d3c8 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -12,7 +12,7 @@ is_timedelta64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, - _ensure_int64) + ensure_int64) import pandas.core.algorithms as algos import pandas.core.nanops as nanops @@ -335,7 +335,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, bins = unique_bins side = 'left' if right else 'right' - ids = _ensure_int64(bins.searchsorted(x, side=side)) + ids = ensure_int64(bins.searchsorted(x, side=side)) if include_lowest: # Numpy 1.9 support: ensure this mask is a Numpy array diff --git a/pandas/core/series.py b/pandas/core/series.py index 0bdb9d9cc23a6..77445159129f2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -32,7 +32,7 @@ is_dict_like, is_scalar, _is_unorderable_exception, - _ensure_platform_int, + ensure_platform_int, pandas_dtype) from pandas.core.dtypes.generic import ( ABCSparseArray, ABCDataFrame, ABCIndexClass) @@ -51,7 +51,7 @@ na_value_for_dtype) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, - Float64Index, _ensure_index) + Float64Index, ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager @@ -71,6 +71,8 @@ import pandas.core.common as com import pandas.core.nanops as nanops +import pandas.core.indexes.base as ibase + import pandas.io.formats.format as fmt from pandas.util._decorators import ( Appender, deprecate, deprecate_kwarg, Substitution) @@ -187,7 +189,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, else: if index is not None: - index = _ensure_index(index) + index = ensure_index(index) if data is None: data = {} @@ -256,7 +258,7 @@ def __init__(self, data=None, 
index=None, dtype=None, name=None, if index is None: if not is_list_like(data): data = [data] - index = com._default_index(len(data)) + index = ibase.default_index(len(data)) elif is_list_like(data): # a scalar numpy array is list-like but doesn't @@ -373,7 +375,7 @@ def _set_axis(self, axis, labels, fastpath=False): """ override generic, we want to set the _typ here """ if not fastpath: - labels = _ensure_index(labels) + labels = ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: @@ -1202,7 +1204,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: - new_index = com._default_index(len(self)) + new_index = ibase.default_index(len(self)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -2079,7 +2081,7 @@ def __rmatmul__(self, other): @deprecate_kwarg(old_arg_name='v', new_arg_name='value') def searchsorted(self, value, side='left', sorter=None): if sorter is not None: - sorter = _ensure_platform_int(sorter) + sorter = ensure_platform_int(sorter) return self._values.searchsorted(Series(value)._values, side=side, sorter=sorter) @@ -2500,7 +2502,7 @@ def _try_kind_sort(arr): bad = isna(arr) good = ~bad - idx = com._default_index(len(self)) + idx = ibase.default_index(len(self)) argsorted = _try_kind_sort(arr[good]) @@ -2676,7 +2678,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, indexer = nargsort(index, kind=kind, ascending=ascending, na_position=na_position) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_index = index.take(indexer) new_index = new_index._sort_levels_monotonic() @@ -3537,7 +3539,7 @@ def memory_usage(self, index=True, deep=False): @Appender(generic._shared_docs['_take']) def _take(self, indices, axis=0, is_copy=False): - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) new_index = self.index.take(indices) if 
is_categorical_dtype(self): diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 212f44e55c489..5aa9ea658482b 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -3,8 +3,8 @@ import numpy as np from pandas.compat import long, string_types, PY3 from pandas.core.dtypes.common import ( - _ensure_platform_int, - _ensure_int64, + ensure_platform_int, + ensure_int64, is_list_like, is_categorical_dtype) from pandas.core.dtypes.cast import infer_dtype_from_array @@ -57,7 +57,7 @@ def maybe_lift(lab, size): # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) - labels = map(_ensure_int64, labels) + labels = map(ensure_int64, labels) if not xnull: labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) @@ -338,9 +338,9 @@ def get_group_index_sorter(group_index, ngroups): do_groupsort = (count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))) if do_groupsort: - sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index), + sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) - return _ensure_platform_int(sorter) + return ensure_platform_int(sorter) else: return group_index.argsort(kind='mergesort') @@ -355,7 +355,7 @@ def compress_group_index(group_index, sort=True): size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) - group_index = _ensure_int64(group_index) + group_index = ensure_int64(group_index) # note, group labels come out ascending (ie, 1,2,3 etc) comp_ids, obs_group_ids = table.get_labels_groupby(group_index) @@ -462,7 +462,7 @@ def sort_mixed(values): if not is_list_like(labels): raise TypeError("Only list-like objects or None are allowed to be" "passed to safe_sort as labels") - labels = _ensure_platform_int(np.asarray(labels)) + labels = ensure_platform_int(np.asarray(labels)) from pandas import Index if not assume_unique and not Index(values).is_unique: @@ -474,7 +474,7 @@ def 
sort_mixed(values): values, algorithms._hashtables) t = hash_klass(len(values)) t.map_locations(values) - sorter = _ensure_platform_int(t.lookup(ordered)) + sorter = ensure_platform_int(t.lookup(ordered)) reverse_indexer = np.empty(len(sorter), dtype=np.int_) reverse_indexer.put(sorter, np.arange(len(sorter))) @@ -487,4 +487,4 @@ def sort_mixed(values): new_labels = reverse_indexer.take(labels, mode='wrap') np.putmask(new_labels, mask, na_sentinel) - return ordered, _ensure_platform_int(new_labels) + return ordered, ensure_platform_int(new_labels) diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index ff58f7d104ff9..6f0ffbff22028 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -16,7 +16,7 @@ from pandas.core.dtypes.generic import ABCSparseSeries from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_float, is_integer, is_object_dtype, is_integer_dtype, @@ -468,7 +468,7 @@ def take(self, indices, axis=0, allow_fill=True, # return scalar return self[indices] - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) n = len(self) if allow_fill and fill_value is not None: # allow -1 to indicate self.fill_value, diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 1feddf004058a..f7071061d07ab 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -12,10 +12,10 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.cast import maybe_upcast, find_common_type -from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse +from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse from pandas.compat.numpy import function as nv -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.series import Series from pandas.core.frame import DataFrame, extract_index, _prep_ndarray import 
pandas.core.algorithms as algos @@ -27,6 +27,7 @@ from pandas.util._decorators import Appender import pandas.core.ops as ops import pandas.core.common as com +import pandas.core.indexes.base as ibase _shared_doc_kwargs = dict(klass='SparseDataFrame') @@ -111,7 +112,7 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, if index is None: index = Index([]) else: - index = _ensure_index(index) + index = ensure_index(index) if columns is None: columns = Index([]) @@ -139,7 +140,7 @@ def _constructor(self): def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: keys = com._dict_keys_to_ordered_list(data) @@ -219,9 +220,9 @@ def _init_spmatrix(self, data, index, columns, dtype=None, def _prep_index(self, data, index, columns): N, K = data.shape if index is None: - index = com._default_index(N) + index = ibase.default_index(N) if columns is None: - columns = com._default_index(K) + columns = ibase.default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: {columns} vs. 
{K}' @@ -650,7 +651,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, index=index, columns=self.columns).__finalize__(self) indexer = self.index.get_indexer(index, method, limit=limit) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) mask = indexer == -1 need_mask = mask.any() @@ -926,7 +927,7 @@ def to_manager(sdf, columns, index): """ # from BlockManager perspective - axes = [_ensure_index(columns), _ensure_index(index)] + axes = [ensure_index(columns), ensure_index(index)] return create_block_manager_from_arrays( [sdf[c] for c in columns], columns, axes) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index fb337d71fcf8d..96ee5b7954f45 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -11,11 +11,12 @@ from pandas.core.dtypes.missing import isna, notna from pandas.compat.numpy import function as nv -from pandas.core.index import Index, _ensure_index, InvalidIndexError +from pandas.core.index import Index, ensure_index, InvalidIndexError from pandas.core.series import Series from pandas.core.internals import SingleBlockManager from pandas.core import generic import pandas.core.common as com +import pandas.core.indexes.base as ibase import pandas.core.ops as ops import pandas._libs.index as libindex from pandas.util._decorators import Appender @@ -149,8 +150,8 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block', data.fill(v) if index is None: - index = com._default_index(sparse_index.length) - index = _ensure_index(index) + index = ibase.default_index(sparse_index.length) + index = ensure_index(index) # create/copy the manager if isinstance(data, SingleBlockManager): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c8204faa55cf8..83de83ab76a2c 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -14,7 +14,7 @@ _guess_datetime_format) from 
pandas.core.dtypes.common import ( - _ensure_object, + ensure_object, is_datetime64_ns_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -216,7 +216,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') - arg = _ensure_object(arg) + arg = ensure_object(arg) require_iso8601 = False if infer_datetime_format and format is None: @@ -787,7 +787,7 @@ def _convert_listlike(arg, format): raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') - arg = _ensure_object(arg) + arg = ensure_object(arg) if infer_time_format and format is None: format = _guess_time_format_for_array(arg) diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index f1d13ccf36cf6..4bb5c223d1bcc 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -6,7 +6,7 @@ is_decimal, is_datetime_or_timedelta_dtype, is_number, - _ensure_object) + ensure_object) from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas._libs import lib @@ -130,7 +130,7 @@ def to_numeric(arg, errors='raise', downcast=None): elif is_datetime_or_timedelta_dtype(values): values = values.astype(np.int64) else: - values = _ensure_object(values) + values = ensure_object(values) coerce_numeric = False if errors in ('ignore', 'raise') else True values = lib.maybe_convert_numeric(values, set(), coerce_numeric=coerce_numeric) diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index ed2659973cc6a..63ab120833ba1 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -9,7 +9,7 @@ array_to_timedelta64) from pandas.core.dtypes.common import ( - _ensure_object, + ensure_object, is_integer_dtype, is_timedelta64_dtype, is_list_like) @@ -171,7 +171,7 @@ def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): 
'timedelta64[ns]', copy=False) else: try: - value = array_to_timedelta64(_ensure_object(arg), + value = array_to_timedelta64(ensure_object(arg), unit=unit, errors=errors) value = value.astype('timedelta64[ns]', copy=False) except ValueError: diff --git a/pandas/core/window.py b/pandas/core/window.py index e20db4df2cb2a..6b6f27bcb3863 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -27,7 +27,7 @@ needs_i8_conversion, is_timedelta64_dtype, is_list_like, - _ensure_float64, + ensure_float64, is_scalar) from pandas.core.base import PandasObject, SelectionMixin @@ -208,9 +208,9 @@ def _prep_values(self, values=None, kill_inf=True): # GH #12373 : rolling functions error on float32 data # make sure the data is coerced to float64 if is_float_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif is_integer_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif needs_i8_conversion(values.dtype): raise NotImplementedError("ops for {action} for this " "dtype {dtype} are not " @@ -219,7 +219,7 @@ def _prep_values(self, values=None, kill_inf=True): dtype=values.dtype)) else: try: - values = _ensure_float64(values) + values = ensure_float64(values) except (ValueError, TypeError): raise TypeError("cannot handle this type -> {0}" "".format(values.dtype)) @@ -265,7 +265,7 @@ def _wrap_results(self, results, blocks, obj): """ from pandas import Series, concat - from pandas.core.index import _ensure_index + from pandas.core.index import ensure_index final = [] for result, block in zip(results, blocks): @@ -286,7 +286,7 @@ def _wrap_results(self, results, blocks, obj): if self._selection is not None: - selection = _ensure_index(self._selection) + selection = ensure_index(self._selection) # need to reorder to include original location of # the on column (if its not already there) @@ -857,7 +857,7 @@ def _apply(self, func, name=None, window=None, center=None, def func(arg, window, 
min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats - arg = _ensure_float64(arg) + arg = ensure_float64(arg) return cfunc(arg, window, minp, indexi, closed, **kwargs) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 5f97447d29cbc..f69e4a484d177 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -36,7 +36,7 @@ from pandas.core.dtypes.generic import ABCSparseArray, ABCMultiIndex from pandas.core.base import PandasObject import pandas.core.common as com -from pandas.core.index import Index, _ensure_index +from pandas.core.index import Index, ensure_index from pandas.core.config import get_option, set_option from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex @@ -426,7 +426,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, self.kwds = kwds if columns is not None: - self.columns = _ensure_index(columns) + self.columns = ensure_index(columns) self.frame = self.frame[self.columns] else: self.columns = frame.columns diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 65df2bffb4abf..486040fa52f35 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -16,7 +16,7 @@ from pandas.compat import (range, lrange, PY3, StringIO, lzip, zip, string_types, map, u) from pandas.core.dtypes.common import ( - is_integer, _ensure_object, + is_integer, ensure_object, is_list_like, is_integer_dtype, is_float, is_dtype_equal, is_object_dtype, is_string_dtype, @@ -25,7 +25,7 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import astype_nansafe from pandas.core.index import (Index, MultiIndex, RangeIndex, - _ensure_index_from_sequences) + ensure_index_from_sequences) from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.arrays import Categorical @@ -1521,7 +1521,7 @@ def _agg_index(self, index, try_parse_dates=True): 
arrays.append(arr) names = self.index_names - index = _ensure_index_from_sequences(arrays, names) + index = ensure_index_from_sequences(arrays, names) return index @@ -1889,7 +1889,7 @@ def read(self, nrows=None): try_parse_dates=True) arrays.append(values) - index = _ensure_index_from_sequences(arrays) + index = ensure_index_from_sequences(arrays) if self.usecols is not None: names = self._filter_usecols(names) @@ -3005,7 +3005,7 @@ def converter(*date_cols): try: return tools.to_datetime( - _ensure_object(strs), + ensure_object(strs), utc=None, box=False, dayfirst=dayfirst, @@ -3222,7 +3222,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): index = Index([]) else: data = [Series([], dtype=dtype[name]) for name in index_names] - index = _ensure_index_from_sequences(data, names=index_names) + index = ensure_index_from_sequences(data, names=index_names) index_col.sort() for i, n in enumerate(index_col): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 35e244bf2f9eb..f2d6fe01e0573 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -28,9 +28,9 @@ is_timedelta64_dtype, is_datetime64tz_dtype, is_datetime64_dtype, - _ensure_object, - _ensure_int64, - _ensure_platform_int) + ensure_object, + ensure_int64, + ensure_platform_int) from pandas.core.dtypes.missing import array_equivalent from pandas.core import config @@ -44,7 +44,7 @@ from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd, _factor_indexer, _block_shape) -from pandas.core.index import _ensure_index +from pandas.core.index import ensure_index from pandas.core.computation.pytables import Expr, maybe_expression from pandas.io.common import _stringify_path @@ -3725,8 +3725,8 @@ def process_filter(field, filt): elif field in axis_values: # we need to filter on this dimension - values = _ensure_index(getattr(obj, field).values) - filt = _ensure_index(filt) + values = ensure_index(getattr(obj, field).values) + filt = ensure_index(filt) # 
hack until we support reversed dim flags if isinstance(obj, DataFrame): @@ -3892,8 +3892,8 @@ def read(self, where=None, columns=None, **kwargs): if len(unique(key)) == len(key): sorter, _ = algos.groupsort_indexer( - _ensure_int64(key), np.prod(N)) - sorter = _ensure_platform_int(sorter) + ensure_int64(key), np.prod(N)) + sorter = ensure_platform_int(sorter) # create the objs for c in self.values_axes: @@ -3938,7 +3938,7 @@ def read(self, where=None, columns=None, **kwargs): unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_index = long_index.take(indexer) new_values = lp.values.take(indexer, axis=0) @@ -4236,7 +4236,7 @@ def read(self, where=None, columns=None, **kwargs): for a in self.values_axes: # we could have a multi-index constructor here - # _ensure_index doesn't recognized our list-of-tuples here + # ensure_index doesn't recognized our list-of-tuples here if info.get('type') == 'MultiIndex': cols = MultiIndex.from_tuples(a.values) else: @@ -4437,18 +4437,18 @@ def is_transposed(self): def _reindex_axis(obj, axis, labels, other=None): ax = obj._get_axis(axis) - labels = _ensure_index(labels) + labels = ensure_index(labels) # try not to reindex even if other is provided # if it equals our current index if other is not None: - other = _ensure_index(other) + other = ensure_index(other) if (other is None or labels.equals(other)) and labels.equals(ax): return obj - labels = _ensure_index(labels.unique()) + labels = ensure_index(labels.unique()) if other is not None: - labels = _ensure_index(other.unique()) & labels + labels = ensure_index(other.unique()) & labels if not labels.equals(ax): slicer = [slice(None, None)] * obj.ndim slicer[axis] = labels @@ -4656,7 +4656,7 @@ def _convert_string_array(data, encoding, errors, itemsize=None): # create the sized dtype if itemsize is None: - ensured = _ensure_object(data.ravel()) + 
ensured = ensure_object(data.ravel()) itemsize = libwriters.max_len_string_array(ensured) data = np.asarray(data, dtype="S%d" % itemsize) @@ -4688,7 +4688,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None, encoding = _ensure_encoding(encoding) if encoding is not None and len(data): - itemsize = libwriters.max_len_string_array(_ensure_object(data)) + itemsize = libwriters.max_len_string_array(ensure_object(data)) if compat.PY3: dtype = "U{0}".format(itemsize) else: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 4ce2ed4e36139..efd5f337fdf69 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -28,7 +28,7 @@ zip, BytesIO) from pandas.core.arrays import Categorical from pandas.core.base import StringMixin -from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object, +from pandas.core.dtypes.common import (is_categorical_dtype, ensure_object, is_datetime64_dtype) from pandas.core.frame import DataFrame from pandas.core.series import Series @@ -1818,7 +1818,7 @@ def _dtype_to_stata_type(dtype, column): if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? - itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) return max(itemsize, 1) elif dtype == np.float64: return 255 @@ -1863,7 +1863,7 @@ def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Writing general object arrays is not supported') - itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) if itemsize > max_str_len: if dta_version >= 117: return '%9s' @@ -2418,7 +2418,7 @@ def _dtype_to_stata_type_117(dtype, column, force_strl): if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? 
- itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) itemsize = max(itemsize, 1) if itemsize <= 2045: return itemsize diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index f81767156b255..5f1f6dc5bca87 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -35,8 +35,8 @@ is_bool, is_scalar, is_scipy_sparse, - _ensure_int32, - _ensure_categorical) + ensure_int32, + ensure_categorical) from pandas.util import testing as tm import pandas.util._test_decorators as td @@ -1217,19 +1217,19 @@ def test_is_scipy_sparse(spmatrix): # noqa: F811 def test_ensure_int32(): values = np.arange(10, dtype=np.int32) - result = _ensure_int32(values) + result = ensure_int32(values) assert (result.dtype == np.int32) values = np.arange(10, dtype=np.int64) - result = _ensure_int32(values) + result = ensure_int32(values) assert (result.dtype == np.int32) def test_ensure_categorical(): values = np.arange(10, dtype=np.int32) - result = _ensure_categorical(values) + result = ensure_categorical(values) assert (result.dtype == 'category') values = Categorical(values) - result = _ensure_categorical(values) + result = ensure_categorical(values) tm.assert_categorical_equal(result, values) diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 9a838341c7d8c..9dcc13c15736f 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -5,7 +5,7 @@ from numpy import nan import numpy as np -from pandas.core.dtypes.common import _ensure_int64 +from pandas.core.dtypes.common import ensure_int64 from pandas import Index, isna from pandas.core.groupby.ops import generate_bins_generic from pandas.util.testing import assert_almost_equal @@ -90,8 +90,8 @@ def _check(dtype): bins = np.array([6, 12, 20]) out = np.zeros((3, 4), dtype) counts = np.zeros(len(out), 
dtype=np.int64) - labels = _ensure_int64(np.repeat(np.arange(3), - np.diff(np.r_[0, bins]))) + labels = ensure_int64(np.repeat(np.arange(3), + np.diff(np.r_[0, bins]))) func = getattr(groupby, 'group_ohlc_%s' % dtype) func(out, counts, obj[:, None], labels) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 7fccf1f57a886..57b04bfd82528 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -7,7 +7,7 @@ from pandas.util import testing as tm from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range from pandas.core.dtypes.common import ( - _ensure_platform_int, is_timedelta64_dtype) + ensure_platform_int, is_timedelta64_dtype) from pandas.compat import StringIO from pandas._libs import groupby @@ -76,7 +76,7 @@ def test_transform_fast(): grp = df.groupby('id')['val'] values = np.repeat(grp.mean().values, - _ensure_platform_int(grp.count().values)) + ensure_platform_int(grp.count().values)) expected = pd.Series(values, index=df.index, name='val') result = grp.transform(np.mean) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 639e51e9361ab..7b105390db40b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -21,7 +21,7 @@ DataFrame, Float64Index, Int64Index, UInt64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, isna) -from pandas.core.index import _get_combined_index, _ensure_index_from_sequences +from pandas.core.index import _get_combined_index, ensure_index_from_sequences from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat @@ -2455,7 +2455,7 @@ class TestIndexUtils(object): names=['L1', 'L2'])), ]) def test_ensure_index_from_sequences(self, data, names, expected): - result = _ensure_index_from_sequences(data, names) + result = ensure_index_from_sequences(data, names) 
tm.assert_index_equal(result, expected)
Also updated numpy_helper to not use things from numpy's deprecated C API. This won't get rid of the warnings since cython still causes them, but it's still nice. Not sure how to lint for this (or if we really want to), will see if google knows.
https://api.github.com/repos/pandas-dev/pandas/pulls/21870
2018-07-12T01:46:58Z
2018-07-17T12:21:04Z
2018-07-17T12:21:04Z
2020-04-05T17:41:20Z
BUG: np array indexer modifed in iloc
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..e255f1208869e 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -363,8 +363,7 @@ Indexing - ``DataFrame.__getitem__`` now accepts dictionaries and dictionary keys as list-likes of labels, consistently with ``Series.__getitem__`` (:issue:`21294`) - Fixed ``DataFrame[np.nan]`` when columns are non-unique (:issue:`21428`) - Bug when indexing :class:`DatetimeIndex` with nanosecond resolution dates and timezones (:issue:`11679`) - -- +- Bug where indexing with a Numpy array containing negative values would mutate the indexer (:issue:`21867`) Missing ^^^^^^^ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 80396a9149d5a..ec06099e3bbd2 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2596,6 +2596,7 @@ def maybe_convert_indices(indices, n): mask = indices < 0 if mask.any(): + indices = indices.copy() indices[mask] += n mask = (indices >= n) | (indices < 0) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 49047e1da0996..81397002abd2b 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -126,6 +126,21 @@ def test_iloc_getitem_neg_int(self): typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) + def test_iloc_array_not_mutating_negative_indices(self): + + # GH 21867 + array_with_neg_numbers = np.array([1, 2, -1]) + array_copy = array_with_neg_numbers.copy() + df = pd.DataFrame({ + 'A': [100, 101, 102], + 'B': [103, 104, 105], + 'C': [106, 107, 108]}, + index=[1, 2, 3]) + df.iloc[array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + df.iloc[:, array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + def test_iloc_getitem_list_int(self): # list of ints
- [ x ] closes #20852 - [ x ] tests passed - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21867
2018-07-11T21:55:39Z
2018-07-14T14:50:22Z
2018-07-14T14:50:22Z
2018-07-14T14:50:29Z
Replaced PANDAS_DATETIMEUNIT with NPY_DATETIMEUNIT
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 89753ccf7d773..11e1e6522ef3b 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -21,6 +21,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt #include <numpy/arrayobject.h> #include <numpy/arrayscalars.h> +#include <numpy/ndarraytypes.h> #include "np_datetime.h" #if PY_MAJOR_VERSION >= 3 @@ -511,21 +512,21 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return -1; } -npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, +npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d) { - npy_datetime result = PANDAS_DATETIME_NAT; + npy_datetime result = NPY_DATETIME_NAT; convert_datetimestruct_to_datetime(fr, d, &result); return result; } -void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, +void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) { convert_datetime_to_datetimestruct(fr, val, result); } void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result) { convert_timedelta_to_timedeltastruct(fr, val, result); } @@ -537,15 +538,15 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta val, * * Returns 0 on success, -1 on failure. 
*/ -int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, +int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, const pandas_datetimestruct *dts, npy_datetime *out) { npy_datetime ret; - if (base == PANDAS_FR_Y) { + if (base == NPY_FR_Y) { /* Truncate to the year */ ret = dts->year - 1970; - } else if (base == PANDAS_FR_M) { + } else if (base == NPY_FR_M) { /* Truncate to the month */ ret = 12 * (dts->year - 1970) + (dts->month - 1); } else { @@ -553,7 +554,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, npy_int64 days = get_datetimestruct_days(dts); switch (base) { - case PANDAS_FR_W: + case NPY_FR_W: /* Truncate to weeks */ if (days >= 0) { ret = days / 7; @@ -561,31 +562,31 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, ret = (days - 6) / 7; } break; - case PANDAS_FR_D: + case NPY_FR_D: ret = days; break; - case PANDAS_FR_h: + case NPY_FR_h: ret = days * 24 + dts->hour; break; - case PANDAS_FR_m: + case NPY_FR_m: ret = (days * 24 + dts->hour) * 60 + dts->min; break; - case PANDAS_FR_s: + case NPY_FR_s: ret = ((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec; break; - case PANDAS_FR_ms: + case NPY_FR_ms: ret = (((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000 + dts->us / 1000; break; - case PANDAS_FR_us: + case NPY_FR_us: ret = (((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + dts->us; break; - case PANDAS_FR_ns: + case NPY_FR_ns: ret = ((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + @@ -593,7 +594,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000 + dts->ps / 1000; break; - case PANDAS_FR_ps: + case NPY_FR_ps: ret = ((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + @@ -601,7 +602,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000000 + dts->ps; break; - case PANDAS_FR_fs: + case NPY_FR_fs: /* only 2.6 hours */ ret = (((((days * 24 + dts->hour) * 60 + dts->min) 
* 60 + dts->sec) * @@ -612,7 +613,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000 + dts->as / 1000; break; - case PANDAS_FR_as: + case NPY_FR_as: /* only 9.2 secs */ ret = (((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * @@ -640,7 +641,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, /* * Converts a datetime based on the given metadata into a datetimestruct */ -int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, +int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, pandas_datetimestruct *out) { npy_int64 perday; @@ -656,11 +657,11 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, * for negative values. */ switch (base) { - case PANDAS_FR_Y: + case NPY_FR_Y: out->year = 1970 + dt; break; - case PANDAS_FR_M: + case NPY_FR_M: if (dt >= 0) { out->year = 1970 + dt / 12; out->month = dt % 12 + 1; @@ -670,16 +671,16 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, } break; - case PANDAS_FR_W: + case NPY_FR_W: /* A week is 7 days */ set_datetimestruct_days(dt * 7, out); break; - case PANDAS_FR_D: + case NPY_FR_D: set_datetimestruct_days(dt, out); break; - case PANDAS_FR_h: + case NPY_FR_h: perday = 24LL; if (dt >= 0) { @@ -693,7 +694,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->hour = dt; break; - case PANDAS_FR_m: + case NPY_FR_m: perday = 24LL * 60; if (dt >= 0) { @@ -708,7 +709,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->min = dt % 60; break; - case PANDAS_FR_s: + case NPY_FR_s: perday = 24LL * 60 * 60; if (dt >= 0) { @@ -724,7 +725,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->sec = dt % 60; break; - case PANDAS_FR_ms: + case NPY_FR_ms: perday = 24LL * 60 * 60 * 1000; if (dt >= 0) { @@ -741,7 +742,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->us = (dt % 1000LL) * 1000; break; - case PANDAS_FR_us: + case NPY_FR_us: 
perday = 24LL * 60LL * 60LL * 1000LL * 1000LL; if (dt >= 0) { @@ -758,7 +759,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->us = dt % 1000000LL; break; - case PANDAS_FR_ns: + case NPY_FR_ns: perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL; if (dt >= 0) { @@ -776,7 +777,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->ps = (dt % 1000LL) * 1000; break; - case PANDAS_FR_ps: + case NPY_FR_ps: perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000; if (dt >= 0) { @@ -794,7 +795,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->ps = dt % 1000000LL; break; - case PANDAS_FR_fs: + case NPY_FR_fs: /* entire range is only +- 2.6 hours */ if (dt >= 0) { out->hour = dt / (60 * 60 * 1000000000000000LL); @@ -821,7 +822,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, } break; - case PANDAS_FR_as: + case NPY_FR_as: /* entire range is only +- 9.2 seconds */ if (dt >= 0) { out->sec = (dt / 1000000000000000000LL) % 60; @@ -861,7 +862,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, * * Returns 0 on success, -1 on failure. */ -int convert_timedelta_to_timedeltastruct(PANDAS_DATETIMEUNIT base, +int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, npy_timedelta td, pandas_timedeltastruct *out) { npy_int64 frac; @@ -874,7 +875,7 @@ int convert_timedelta_to_timedeltastruct(PANDAS_DATETIMEUNIT base, memset(out, 0, sizeof(pandas_timedeltastruct)); switch (base) { - case PANDAS_FR_ns: + case NPY_FR_ns: // put frac in seconds if (td < 0 && td % (1000LL * 1000LL * 1000LL) != 0) diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index b6c0852bfe764..5644ac036f198 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -19,29 +19,6 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #include <numpy/ndarraytypes.h> -typedef enum { - PANDAS_FR_Y = 0, // Years - PANDAS_FR_M = 1, // Months - PANDAS_FR_W = 2, // Weeks - // Gap where NPY_FR_B was - PANDAS_FR_D = 4, // Days - PANDAS_FR_h = 5, // hours - PANDAS_FR_m = 6, // minutes - PANDAS_FR_s = 7, // seconds - PANDAS_FR_ms = 8, // milliseconds - PANDAS_FR_us = 9, // microseconds - PANDAS_FR_ns = 10, // nanoseconds - PANDAS_FR_ps = 11, // picoseconds - PANDAS_FR_fs = 12, // femtoseconds - PANDAS_FR_as = 13, // attoseconds - PANDAS_FR_GENERIC = 14 // Generic, unbound units, can - // convert to anything -} PANDAS_DATETIMEUNIT; - -#define PANDAS_DATETIME_NUMUNITS 13 - -#define PANDAS_DATETIME_NAT NPY_MIN_INT64 - typedef struct { npy_int64 year; npy_int32 month, day, hour, min, sec, us, ps, as; @@ -61,14 +38,14 @@ extern const pandas_datetimestruct _NS_MAX_DTS; int convert_pydatetime_to_datetimestruct(PyObject *obj, pandas_datetimestruct *out); -npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, +npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d); -void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, +void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, pandas_datetimestruct *result); void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result); int dayofweek(int y, int m, int d); @@ -103,7 +80,7 @@ add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes); int -convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, +convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, pandas_datetimestruct *out); diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c index 2ea69e2ac1636..b1852094c301e 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.c +++ 
b/pandas/_libs/src/datetime/np_datetime_strings.c @@ -27,7 +27,8 @@ This file implements string parsing and creation for NumPy datetime. #include <time.h> #include <numpy/arrayobject.h> -#include "numpy/arrayscalars.h" +#include <numpy/arrayscalars.h> +#include <numpy/ndarraytypes.h> #include "np_datetime.h" #include "np_datetime_strings.h" @@ -514,37 +515,36 @@ int parse_iso_8601_datetime(char *str, int len, * Provides a string length to use for converting datetime * objects with the given local and unit settings. */ -int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { +int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { int len = 0; switch (base) { /* Generic units can only be used to represent NaT */ - /*case PANDAS_FR_GENERIC:*/ /* return 4;*/ - case PANDAS_FR_as: + case NPY_FR_as: len += 3; /* "###" */ - case PANDAS_FR_fs: + case NPY_FR_fs: len += 3; /* "###" */ - case PANDAS_FR_ps: + case NPY_FR_ps: len += 3; /* "###" */ - case PANDAS_FR_ns: + case NPY_FR_ns: len += 3; /* "###" */ - case PANDAS_FR_us: + case NPY_FR_us: len += 3; /* "###" */ - case PANDAS_FR_ms: + case NPY_FR_ms: len += 4; /* ".###" */ - case PANDAS_FR_s: + case NPY_FR_s: len += 3; /* ":##" */ - case PANDAS_FR_m: + case NPY_FR_m: len += 3; /* ":##" */ - case PANDAS_FR_h: + case NPY_FR_h: len += 3; /* "T##" */ - case PANDAS_FR_D: - case PANDAS_FR_W: + case NPY_FR_D: + case NPY_FR_W: len += 3; /* "-##" */ - case PANDAS_FR_M: + case NPY_FR_M: len += 3; /* "-##" */ - case PANDAS_FR_Y: + case NPY_FR_Y: len += 21; /* 64-bit year */ break; default: @@ -552,7 +552,7 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { break; } - if (base >= PANDAS_FR_h) { + if (base >= NPY_FR_h) { if (local) { len += 5; /* "+####" or "-####" */ } else { @@ -581,7 +581,7 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { * string was too short). 
*/ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, - PANDAS_DATETIMEUNIT base) { + NPY_DATETIMEUNIT base) { char *substr = outstr, sublen = outlen; int tmplen; @@ -591,8 +591,8 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, * TODO: Could print weeks with YYYY-Www format if the week * epoch is a Monday. */ - if (base == PANDAS_FR_W) { - base = PANDAS_FR_D; + if (base == NPY_FR_W) { + base = NPY_FR_D; } /* YEAR */ @@ -614,7 +614,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= tmplen; /* Stop if the unit is years */ - if (base == PANDAS_FR_Y) { + if (base == NPY_FR_Y) { if (sublen > 0) { *substr = '\0'; } @@ -638,7 +638,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is months */ - if (base == PANDAS_FR_M) { + if (base == NPY_FR_M) { if (sublen > 0) { *substr = '\0'; } @@ -662,7 +662,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is days */ - if (base == PANDAS_FR_D) { + if (base == NPY_FR_D) { if (sublen > 0) { *substr = '\0'; } @@ -686,7 +686,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is hours */ - if (base == PANDAS_FR_h) { + if (base == NPY_FR_h) { goto add_time_zone; } @@ -707,7 +707,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is minutes */ - if (base == PANDAS_FR_m) { + if (base == NPY_FR_m) { goto add_time_zone; } @@ -728,7 +728,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is seconds */ - if (base == PANDAS_FR_s) { + if (base == NPY_FR_s) { goto add_time_zone; } @@ -753,7 +753,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 4; /* Stop if 
the unit is milliseconds */ - if (base == PANDAS_FR_ms) { + if (base == NPY_FR_ms) { goto add_time_zone; } @@ -774,7 +774,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is microseconds */ - if (base == PANDAS_FR_us) { + if (base == NPY_FR_us) { goto add_time_zone; } @@ -795,7 +795,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is nanoseconds */ - if (base == PANDAS_FR_ns) { + if (base == NPY_FR_ns) { goto add_time_zone; } @@ -816,7 +816,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is picoseconds */ - if (base == PANDAS_FR_ps) { + if (base == NPY_FR_ps) { goto add_time_zone; } @@ -837,7 +837,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is femtoseconds */ - if (base == PANDAS_FR_fs) { + if (base == NPY_FR_fs) { goto add_time_zone; } diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h index ef7fe200aa58e..ff1d26e5168b5 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/src/datetime/np_datetime_strings.h @@ -60,7 +60,7 @@ parse_iso_8601_datetime(char *str, int len, * objects with the given local and unit settings. 
*/ int -get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base); +get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); /* * Converts an pandas_datetimestruct to an (almost) ISO 8601 @@ -74,6 +74,6 @@ get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base); */ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, - PANDAS_DATETIMEUNIT base); + NPY_DATETIMEUNIT base); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_ diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index cb6f0a220fafe..5011d33d189c2 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -54,7 +54,7 @@ npy_int64 unix_date_from_ymd(int year, int month, int day) { dts.year = year; dts.month = month; dts.day = day; - unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts); + unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, &dts); return unix_date; } @@ -151,7 +151,7 @@ static npy_int64 DtoB(pandas_datetimestruct *dts, static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (dts.month > af_info->to_end) { return (npy_int64)(dts.year + 1 - 1970); } else { @@ -163,7 +163,7 @@ static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) { pandas_datetimestruct dts; int quarter; - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (af_info->to_end != 12) { dts.month -= af_info->to_end; if (dts.month <= 0) { @@ -192,7 +192,7 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { ordinal = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, 
NPY_FR_D, &dts); return (npy_int64)((dts.year - 1970) * 12 + dts.month - 1); } @@ -205,7 +205,7 @@ static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; pandas_datetimestruct dts; npy_int64 unix_date = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); // This usage defines roll_back the opposite way from the others roll_back = 1 - af_info->is_end; @@ -265,7 +265,7 @@ static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -305,7 +305,7 @@ static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -360,7 +360,7 @@ static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -417,7 +417,7 @@ static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_AtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, 
unix_date); } diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index e7f334b267461..eaa4eca44c15b 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -43,6 +43,7 @@ Numeric decoder derived from from TCL library #include <math.h> // NOLINT(build/include_order) #include <numpy/arrayobject.h> // NOLINT(build/include_order) #include <numpy/arrayscalars.h> // NOLINT(build/include_order) +#include <numpy/ndarraytypes.h> // NOLINT(build/include_order) #include <numpy/npy_math.h> // NOLINT(build/include_order) #include <stdio.h> // NOLINT(build/include_order) #include <ultrajson.h> // NOLINT(build/include_order) @@ -138,7 +139,7 @@ typedef struct __PyObjectEncoder { TypeContext basicTypeContext; int datetimeIso; - PANDAS_DATETIMEUNIT datetimeUnit; + NPY_DATETIMEUNIT datetimeUnit; // output format style for pandas data types int outputFormat; @@ -444,7 +445,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PANDAS_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; + NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; if (((PyObjectEncoder *)tc->encoder)->datetimeIso) { PRINTMARK(); @@ -482,7 +483,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, PRINTMARK(); pandas_datetime_to_datetimestruct( - obj->obval, (PANDAS_DATETIMEUNIT)obj->obmeta.base, &dts); + obj->obval, (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ -512,7 +513,7 @@ static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, PRINTMARK(); pandas_datetime_to_datetimestruct((npy_datetime)GET_TC(tc)->longValue, - PANDAS_FR_ns, &dts); + NPY_FR_ns, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ 
-1864,15 +1865,15 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; switch (base) { - case PANDAS_FR_ns: + case NPY_FR_ns: break; - case PANDAS_FR_us: + case NPY_FR_us: value /= 1000LL; break; - case PANDAS_FR_ms: + case NPY_FR_ms: value /= 1000000LL; break; - case PANDAS_FR_s: + case NPY_FR_s: value /= 1000000000LL; break; } @@ -2358,7 +2359,7 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { pyEncoder.npyType = -1; pyEncoder.npyValue = NULL; pyEncoder.datetimeIso = 0; - pyEncoder.datetimeUnit = PANDAS_FR_ms; + pyEncoder.datetimeUnit = NPY_FR_ms; pyEncoder.outputFormat = COLUMNS; pyEncoder.defaultHandler = 0; pyEncoder.basicTypeContext.newObj = NULL; @@ -2416,13 +2417,13 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { if (sdateFormat != NULL) { if (strcmp(sdateFormat, "s") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_s; + pyEncoder.datetimeUnit = NPY_FR_s; } else if (strcmp(sdateFormat, "ms") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_ms; + pyEncoder.datetimeUnit = NPY_FR_ms; } else if (strcmp(sdateFormat, "us") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_us; + pyEncoder.datetimeUnit = NPY_FR_us; } else if (strcmp(sdateFormat, "ns") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_ns; + pyEncoder.datetimeUnit = NPY_FR_ns; } else { PyErr_Format(PyExc_ValueError, "Invalid value '%s' for option 'date_unit'", diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index a3b7d6c59200c..b948be606645d 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -21,11 +21,10 @@ PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, pandas_datetimestruct, pandas_datetime_to_datetimestruct, _string_to_dts, - PANDAS_DATETIMEUNIT, PANDAS_FR_ns, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64, get_datetime64_unit, get_datetime64_value, - pydatetime_to_dt64) + pydatetime_to_dt64, 
NPY_DATETIMEUNIT, NPY_FR_ns) from np_datetime import OutOfBoundsDatetime from util cimport (is_string_object, @@ -62,13 +61,13 @@ cdef inline int64_t get_datetime64_nanos(object val) except? -1: """ cdef: pandas_datetimestruct dts - PANDAS_DATETIMEUNIT unit + NPY_DATETIMEUNIT unit npy_datetime ival unit = get_datetime64_unit(val) ival = get_datetime64_value(val) - if unit != PANDAS_FR_ns: + if unit != NPY_FR_ns: pandas_datetime_to_datetimestruct(ival, unit, &dts) check_dts_bounds(&dts) ival = dtstruct_to_dt64(&dts) @@ -93,7 +92,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): cdef: Py_ssize_t i, n = arr.size ndarray[int64_t] ivalues, iresult - PANDAS_DATETIMEUNIT unit + NPY_DATETIMEUNIT unit pandas_datetimestruct dts shape = (<object> arr).shape @@ -107,7 +106,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): return result unit = get_datetime64_unit(arr.flat[0]) - if unit == PANDAS_FR_ns: + if unit == NPY_FR_ns: if copy: arr = arr.copy() result = arr diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 1a0baa8271643..7c91c5551dc47 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -11,7 +11,7 @@ cdef extern from "numpy/ndarrayobject.h": cdef extern from "numpy/ndarraytypes.h": ctypedef struct PyArray_DatetimeMetaData: - PANDAS_DATETIMEUNIT base + NPY_DATETIMEUNIT base int64_t num cdef extern from "numpy/arrayscalars.h": @@ -34,24 +34,24 @@ cdef extern from "../src/datetime/np_datetime.h": int64_t days int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds - ctypedef enum PANDAS_DATETIMEUNIT: - PANDAS_FR_Y - PANDAS_FR_M - PANDAS_FR_W - PANDAS_FR_D - PANDAS_FR_B - PANDAS_FR_h - PANDAS_FR_m - PANDAS_FR_s - PANDAS_FR_ms - PANDAS_FR_us - PANDAS_FR_ns - PANDAS_FR_ps - PANDAS_FR_fs - PANDAS_FR_as + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + 
NPY_FR_fs + NPY_FR_as void pandas_datetime_to_datetimestruct(npy_datetime val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil @@ -70,7 +70,7 @@ cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) cdef npy_datetime get_datetime64_value(object obj) nogil cdef npy_timedelta get_timedelta64_value(object obj) nogil -cdef PANDAS_DATETIMEUNIT get_datetime64_unit(object obj) nogil +cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil cdef int _string_to_dts(object val, pandas_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 7f861a50f03b8..e58ec0702adcc 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -19,16 +19,16 @@ cdef extern from "../src/datetime/np_datetime.h": int cmp_pandas_datetimestruct(pandas_datetimestruct *a, pandas_datetimestruct *b) - npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d ) nogil void pandas_datetime_to_datetimestruct(npy_datetime val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result ) nogil @@ -59,11 +59,11 @@ cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: return (<PyTimedeltaScalarObject*>obj).obval -cdef inline PANDAS_DATETIMEUNIT get_datetime64_unit(object obj) nogil: +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: """ returns the unit part of the dtype for a numpy datetime64 object. 
""" - return <PANDAS_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base + return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # ---------------------------------------------------------------------- # Comparison @@ -127,22 +127,22 @@ cdef inline check_dts_bounds(pandas_datetimestruct *dts): cdef inline int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil: """Convenience function to call pandas_datetimestruct_to_datetime - with the by-far-most-common frequency PANDAS_FR_ns""" - return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts) + with the by-far-most-common frequency NPY_FR_ns""" + return pandas_datetimestruct_to_datetime(NPY_FR_ns, dts) cdef inline void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil: """Convenience function to call pandas_datetime_to_datetimestruct - with the by-far-most-common frequency PANDAS_FR_ns""" - pandas_datetime_to_datetimestruct(dt64, PANDAS_FR_ns, out) + with the by-far-most-common frequency NPY_FR_ns""" + pandas_datetime_to_datetimestruct(dt64, NPY_FR_ns, out) return cdef inline void td64_to_tdstruct(int64_t td64, pandas_timedeltastruct* out) nogil: """Convenience function to call pandas_timedelta_to_timedeltastruct - with the by-far-most-common frequency PANDAS_FR_ns""" - pandas_timedelta_to_timedeltastruct(td64, PANDAS_FR_ns, out) + with the by-far-most-common frequency NPY_FR_ns""" + pandas_timedelta_to_timedeltastruct(td64, NPY_FR_ns, out) return diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 1796a764ae326..0ec5d25beeeb9 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -24,12 +24,11 @@ PyDateTime_IMPORT from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct, - PANDAS_FR_D, pandas_datetime_to_datetimestruct, - PANDAS_DATETIMEUNIT) + NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "../src/datetime/np_datetime.h": - int64_t pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + 
int64_t pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d ) nogil @@ -188,7 +187,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: elif freq == FR_MTH: return (dts.year - 1970) * 12 + dts.month - 1 - unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, dts) + unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, dts) if freq >= FR_SEC: seconds = unix_date * 86400 + dts.hour * 3600 + dts.min * 60 + dts.sec @@ -315,7 +314,7 @@ cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, # abstime >= 0.0 and abstime <= 86400 # Calculate the date - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, dts) + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, dts) # Calculate the time inttime = <int>abstime
progress towards #21852 - [X] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Working on my C-fu as well. There may be a better way to do this (ex: even adding NPY_DATETIMEUNIT to Cython/includes) but figured I'd offer this up for review @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/21863
2018-07-11T17:47:47Z
2018-07-12T10:02:51Z
2018-07-12T10:02:51Z
2018-07-12T21:18:55Z
CLN: miscellaneous cleanups / fixes
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..00379c7e9d511 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -301,6 +301,7 @@ Datetimelike ^^^^^^^^^^^^ - Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`) +- Fixed bug where :meth:`Timestamp.resolution` incorrectly returned 1-microsecond ``timedelta`` instead of 1-nanosecond :class:`Timedelta` (:issue:`21336`,:issue:`21365`) Timedelta ^^^^^^^^^ @@ -369,6 +370,7 @@ Missing ^^^^^^^ - Bug in :func:`DataFrame.fillna` where a ``ValueError`` would raise when one column contained a ``datetime64[ns, tz]`` dtype (:issue:`15522`) +- Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 711db7cc8fbe2..864950ff03eae 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -407,6 +407,15 @@ cdef class _Timestamp(datetime): def asm8(self): return np.datetime64(self.value, 'ns') + @property + def resolution(self): + """ + Return resolution describing the smallest difference between two + times that can be represented by Timestamp object_state + """ + # GH#21336, GH#21365 + return Timedelta(nanoseconds=1) + def timestamp(self): """Return POSIX timestamp as float.""" # py27 compat, see GH#17329 diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index d747e69d1ff39..a0456630c9a0f 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -278,10 +278,15 @@ def is_list_like(obj): False >>> is_list_like(1) False + >>> is_list_like(np.array([2])) + True + >>> is_list_like(np.array(2))) + False """ return (isinstance(obj, Iterable) and - not isinstance(obj, 
string_and_binary_types)) + not isinstance(obj, string_and_binary_types) and + not (isinstance(obj, np.ndarray) and obj.ndim == 0)) def is_array_like(obj): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e6ddf64145a8..6380944338010 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7679,6 +7679,9 @@ def convert(v): try: if is_list_like(values[0]) or hasattr(values[0], 'len'): values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 + values = np.array([convert(v) for v in values]) else: values = convert(values) except: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b8a89ac26c9d9..217bb3e7d1734 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -93,7 +93,7 @@ def _dt_index_cmp(opname, cls): def wrapper(self, other): func = getattr(super(DatetimeIndex, self), opname) - if isinstance(other, (datetime, compat.string_types)): + if isinstance(other, (datetime, np.datetime64, compat.string_types)): if isinstance(other, datetime): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) @@ -105,8 +105,7 @@ def wrapper(self, other): else: if isinstance(other, list): other = DatetimeIndex(other) - elif not isinstance(other, (np.datetime64, np.ndarray, - Index, ABCSeries)): + elif not isinstance(other, (np.ndarray, Index, ABCSeries)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. 
if opname == '__eq__': diff --git a/pandas/core/series.py b/pandas/core/series.py index a63c4be98f738..0bdb9d9cc23a6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -166,6 +166,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame): ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv', 'valid']) + # Override cache_readonly bc Series is mutable + hasnans = property(base.IndexOpsMixin.hasnans.func, + doc=base.IndexOpsMixin.hasnans.__doc__) + def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 29618fb4dec52..ed2659973cc6a 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -85,7 +85,7 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'): elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name) - elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 0: + elif isinstance(arg, np.ndarray) and arg.ndim == 0: # extract array scalar and process below arg = arg.item() elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 65527ac1b278f..f81767156b255 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -67,13 +67,14 @@ def __getitem__(self): [ [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]), - Series([]), Series(['a']).str]) + Series([]), Series(['a']).str, + np.array([2])]) def test_is_list_like_passes(ll): assert inference.is_list_like(ll) @pytest.mark.parametrize( - "ll", [1, '2', object(), str]) + "ll", [1, '2', object(), str, np.array(2)]) def test_is_list_like_fails(ll): assert not inference.is_list_like(ll) diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 
5272059163a07..4172bfd41b9db 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -172,6 +172,11 @@ def test_woy_boundary(self): 2005, 1, 1), (2005, 1, 2)]]) assert (result == [52, 52, 53, 53]).all() + def test_resolution(self): + # GH#21336, GH#21365 + dt = Timestamp('2100-01-01 00:00:00') + assert dt.resolution == Timedelta(nanoseconds=1) + class TestTimestampConstructors(object): diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 79e23459ac992..506e7e14ffc4f 100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -11,6 +11,7 @@ from pandas import Series from pandas.core.indexes.datetimes import Timestamp import pandas._libs.lib as lib +import pandas as pd from pandas.util.testing import assert_series_equal import pandas.util.testing as tm @@ -309,3 +310,16 @@ def test_convert_preserve_all_bool(self): r = s._convert(datetime=True, numeric=True) e = Series([False, True, False, False], dtype=bool) tm.assert_series_equal(r, e) + + +def test_hasnans_unchached_for_series(): + # GH#19700 + idx = pd.Index([0, 1]) + assert not idx.hasnans + assert 'hasnans' in idx._cache + ser = idx.to_series() + assert not ser.hasnans + assert not hasattr(ser, '_cache') + ser.iloc[-1] = np.nan + assert ser.hasnans + assert pd.Series.hasnans.__doc__ == pd.Index.hasnans.__doc__
#21365 made good progress on #21336 so I copied the test from there and referenced it in the whatsnew. closes #19700 closes #19011 closes #21336 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21861
2018-07-11T15:46:10Z
2018-07-12T00:13:56Z
2018-07-12T00:13:56Z
2018-07-12T00:44:21Z
[CLN] resolve circular Period dependency, prepare setup.py
diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py index ad72980105c4f..b02c423b79f43 100644 --- a/pandas/_libs/__init__.py +++ b/pandas/_libs/__init__.py @@ -1,9 +1,5 @@ # -*- coding: utf-8 -*- # flake8: noqa -from .tslibs import iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime - -# TODO -# period is directly dependent on tslib and imports python -# modules, so exposing Period as an alias is currently not possible -# from period import Period +from .tslibs import ( + iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime, Period) diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 22307f70ebe52..c7765a2c2b89c 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -4,5 +4,6 @@ from .conversion import normalize_date, localize_pydatetime, tz_convert_single from .nattype import NaT, iNaT from .np_datetime import OutOfBoundsDatetime +from .period import Period, IncompatibleFrequency from .timestamps import Timestamp from .timedeltas import delta_to_nanoseconds, ints_to_pytimedelta, Timedelta diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a0be630aade9d..a53d794b48cfa 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -28,8 +28,6 @@ from timestamps import Timestamp from pandas._libs.properties import cache_readonly -from pandas.core.algorithms import unique # TODO: Avoid this non-cython import - # ---------------------------------------------------------------------- # Constants @@ -574,6 +572,10 @@ cdef class _FrequencyInferer(object): if len(self.ydiffs) > 1: return None + # lazy import to prevent circularity + # TODO: Avoid non-cython dependency + from pandas.core.algorithms import unique + if len(unique(self.fields['M'])) > 1: return None @@ -618,6 +620,10 @@ cdef class _FrequencyInferer(object): # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): # return None + # lazy import to prevent 
circularity + # TODO: Avoid non-cython dependency + from pandas.core.algorithms import unique + weekdays = unique(self.index.weekday) if len(weekdays) > 1: return None diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 387a63f61179d..fb9355dfed645 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -24,6 +24,8 @@ def test_namespace(): api = ['NaT', 'iNaT', 'OutOfBoundsDatetime', + 'Period', + 'IncompatibleFrequency', 'Timedelta', 'Timestamp', 'delta_to_nanoseconds', diff --git a/setup.py b/setup.py index 8018d71b74655..4910fcf292ca6 100755 --- a/setup.py +++ b/setup.py @@ -24,23 +24,6 @@ def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' -def is_platform_linux(): - return sys.platform == 'linux2' - - -def is_platform_mac(): - return sys.platform == 'darwin' - - -min_cython_ver = '0.28.2' -try: - import Cython - ver = Cython.__version__ - _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) -except ImportError: - _CYTHON_INSTALLED = False - - min_numpy_ver = '1.9.0' setuptools_kwargs = { 'install_requires': [ @@ -53,24 +36,29 @@ def is_platform_mac(): } +min_cython_ver = '0.28.2' +try: + import Cython + ver = Cython.__version__ + _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) +except ImportError: + _CYTHON_INSTALLED = False + +# The import of Extension must be after the import of Cython, otherwise +# we do not get the appropriately patched class. 
+# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html from distutils.extension import Extension # noqa:E402 from distutils.command.build import build # noqa:E402 -from distutils.command.build_ext import build_ext as _build_ext # noqa:E402 try: if not _CYTHON_INSTALLED: raise ImportError('No supported version of Cython installed.') - try: - from Cython.Distutils.old_build_ext import old_build_ext as _build_ext # noqa:F811,E501 - except ImportError: - # Pre 0.25 - from Cython.Distutils import build_ext as _build_ext + from Cython.Distutils.old_build_ext import old_build_ext as _build_ext cython = True except ImportError: + from distutils.command.build_ext import build_ext as _build_ext cython = False - - -if cython: +else: try: try: from Cython import Tempita as tempita @@ -103,27 +91,30 @@ def is_platform_mac(): class build_ext(_build_ext): - def build_extensions(self): + @classmethod + def render_templates(cls, pxifiles): + for pxifile in pxifiles: + # build pxifiles first, template extension must be .pxi.in + assert pxifile.endswith('.pxi.in') + outfile = pxifile[:-3] - # if builing from c files, don't need to - # generate template output - if cython: - for pxifile in _pxifiles: - # build pxifiles first, template extension must be .pxi.in - assert pxifile.endswith('.pxi.in') - outfile = pxifile[:-3] - - if (os.path.exists(outfile) and - os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime): - # if .pxi.in is not updated, no need to output .pxi - continue + if (os.path.exists(outfile) and + os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime): + # if .pxi.in is not updated, no need to output .pxi + continue - with open(pxifile, "r") as f: - tmpl = f.read() - pyxcontent = tempita.sub(tmpl) + with open(pxifile, "r") as f: + tmpl = f.read() + pyxcontent = tempita.sub(tmpl) - with open(outfile, "w") as f: - f.write(pyxcontent) + with open(outfile, "w") as f: + f.write(pyxcontent) + + def build_extensions(self): + # if building from c files, 
don't need to + # generate template output + if cython: + self.render_templates(_pxifiles) numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') @@ -360,7 +351,6 @@ def run(self): class CheckingBuildExt(build_ext): """ Subclass build_ext to get clearer report if Cython is necessary. - """ def check_cython_extensions(self, extensions): @@ -379,9 +369,11 @@ def build_extensions(self): class CythonCommand(build_ext): - """Custom distutils command subclassed from Cython.Distutils.build_ext + """ + Custom distutils command subclassed from Cython.Distutils.build_ext to compile pyx->c, and stop there. All this does is override the - C-compile method build_extension() with a no-op.""" + C-compile method build_extension() with a no-op. + """ def build_extension(self, ext): pass @@ -445,7 +437,6 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): lib_depends.append('pandas/_libs/src/util.pxd') else: lib_depends = [] - plib_depends = [] common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] @@ -471,8 +462,6 @@ def pxd(name): tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] -# some linux distros require it -libraries = ['m'] if not is_platform_windows() else [] ext_data = { '_libs.algos': {
For a long time there has been a comment in `_libs.__init__` saying it would be nice to import `Period` directly but that is not possible due to circular imports. This resolves that issue. Also does some cleanup in setup.py, motivated by the goals of a) using `cythonize` and b) implementing test coverage for cython files. I've gotten those working locally, but they involve big diffs, so this gets some of the easy stuff out of the way.
https://api.github.com/repos/pandas-dev/pandas/pulls/21854
2018-07-11T03:17:59Z
2018-07-12T10:10:14Z
2018-07-12T10:10:14Z
2020-04-05T17:41:56Z
BUG: datetime rolling min/max segfaults when closed=left (#21704)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..798e414b3e60c 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -398,6 +398,9 @@ Groupby/Resample/Rolling - - +- Multiple bugs in :func:`pandas.core.Rolling.min` with ``closed='left'` and a + datetime-like index leading to incorrect results and also segfault. (:issue:`21704`) + Sparse ^^^^^^ diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 9e704a9bd8d3f..bd6cd476595f3 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1218,141 +1218,188 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, Moving min/max of 1d array of any numeric type along axis=0 ignoring NaNs. """ - cdef: - numeric ai - bint is_variable, should_replace - int64_t N, i, removed, window_i - Py_ssize_t nobs = 0 - deque Q[int64_t] ndarray[int64_t] starti, endi - ndarray[numeric, ndim=1] output - cdef: - int64_t* death - numeric* ring - numeric* minvalue - numeric* end - numeric* last - - cdef: - cdef numeric r + int64_t N + bint is_variable starti, endi, N, win, minp, is_variable = get_window_indexer( input, win, minp, index, closed) - output = np.empty(N, dtype=input.dtype) + if is_variable: + return _roll_min_max_variable(input, starti, endi, N, win, minp, + is_max) + else: + return _roll_min_max_fixed(input, starti, endi, N, win, minp, is_max) + +cdef _roll_min_max_variable(ndarray[numeric] input, + ndarray[int64_t] starti, + ndarray[int64_t] endi, + int64_t N, + int64_t win, + int64_t minp, + bint is_max): + cdef: + numeric ai + int64_t i, close_offset, curr_win_size + Py_ssize_t nobs = 0 + deque Q[int64_t] # min/max always the front + deque W[int64_t] # track the whole window for nobs compute + ndarray[double_t, ndim=1] output + + output = np.empty(N, dtype=float) Q = deque[int64_t]() + W = deque[int64_t]() - if is_variable: + with nogil: - with nogil: + # This is using a modified version of the C++ code 
in this + # SO post: http://bit.ly/2nOoHlY + # The original impl didn't deal with variable window sizes + # So the code was optimized for that - # This is using a modified version of the C++ code in this - # SO post: http://bit.ly/2nOoHlY - # The original impl didn't deal with variable window sizes - # So the code was optimized for that + for i from starti[0] <= i < endi[0]: + ai = init_mm(input[i], &nobs, is_max) - for i from starti[0] <= i < endi[0]: - ai = init_mm(input[i], &nobs, is_max) + # Discard previous entries if we find new min or max + if is_max: + while not Q.empty() and ((ai >= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + else: + while not Q.empty() and ((ai <= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + Q.push_back(i) + W.push_back(i) + + # if right is open then the first window is empty + close_offset = 0 if endi[0] > starti[0] else 1 + + for i in range(endi[0], endi[N-1]): + if not Q.empty(): + output[i-1+close_offset] = calc_mm( + minp, nobs, input[Q.front()]) + else: + output[i-1+close_offset] = NaN - if is_max: - while not Q.empty() and ai >= input[Q.back()]: - Q.pop_back() - else: - while not Q.empty() and ai <= input[Q.back()]: - Q.pop_back() - Q.push_back(i) + ai = init_mm(input[i], &nobs, is_max) + + # Discard previous entries if we find new min or max + if is_max: + while not Q.empty() and ((ai >= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + else: + while not Q.empty() and ((ai <= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() - for i from endi[0] <= i < N: - output[i-1] = calc_mm(minp, nobs, input[Q.front()]) + # Maintain window/nobs retention + curr_win_size = endi[i + close_offset] - starti[i + close_offset] + while not Q.empty() and Q.front() <= i - curr_win_size: + Q.pop_front() + while not W.empty() and W.front() <= i - curr_win_size: + remove_mm(input[W.front()], &nobs) + W.pop_front() - ai = 
init_mm(input[i], &nobs, is_max) + Q.push_back(i) + W.push_back(i) - if is_max: - while not Q.empty() and ai >= input[Q.back()]: - Q.pop_back() - else: - while not Q.empty() and ai <= input[Q.back()]: - Q.pop_back() + output[N-1] = calc_mm(minp, nobs, input[Q.front()]) - while not Q.empty() and Q.front() <= i - (endi[i] - starti[i]): - Q.pop_front() + return output - Q.push_back(i) - output[N-1] = calc_mm(minp, nobs, input[Q.front()]) +cdef _roll_min_max_fixed(ndarray[numeric] input, + ndarray[int64_t] starti, + ndarray[int64_t] endi, + int64_t N, + int64_t win, + int64_t minp, + bint is_max): + cdef: + numeric ai + bint should_replace + int64_t i, removed, window_i, + Py_ssize_t nobs = 0 + int64_t* death + numeric* ring + numeric* minvalue + numeric* end + numeric* last + ndarray[double_t, ndim=1] output - else: - # setup the rings of death! - ring = <numeric *>malloc(win * sizeof(numeric)) - death = <int64_t *>malloc(win * sizeof(int64_t)) - - end = ring + win - last = ring - minvalue = ring - ai = input[0] - minvalue[0] = init_mm(input[0], &nobs, is_max) - death[0] = win - nobs = 0 + output = np.empty(N, dtype=float) + # setup the rings of death! 
+ ring = <numeric *>malloc(win * sizeof(numeric)) + death = <int64_t *>malloc(win * sizeof(int64_t)) + + end = ring + win + last = ring + minvalue = ring + ai = input[0] + minvalue[0] = init_mm(input[0], &nobs, is_max) + death[0] = win + nobs = 0 - with nogil: + with nogil: - for i in range(N): - ai = init_mm(input[i], &nobs, is_max) + for i in range(N): + ai = init_mm(input[i], &nobs, is_max) - if i >= win: - remove_mm(input[i - win], &nobs) + if i >= win: + remove_mm(input[i - win], &nobs) - if death[minvalue - ring] == i: - minvalue = minvalue + 1 - if minvalue >= end: - minvalue = ring + if death[minvalue - ring] == i: + minvalue = minvalue + 1 + if minvalue >= end: + minvalue = ring - if is_max: - should_replace = ai >= minvalue[0] - else: - should_replace = ai <= minvalue[0] - if should_replace: + if is_max: + should_replace = ai >= minvalue[0] + else: + should_replace = ai <= minvalue[0] + if should_replace: - minvalue[0] = ai - death[minvalue - ring] = i + win - last = minvalue + minvalue[0] = ai + death[minvalue - ring] = i + win + last = minvalue - else: + else: + if is_max: + should_replace = last[0] <= ai + else: + should_replace = last[0] >= ai + while should_replace: + if last == ring: + last = end + last -= 1 if is_max: should_replace = last[0] <= ai else: should_replace = last[0] >= ai - while should_replace: - if last == ring: - last = end - last -= 1 - if is_max: - should_replace = last[0] <= ai - else: - should_replace = last[0] >= ai - last += 1 - if last == end: - last = ring - last[0] = ai - death[last - ring] = i + win + last += 1 + if last == end: + last = ring + last[0] = ai + death[last - ring] = i + win - output[i] = calc_mm(minp, nobs, minvalue[0]) + output[i] = calc_mm(minp, nobs, minvalue[0]) - for i in range(minp - 1): - if numeric in cython.floating: - output[i] = NaN - else: - output[i] = 0 + for i in range(minp - 1): + if numeric in cython.floating: + output[i] = NaN + else: + output[i] = 0 - free(ring) - free(death) + free(ring) + 
free(death) - # print("output: {0}".format(output)) return output diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 78d1fa84cc5db..14966177978f4 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -464,6 +464,60 @@ def test_closed(self): with pytest.raises(ValueError): df.rolling(window=3, closed='neither') + @pytest.mark.parametrize("input_dtype", ['int', 'float']) + @pytest.mark.parametrize("func,closed,expected", [ + ('min', 'right', [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ('min', 'both', [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ('min', 'neither', [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ('min', 'left', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ('max', 'right', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ('max', 'both', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ('max', 'neither', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ('max', 'left', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + ]) + def test_closed_min_max_datetime(self, input_dtype, + func, closed, + expected): + # see gh-21704 + ser = pd.Series(data=np.arange(10).astype(input_dtype), + index=pd.date_range('2000', periods=10)) + + result = getattr(ser.rolling('3D', closed=closed), func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + def test_closed_uneven(self): + # see gh-21704 + ser = pd.Series(data=np.arange(10), + index=pd.date_range('2000', periods=10)) + + # uneven + ser = ser.drop(index=ser.index[[1, 5]]) + result = ser.rolling('3D', closed='left').min() + expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], + index=ser.index) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func,closed,expected", [ + ('min', 'right', [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ('min', 'both', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ('min', 'neither', [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ('min', 'left', [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ('max', 'right', [np.nan, 1, 2, 3, 4, 5, 
6, 6, np.nan, np.nan]), + ('max', 'both', [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]), + ('max', 'neither', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]), + ('max', 'left', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]) + ]) + def test_closed_min_max_minp(self, func, closed, expected): + # see gh-21704 + ser = pd.Series(data=np.arange(10), + index=pd.date_range('2000', periods=10)) + ser[ser.index[-3:]] = np.nan + result = getattr(ser.rolling('3D', min_periods=2, closed=closed), + func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('roller', ['1s', 1]) def tests_empty_df_rolling(self, roller): # GH 15819 Verifies that datetime and integer rolling windows can be
User reported that `df.rolling(to_offset('3D'), closed='left').max()` segfaults when df has a datetime index. The bug was in PR #19549. In that PR, in https://github.com/pandas-dev/pandas/blame/master/pandas/_libs/window.pyx#L1268 `i` is initialized to `endi[0]`, which is 0 when `closed=left`. So in the next line when it tries to set `output[i-1]` it goes out of bounds. In addition, there are 2 more bugs in the `roll_min_max` code. The second bug is that for variable size windows, the `nobs` is never updated when elements leave the window. The third bug is at the end of the fixed window where all output elements up to `minp` are initialized to 0 if the input is not float. This PR fixes all three of the aforementioned bugs, at the cost of casting the output array to floating point even if the input is integer. This is less than ideal if the output has no NaNs, but is still consistent with roll_sum behavior. - [x] closes #21704 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21853
2018-07-11T03:16:01Z
2018-07-20T15:58:35Z
2018-07-20T15:58:35Z
2018-09-04T23:49:30Z
Move constructor helpers to EAMixins
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0d1c5241c5a93..d7dfa73c53d8d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from datetime import timedelta +from datetime import datetime, timedelta import warnings import numpy as np @@ -22,6 +22,8 @@ _ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.algorithms import checked_add_with_arr + from pandas.tseries.frequencies import to_offset, DateOffset from pandas.tseries.offsets import Tick @@ -281,6 +283,39 @@ def _add_offset(self, offset): return type(self)(result, freq='infer') + def _sub_datelike(self, other): + # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + if isinstance(other, np.ndarray): + # if other is an ndarray, we assume it is datetime64-dtype + other = type(self)(other) + if not self._has_same_tz(other): + # require tz compat + raise TypeError("{cls} subtraction must have the same " + "timezones or no timezones" + .format(cls=type(self).__name__)) + result = self._sub_datelike_dti(other) + elif isinstance(other, (datetime, np.datetime64)): + assert other is not NaT + other = Timestamp(other) + if other is NaT: + return self - NaT + # require tz compat + elif not self._has_same_tz(other): + raise TypeError("Timestamp subtraction must have the same " + "timezones or no timezones") + else: + i8 = self.asi8 + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, + fill_value=iNaT) + else: + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + return result.view('timedelta64[ns]') + def _add_delta(self, delta): """ Add a timedelta-like, DateOffset, or TimedeltaIndex-like object @@ -517,6 +552,47 @@ def to_pydatetime(self): """ return 
tslib.ints_to_pydatetime(self.asi8, tz=self.tz) + def normalize(self): + """ + Convert times to midnight. + + The time component of the date-time is converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on Datetime Array/Index. + + Returns + ------- + DatetimeArray, DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', + ... periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='H') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) + """ + new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) + return type(self)(new_values, freq='infer').tz_localize(self.tz) + # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 35baa3262d3dd..000775361061e 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -8,7 +8,7 @@ from pandas._libs.tslib import NaT, iNaT from pandas._libs.tslibs.period import ( Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - get_period_field_arr, period_asfreq_arr) + get_period_field_arr, period_asfreq_arr, _quarter_to_myear) from 
pandas._libs.tslibs import period as libperiod from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.fields import isleapyear_arr @@ -19,6 +19,9 @@ from pandas.core.dtypes.common import ( is_integer_dtype, is_float_dtype, is_period_dtype) from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCSeries + +import pandas.core.common as com from pandas.tseries import frequencies from pandas.tseries.offsets import Tick, DateOffset @@ -157,6 +160,25 @@ def _from_ordinals(cls, values, freq=None): result._freq = Period._maybe_convert_freq(freq) return result + @classmethod + def _generate_range(cls, start, end, periods, freq, fields): + if freq is not None: + freq = Period._maybe_convert_freq(freq) + + field_count = len(fields) + if com._count_not_none(start, end) > 0: + if field_count > 0: + raise ValueError('Can either instantiate from fields ' + 'or endpoints, but not both') + subarr, freq = _get_ordinal_range(start, end, periods, freq) + elif field_count > 0: + subarr, freq = _range_from_fields(freq=freq, **fields) + else: + raise ValueError('Not enough parameters to construct ' + 'Period range') + + return subarr, freq + # -------------------------------------------------------------------- # Vectorized analogues of Period properties @@ -371,3 +393,102 @@ def _add_comparison_methods(cls): PeriodArrayMixin._add_comparison_methods() + + +# ------------------------------------------------------------------- +# Constructor Helpers + +def _get_ordinal_range(start, end, periods, freq, mult=1): + if com._count_not_none(start, end, periods) != 2: + raise ValueError('Of the three parameters: start, end, and periods, ' + 'exactly two must be specified') + + if freq is not None: + _, mult = frequencies.get_freq_code(freq) + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + 
+ if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError('start and end must have same freq') + if (start is NaT or end is NaT): + raise ValueError('start and end must not be NaT') + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError('Could not infer freq from start/end') + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange(end.ordinal - periods + mult, + end.ordinal + 1, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, start.ordinal + periods, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) + + return data, freq + + +def _range_from_fields(year=None, month=None, quarter=None, day=None, + hour=None, minute=None, second=None, freq=None): + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = 'Q' + base = frequencies.FreqGroup.FR_QTR + else: + base, mult = frequencies.get_freq_code(freq) + if base != frequencies.FreqGroup.FR_QTR: + raise AssertionError("base must equal FR_QTR") + + year, quarter = _make_field_arrays(year, quarter) + for y, q in compat.zip(year, quarter): + y, m = _quarter_to_myear(y, q, freq) + val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) + ordinals.append(val) + else: + base, mult = frequencies.get_freq_code(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in compat.zip(*arrays): + ordinals.append(libperiod.period_ordinal( + y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _make_field_arrays(*fields): + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError('Mismatched Period 
array lengths') + elif length is None: + length = len(x) + + arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) for x in fields] + + return arrays diff --git a/pandas/core/arrays/timedelta.py b/pandas/core/arrays/timedelta.py index f093cadec5a38..dbd481aae4f37 100644 --- a/pandas/core/arrays/timedelta.py +++ b/pandas/core/arrays/timedelta.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import tslibs +from pandas._libs import tslibs, lib from pandas._libs.tslibs import Timedelta, NaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -15,6 +15,8 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna +import pandas.core.common as com + from pandas.tseries.offsets import Tick, DateOffset from pandas.tseries.frequencies import to_offset @@ -70,11 +72,27 @@ def _simple_new(cls, values, freq=None, **kwargs): result._freq = freq return result - def __new__(cls, values, freq=None): + def __new__(cls, values, freq=None, start=None, end=None, periods=None, + closed=None): if (freq is not None and not isinstance(freq, DateOffset) and freq != 'infer'): freq = to_offset(freq) + if periods is not None: + if lib.is_float(periods): + periods = int(periods) + elif not lib.is_integer(periods): + raise TypeError('`periods` must be a number, got {periods}' + .format(periods=periods)) + + if values is None: + if freq is None and com._any_none(periods, start, end): + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + else: + return cls._generate(start, end, periods, freq, + closed=closed) + result = cls._simple_new(values, freq=freq) if freq == 'infer': inferred = result.inferred_freq @@ -83,6 +101,52 @@ def __new__(cls, values, freq=None): return result + @classmethod + def _generate(cls, start, end, periods, freq, closed=None, **kwargs): + # **kwargs are for compat with TimedeltaIndex, 
which includes `name` + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') + + if start is not None: + start = Timedelta(start) + + if end is not None: + end = Timedelta(end) + + left_closed = False + right_closed = False + + if start is None and end is None: + if closed is not None: + raise ValueError("Closed has to be None if not both of start" + "and end are defined") + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + if freq is not None: + index = _generate_regular_range(start, end, periods, freq) + index = cls._simple_new(index, freq=freq, **kwargs) + else: + index = np.linspace(start.value, end.value, periods).astype('i8') + # TODO: shouldn't we pass `name` here? (via **kwargs) + index = cls._simple_new(index, freq=freq) + + if not left_closed: + index = index[1:] + if not right_closed: + index = index[:-1] + + return index + # ---------------------------------------------------------------- # Arithmetic Methods @@ -173,6 +237,45 @@ def total_seconds(self): the return type is a Float64Index. When the calling object is a Series, the return type is Series of type `float64` whose index is the same as the original. + + See Also + -------- + datetime.timedelta.total_seconds : Standard library version + of this method. + TimedeltaIndex.components : Return a DataFrame with components of + each Timedelta. 
+ + Examples + -------- + **Series** + + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.total_seconds() + 0 0.0 + 1 86400.0 + 2 172800.0 + 3 259200.0 + 4 345600.0 + dtype: float64 + + **TimedeltaIndex** + + >>> idx = pd.to_timedelta(np.arange(5), unit='d') + >>> idx + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + >>> idx.total_seconds() + Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0], + dtype='float64') """ return self._maybe_mask_results(1e-9 * self.asi8) @@ -198,3 +301,55 @@ def to_pytimedelta(self): nanoseconds = _field_accessor("nanoseconds", "nanoseconds", "\nNumber of nanoseconds (>= 0 and less " "than 1 microsecond) for each\nelement.\n") + + @property + def components(self): + """ + Return a dataframe of the components (days, hours, minutes, + seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. 
+ + Returns + ------- + a DataFrame + """ + from pandas import DataFrame + + columns = ['days', 'hours', 'minutes', 'seconds', + 'milliseconds', 'microseconds', 'nanoseconds'] + hasnans = self.hasnans + if hasnans: + def f(x): + if isna(x): + return [np.nan] * len(columns) + return x.components + else: + def f(x): + return x.components + + result = DataFrame([f(x) for x in self], columns=columns) + if not hasnans: + result = result.astype('int64') + return result + + +# --------------------------------------------------------------------- +# Constructor Helpers + +def _generate_regular_range(start, end, periods, offset): + stride = offset.nanos + if periods is None: + b = Timedelta(start).value + e = Timedelta(end).value + e += stride - e % stride + elif start is not None: + b = Timedelta(start).value + e = b + periods * stride + elif end is not None: + e = Timedelta(end).value + stride + b = e - periods * stride + else: + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") + + data = np.arange(b, e, stride, dtype=np.int64) + return data diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b8a89ac26c9d9..bc0185bfaaafe 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -31,7 +31,6 @@ from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat -from pandas.core.algorithms import checked_add_with_arr from pandas.core.arrays.datetimes import DatetimeArrayMixin from pandas.core.indexes.base import Index, _index_shared_docs @@ -786,38 +785,6 @@ def __setstate__(self, state): raise Exception("invalid pickle state") _unpickle_compat = __setstate__ - def _sub_datelike(self, other): - # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - other = DatetimeIndex(other) - # require tz compat - if not 
self._has_same_tz(other): - raise TypeError("{cls} subtraction must have the same " - "timezones or no timezones" - .format(cls=type(self).__name__)) - result = self._sub_datelike_dti(other) - elif isinstance(other, (datetime, np.datetime64)): - assert other is not tslibs.NaT - other = Timestamp(other) - if other is tslibs.NaT: - return self - tslibs.NaT - # require tz compat - elif not self._has_same_tz(other): - raise TypeError("Timestamp subtraction must have the same " - "timezones or no timezones") - else: - i8 = self.asi8 - result = checked_add_with_arr(i8, -other.value, - arr_mask=self._isnan) - result = self._maybe_mask_results(result, - fill_value=tslibs.iNaT) - else: - raise TypeError("cannot subtract {cls} and {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - return result.view('timedelta64[ns]') - def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ freq = attrs.get('freq', None) @@ -1585,48 +1552,11 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_year_end = _wrap_field_accessor('is_year_end') is_leap_year = _wrap_field_accessor('is_leap_year') + @Appender(DatetimeArrayMixin.normalize.__doc__) def normalize(self): - """ - Convert times to midnight. - - The time component of the date-time is converted to midnight i.e. - 00:00:00. This is useful in cases, when the time does not matter. - Length is unaltered. The timezones are unaffected. - - This method is available on Series with datetime values under - the ``.dt`` accessor, and directly on DatetimeIndex. - - Returns - ------- - DatetimeIndex or Series - The same type as the original data. Series will have the same - name and index. DatetimeIndex will have the same name. - - See Also - -------- - floor : Floor the datetimes to the specified freq. - ceil : Ceil the datetimes to the specified freq. - round : Round the datetimes to the specified freq. 
- - Examples - -------- - >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', - ... periods=3, tz='Asia/Calcutta') - >>> idx - DatetimeIndex(['2014-08-01 10:00:00+05:30', - '2014-08-01 11:00:00+05:30', - '2014-08-01 12:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq='H') - >>> idx.normalize() - DatetimeIndex(['2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq=None) - """ - new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) - return DatetimeIndex(new_values, - freq='infer', - name=self.name).tz_localize(self.tz) + result = DatetimeArrayMixin.normalize(self) + result.name = self.name + return result @Substitution(klass='DatetimeIndex') @Appender(_shared_docs['searchsorted']) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a531a57eb031f..a8e0c7f1aaa6a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -16,7 +16,6 @@ is_bool_dtype, pandas_dtype, _ensure_object) -from pandas.core.dtypes.generic import ABCSeries import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -29,7 +28,7 @@ from pandas._libs import tslib, index as libindex from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - _validate_end_alias, _quarter_to_myear) + _validate_end_alias) from pandas._libs.tslibs import resolution, period from pandas.core.arrays.period import PeriodArrayMixin @@ -39,7 +38,6 @@ from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) -from pandas.compat import zip import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -266,25 +264,6 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, def _engine(self): return self._engine_type(lambda: self, len(self)) - @classmethod - def 
_generate_range(cls, start, end, periods, freq, fields): - if freq is not None: - freq = Period._maybe_convert_freq(freq) - - field_count = len(fields) - if com._count_not_none(start, end) > 0: - if field_count > 0: - raise ValueError('Can either instantiate from fields ' - 'or endpoints, but not both') - subarr, freq = _get_ordinal_range(start, end, periods, freq) - elif field_count > 0: - subarr, freq = _range_from_fields(freq=freq, **fields) - else: - raise ValueError('Not enough parameters to construct ' - 'Period range') - - return subarr, freq - @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): """ @@ -877,102 +856,6 @@ def tz_localize(self, tz, ambiguous='raise'): PeriodIndex._add_datetimelike_methods() -def _get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - - if freq is not None: - _, mult = _gfc(freq) - - if start is not None: - start = Period(start, freq) - if end is not None: - end = Period(end, freq) - - is_start_per = isinstance(start, Period) - is_end_per = isinstance(end, Period) - - if is_start_per and is_end_per and start.freq != end.freq: - raise ValueError('start and end must have same freq') - if (start is tslib.NaT or end is tslib.NaT): - raise ValueError('start and end must not be NaT') - - if freq is None: - if is_start_per: - freq = start.freq - elif is_end_per: - freq = end.freq - else: # pragma: no cover - raise ValueError('Could not infer freq from start/end') - - if periods is not None: - periods = periods * mult - if start is None: - data = np.arange(end.ordinal - periods + mult, - end.ordinal + 1, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, start.ordinal + periods, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) - - return data, freq - - -def _range_from_fields(year=None, 
month=None, quarter=None, day=None, - hour=None, minute=None, second=None, freq=None): - if hour is None: - hour = 0 - if minute is None: - minute = 0 - if second is None: - second = 0 - if day is None: - day = 1 - - ordinals = [] - - if quarter is not None: - if freq is None: - freq = 'Q' - base = frequencies.FreqGroup.FR_QTR - else: - base, mult = _gfc(freq) - if base != frequencies.FreqGroup.FR_QTR: - raise AssertionError("base must equal FR_QTR") - - year, quarter = _make_field_arrays(year, quarter) - for y, q in zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) - val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) - ordinals.append(val) - else: - base, mult = _gfc(freq) - arrays = _make_field_arrays(year, month, day, hour, minute, second) - for y, mth, d, h, mn, s in zip(*arrays): - ordinals.append(period.period_ordinal( - y, mth, d, h, mn, s, 0, 0, base)) - - return np.array(ordinals, dtype=np.int64), freq - - -def _make_field_arrays(*fields): - length = None - for x in fields: - if isinstance(x, (list, np.ndarray, ABCSeries)): - if length is not None and len(x) != length: - raise ValueError('Mismatched Period array lengths') - elif length is None: - length = len(x) - - arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) - else np.repeat(x, length) for x in fields] - - return arrays - - def pnow(freq=None): # deprecation, xref #13790 warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3af825455caac..eb1171c45b1e5 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,7 +1,5 @@ """ implement the TimedeltaIndex """ -from datetime import timedelta - import numpy as np from pandas.core.dtypes.common import ( _TD_DTYPE, @@ -17,7 +15,8 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.arrays.timedelta import TimedeltaArrayMixin 
+from pandas.core.arrays.timedelta import ( + TimedeltaArrayMixin, _is_convertible_to_td) from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat @@ -33,7 +32,7 @@ TimelikeOps, DatetimeIndexOpsMixin) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) -from pandas.tseries.offsets import Tick, DateOffset +from pandas.tseries.offsets import DateOffset from pandas._libs import (lib, index as libindex, join as libjoin, Timedelta, NaT, iNaT) @@ -268,46 +267,11 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, @classmethod def _generate(cls, start, end, periods, name, freq, closed=None): - if com._count_not_none(start, end, periods, freq) != 3: - raise ValueError('Of the four parameters: start, end, periods, ' - 'and freq, exactly three must be specified') - - if start is not None: - start = Timedelta(start) - - if end is not None: - end = Timedelta(end) - - left_closed = False - right_closed = False - - if start is None and end is None: - if closed is not None: - raise ValueError("Closed has to be None if not both of start" - "and end are defined") - - if closed is None: - left_closed = True - right_closed = True - elif closed == "left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") - - if freq is not None: - index = _generate_regular_range(start, end, periods, freq) - index = cls._simple_new(index, name=name, freq=freq) - else: - index = to_timedelta(np.linspace(start.value, end.value, periods)) - - if not left_closed: - index = index[1:] - if not right_closed: - index = index[:-1] - - return index + # TimedeltaArray gets `name` via **kwargs, so we need to explicitly + # override it if name is passed as a positional argument + return super(TimedeltaIndex, cls)._generate(start, end, + periods, freq, + name=name, closed=closed) @classmethod 
def _simple_new(cls, values, name=None, freq=None, **kwargs): @@ -383,90 +347,8 @@ def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): microseconds = _wrap_field_accessor("microseconds") nanoseconds = _wrap_field_accessor("nanoseconds") - @property - def components(self): - """ - Return a dataframe of the components (days, hours, minutes, - seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. - - Returns - ------- - a DataFrame - """ - from pandas import DataFrame - - columns = ['days', 'hours', 'minutes', 'seconds', - 'milliseconds', 'microseconds', 'nanoseconds'] - hasnans = self.hasnans - if hasnans: - def f(x): - if isna(x): - return [np.nan] * len(columns) - return x.components - else: - def f(x): - return x.components - - result = DataFrame([f(x) for x in self]) - result.columns = columns - if not hasnans: - result = result.astype('int64') - return result - + @Appender(TimedeltaArrayMixin.total_seconds.__doc__) def total_seconds(self): - """ - Return total duration of each element expressed in seconds. - - This method is available directly on TimedeltaIndex and on Series - containing timedelta values under the ``.dt`` namespace. - - Returns - ------- - seconds : Float64Index or Series - When the calling object is a TimedeltaIndex, the return type is a - Float64Index. When the calling object is a Series, the return type - is Series of type `float64` whose index is the same as the - original. - - See Also - -------- - datetime.timedelta.total_seconds : Standard library version - of this method. - TimedeltaIndex.components : Return a DataFrame with components of - each Timedelta. 
- - Examples - -------- - **Series** - - >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) - >>> s - 0 0 days - 1 1 days - 2 2 days - 3 3 days - 4 4 days - dtype: timedelta64[ns] - - >>> s.dt.total_seconds() - 0 0.0 - 1 86400.0 - 2 172800.0 - 3 259200.0 - 4 345600.0 - dtype: float64 - - **TimedeltaIndex** - - >>> idx = pd.to_timedelta(np.arange(5), unit='d') - >>> idx - TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], - dtype='timedelta64[ns]', freq=None) - - >>> idx.total_seconds() - Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0], - dtype='float64') - """ result = TimedeltaArrayMixin.total_seconds(self) return Index(result, name=self.name) @@ -915,11 +797,6 @@ def _is_convertible_to_index(other): return False -def _is_convertible_to_td(key): - return isinstance(key, (Tick, timedelta, - np.timedelta64, compat.string_types)) - - def _to_m8(key): """ Timedelta-like => dt64 @@ -932,28 +809,6 @@ def _to_m8(key): return np.int64(key.value).view(_TD_DTYPE) -def _generate_regular_range(start, end, periods, offset): - stride = offset.nanos - if periods is None: - b = Timedelta(start).value - e = Timedelta(end).value - e += stride - e % stride - elif start is not None: - b = Timedelta(start).value - e = b + periods * stride - elif end is not None: - e = Timedelta(end).value + stride - b = e - periods * stride - else: - raise ValueError("at least 'start' or 'end' should be specified " - "if a 'period' is given.") - - data = np.arange(b, e, stride, dtype=np.int64) - data = TimedeltaIndex._simple_new(data, None) - - return data - - def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """
Takes the place of #21843, porting constructor helpers. While this is in review I'm going to start porting tests in earnest.
https://api.github.com/repos/pandas-dev/pandas/pulls/21845
2018-07-10T20:19:09Z
2018-07-12T00:29:28Z
2018-07-12T00:29:28Z
2018-07-12T00:44:42Z
[CLN] cleanup import reverse-dependencies
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index b3f40b3a2429c..141a5d2389db5 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,8 +1,11 @@ """ io on the clipboard """ -from pandas import compat, get_option, option_context, DataFrame -from pandas.compat import StringIO, PY2, PY3 import warnings +from pandas.compat import StringIO, PY2, PY3 + +from pandas.core.dtypes.generic import ABCDataFrame +from pandas import compat, get_option, option_context + def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" @@ -131,7 +134,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover elif sep is not None: warnings.warn('to_clipboard with excel=False ignores the sep argument') - if isinstance(obj, DataFrame): + if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation with option_context('display.max_colwidth', 999999): objstr = obj.to_string(**kwargs) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index fa3a1bd74eda5..39131d390c69f 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -9,29 +9,32 @@ import os import abc import warnings -import numpy as np +from textwrap import fill from io import UnsupportedOperation +from distutils.version import LooseVersion + +import numpy as np + +import pandas._libs.json as json +from pandas.util._decorators import Appender, deprecate_kwarg +from pandas.errors import EmptyDataError + +import pandas.compat as compat +from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, + string_types, OrderedDict) from pandas.core.dtypes.common import ( is_integer, is_float, is_bool, is_list_like) +from pandas.core import config from pandas.core.frame import DataFrame + from pandas.io.parsers import TextParser -from pandas.errors import EmptyDataError from pandas.io.common import (_is_url, _urlopen, _validate_header_arg, get_filepath_or_buffer, _NA_VALUES, _stringify_path) -import pandas._libs.json as json 
-from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, - string_types, OrderedDict) -from pandas.core import config from pandas.io.formats.printing import pprint_thing -import pandas.compat as compat -from warnings import warn -from distutils.version import LooseVersion -from pandas.util._decorators import Appender, deprecate_kwarg -from textwrap import fill __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] @@ -527,8 +530,8 @@ def _parse_excel(self, "is not implemented") if parse_dates is True and index_col is None: - warn("The 'parse_dates=True' keyword of read_excel was provided" - " without an 'index_col' keyword value.") + warnings.warn("The 'parse_dates=True' keyword of read_excel was " + "provided without an 'index_col' keyword value.") import xlrd from xlrd import (xldate, XL_CELL_DATE, diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 60518f596e9af..0796888554a46 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -9,18 +9,20 @@ import csv as csvlib from zipfile import ZipFile + import numpy as np -from pandas.core.dtypes.missing import notna -from pandas.core.index import Index, MultiIndex +from pandas._libs import writers as libwriters + from pandas import compat -from pandas.compat import (StringIO, range, zip) +from pandas.compat import StringIO, range, zip + +from pandas.core.dtypes.missing import notna +from pandas.core.dtypes.generic import ( + ABCMultiIndex, ABCPeriodIndex, ABCDatetimeIndex, ABCIndexClass) from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user, _stringify_path) -from pandas._libs import writers as libwriters -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.period import PeriodIndex class CSVFormatter(object): @@ -68,7 +70,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.date_format = date_format self.tupleize_cols = tupleize_cols - self.has_mi_columns = (isinstance(obj.columns, MultiIndex) 
and + self.has_mi_columns = (isinstance(obj.columns, ABCMultiIndex) and not self.tupleize_cols) # validate mi options @@ -78,7 +80,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', "columns") if cols is not None: - if isinstance(cols, Index): + if isinstance(cols, ABCIndexClass): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, @@ -90,7 +92,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns - if isinstance(cols, Index): + if isinstance(cols, ABCIndexClass): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, @@ -111,8 +113,9 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.chunksize = int(chunksize) self.data_index = obj.index - if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and + if (isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None): + from pandas import Index self.data_index = Index([x.strftime(date_format) if notna(x) else '' for x in self.data_index]) @@ -197,7 +200,8 @@ def _save_header(self): header = self.header encoded_labels = [] - has_aliases = isinstance(header, (tuple, list, np.ndarray, Index)) + has_aliases = isinstance(header, (tuple, list, np.ndarray, + ABCIndexClass)) if not (has_aliases or self.header): return if has_aliases: @@ -214,7 +218,7 @@ def _save_header(self): # should write something for index label if index_label is not False: if index_label is None: - if isinstance(obj.index, MultiIndex): + if isinstance(obj.index, ABCMultiIndex): index_label = [] for i, name in enumerate(obj.index.names): if name is None: @@ -227,7 +231,7 @@ def _save_header(self): else: index_label = [index_label] elif not isinstance(index_label, - (list, tuple, np.ndarray, Index)): + (list, tuple, np.ndarray, ABCIndexClass)): # given a 
string for a DF with Index index_label = [index_label] diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 76ffd41f93090..ec95ce7a970ad 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -8,12 +8,15 @@ import numpy as np from pandas.compat import reduce -from pandas.io.formats.css import CSSResolver, CSSWarning -from pandas.io.formats.printing import pprint_thing import pandas.core.common as com + from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes import missing -from pandas import Index, MultiIndex, PeriodIndex +from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex +from pandas import Index + +from pandas.io.formats.css import CSSResolver, CSSWarning +from pandas.io.formats.printing import pprint_thing from pandas.io.formats.format import get_level_lengths @@ -414,7 +417,7 @@ def _format_header_mi(self): coloffset = 0 lnum = 0 - if self.index and isinstance(self.df.index, MultiIndex): + if self.index and isinstance(self.df.index, ABCMultiIndex): coloffset = len(self.df.index[0]) - 1 if self.merge_cells: @@ -449,7 +452,7 @@ def _format_header_regular(self): if self.index: coloffset = 1 - if isinstance(self.df.index, MultiIndex): + if isinstance(self.df.index, ABCMultiIndex): coloffset = len(self.df.index[0]) colnames = self.columns @@ -466,7 +469,7 @@ def _format_header_regular(self): header_style) def _format_header(self): - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() @@ -483,7 +486,7 @@ def _format_header(self): def _format_body(self): - if isinstance(self.df.index, MultiIndex): + if isinstance(self.df.index, ABCMultiIndex): return self._format_hierarchical_rows() else: return self._format_regular_rows() @@ -507,7 +510,7 @@ def _format_regular_rows(self): else: index_label = self.df.index.names[0] - if isinstance(self.columns, MultiIndex): + if 
isinstance(self.columns, ABCMultiIndex): self.rowcounter += 1 if index_label and self.header is not False: @@ -516,7 +519,7 @@ def _format_regular_rows(self): # write index_values index_values = self.df.index - if isinstance(self.df.index, PeriodIndex): + if isinstance(self.df.index, ABCPeriodIndex): index_values = self.df.index.to_timestamp() for idx, idxval in enumerate(index_values): @@ -548,7 +551,7 @@ def _format_hierarchical_rows(self): # with index names (blank if None) for # unambigous round-trip, unless not merging, # in which case the names all go on one row Issue #11328 - if isinstance(self.columns, MultiIndex) and self.merge_cells: + if isinstance(self.columns, ABCMultiIndex) and self.merge_cells: self.rowcounter += 1 # if index labels are not empty go ahead and dump diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 86a686783eaf3..5f97447d29cbc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -12,7 +12,7 @@ import numpy as np from pandas._libs import lib -from pandas._libs.tslibs import iNaT, Timestamp, Timedelta +from pandas._libs.tslibs import NaT, iNaT, Timestamp, Timedelta from pandas._libs.tslib import format_array_from_datetime from pandas import compat @@ -33,20 +33,18 @@ is_datetime64_dtype, is_timedelta64_dtype, is_list_like) -from pandas.core.dtypes.generic import ABCSparseArray +from pandas.core.dtypes.generic import ABCSparseArray, ABCMultiIndex from pandas.core.base import PandasObject import pandas.core.common as com -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, _ensure_index from pandas.core.config import get_option, set_option from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.io.formats.terminal import get_terminal_size -from pandas.io.common import (_expand_user, _stringify_path) +from pandas.io.common import _expand_user, _stringify_path from 
pandas.io.formats.printing import adjoin, justify, pprint_thing -import pandas as pd - common_docstring = """ Parameters @@ -248,7 +246,7 @@ def _get_footer(self): def _get_formatted_index(self): index = self.tr_series.index - is_multi = isinstance(index, MultiIndex) + is_multi = isinstance(index, ABCMultiIndex) if is_multi: have_header = any(name for name in index.names) @@ -768,7 +766,7 @@ def _get_formatted_column_labels(self, frame): columns = frame.columns - if isinstance(columns, MultiIndex): + if isinstance(columns, ABCMultiIndex): fmt_columns = columns.format(sparsify=False, adjoin=False) fmt_columns = lzip(*fmt_columns) dtypes = self.frame.dtypes._values @@ -824,7 +822,7 @@ def _get_formatted_index(self, frame): fmt = self._get_formatter('__index__') - if isinstance(index, MultiIndex): + if isinstance(index, ABCMultiIndex): fmt_index = index.format(sparsify=self.sparsify, adjoin=False, names=show_index_names, formatter=fmt) else: @@ -850,7 +848,7 @@ def _get_formatted_index(self, frame): def _get_column_name_list(self): names = [] columns = self.frame.columns - if isinstance(columns, MultiIndex): + if isinstance(columns, ABCMultiIndex): names.extend('' if name is None else name for name in columns.names) else: @@ -937,7 +935,7 @@ def _format(x): if self.na_rep is not None and is_scalar(x) and isna(x): if x is None: return 'None' - elif x is pd.NaT: + elif x is NaT: return 'NaT' return self.na_rep elif isinstance(x, PandasObject): @@ -1415,7 +1413,7 @@ def _cond(values): def _has_names(index): - if isinstance(index, MultiIndex): + if isinstance(index, ABCMultiIndex): return com._any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index a43c55a220292..20be903f54967 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -8,12 +8,14 @@ from textwrap import dedent -import pandas.core.common as com -from pandas.core.index import MultiIndex from pandas import compat 
from pandas.compat import (lzip, range, map, zip, u, OrderedDict, unichr) + +import pandas.core.common as com +from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.config import get_option + from pandas.io.formats.printing import pprint_thing from pandas.io.formats.format import (get_level_lengths, buffer_put_lines) @@ -117,7 +119,7 @@ def write_style(self): ('tbody tr th', 'vertical-align', 'top')] - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): element_props.append(('thead tr th', 'text-align', 'left')) @@ -205,7 +207,7 @@ def _column_header(): else: row = [] - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): if self.fmt.has_column_names and self.fmt.index: row.append(single_column_table(self.columns.names)) else: @@ -224,7 +226,7 @@ def _column_header(): indent += self.indent_delta - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): template = 'colspan="{span:d}" halign="left"' if self.fmt.sparsify: @@ -337,7 +339,7 @@ def _write_body(self, indent): # write values if self.fmt.index: - if isinstance(self.frame.index, MultiIndex): + if isinstance(self.frame.index, ABCMultiIndex): self._write_hierarchical_rows(fmt_values, indent) else: self._write_regular_rows(fmt_values, indent) diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 666f124e7d544..fbbad763dd97b 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -2,14 +2,16 @@ """ Module for formatting output data in Latex. 
""" - from __future__ import print_function -from pandas.core.index import MultiIndex +import numpy as np + from pandas import compat from pandas.compat import range, map, zip, u + +from pandas.core.dtypes.generic import ABCMultiIndex + from pandas.io.formats.format import TableFormatter -import numpy as np class LatexFormatter(TableFormatter): @@ -63,7 +65,7 @@ def get_col_type(dtype): return 'l' # reestablish the MultiIndex that has been joined by _to_str_column - if self.fmt.index and isinstance(self.frame.index, MultiIndex): + if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): out = self.frame.index.format( adjoin=False, sparsify=self.fmt.sparsify, names=self.fmt.has_index_names, na_rep=self.fmt.na_rep diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index bb31e8927cba3..35e244bf2f9eb 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -11,6 +11,16 @@ import itertools import warnings import os +from distutils.version import LooseVersion + +import numpy as np + +from pandas._libs import algos, lib, writers as libwriters +from pandas._libs.tslibs import timezones + +from pandas.errors import PerformanceWarning +from pandas import compat +from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter from pandas.core.dtypes.common import ( is_list_like, @@ -23,17 +33,10 @@ _ensure_platform_int) from pandas.core.dtypes.missing import array_equivalent -import numpy as np -from pandas import (Series, DataFrame, Panel, Index, - MultiIndex, Int64Index, isna, concat, to_datetime, - SparseSeries, SparseDataFrame, PeriodIndex, - DatetimeIndex, TimedeltaIndex) from pandas.core import config -from pandas.io.common import _stringify_path +from pandas.core.config import get_option from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.base import StringMixin -from pandas.io.formats.printing import adjoin, pprint_thing -from pandas.errors import PerformanceWarning import pandas.core.common as com from 
pandas.core.algorithms import match, unique from pandas.core.arrays.categorical import (Categorical, @@ -42,15 +45,15 @@ _block2d_to_blocknd, _factor_indexer, _block_shape) from pandas.core.index import _ensure_index -from pandas import compat -from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter -from pandas.core.config import get_option from pandas.core.computation.pytables import Expr, maybe_expression -from pandas._libs import algos, lib, writers as libwriters -from pandas._libs.tslibs import timezones +from pandas.io.common import _stringify_path +from pandas.io.formats.printing import adjoin, pprint_thing -from distutils.version import LooseVersion +from pandas import (Series, DataFrame, Panel, Index, + MultiIndex, Int64Index, isna, concat, to_datetime, + SparseSeries, SparseDataFrame, PeriodIndex, + DatetimeIndex, TimedeltaIndex) # versioning attribute _version = '0.15.2' diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 4d187a8282859..b2d930c1be5e7 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -13,16 +13,20 @@ Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ +from datetime import datetime +import struct + +import numpy as np -import pandas as pd from pandas import compat -from pandas.io.common import get_filepath_or_buffer, BaseIterator from pandas.errors import EmptyDataError -import numpy as np -import struct + +from pandas.io.common import get_filepath_or_buffer, BaseIterator import pandas.io.sas.sas_constants as const from pandas.io.sas._sas import Parser +import pandas as pd + class _subheader_pointer(object): pass @@ -169,7 +173,7 @@ def _get_properties(self): self.encoding or self.default_encoding) # Timestamp is epoch 01/01/1960 - epoch = pd.datetime(1960, 1, 1) + epoch = datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) 
self.date_created = epoch + pd.to_timedelta(x, unit='s') diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index cb01b7a652157..52b25898fc67e 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -9,13 +9,16 @@ """ from datetime import datetime -import pandas as pd -from pandas.io.common import get_filepath_or_buffer, BaseIterator -from pandas import compat import struct +import warnings + import numpy as np + from pandas.util._decorators import Appender -import warnings +from pandas import compat + +from pandas.io.common import get_filepath_or_buffer, BaseIterator +import pandas as pd _correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!" "000000000000000000000000000000 ") diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 297a24fa3a149..4ce2ed4e36139 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -14,14 +14,15 @@ import struct import sys from collections import OrderedDict +import warnings import numpy as np from dateutil.relativedelta import relativedelta + from pandas._libs.lib import infer_dtype from pandas._libs.tslibs import NaT, Timestamp from pandas._libs.writers import max_len_string_array -import pandas as pd from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range, zip, BytesIO) @@ -317,12 +318,12 @@ def convert_delta_safe(base, deltas, unit): ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') elif fmt.startswith(("%tC", "tC")): - from warnings import warn - warn("Encountered %tC format. Leaving in Stata Internal Format.") + warnings.warn("Encountered %tC format. 
Leaving in Stata " + "Internal Format.") conv_dates = Series(dates, dtype=np.object) if has_bad_values: - conv_dates[bad_locs] = pd.NaT + conv_dates[bad_locs] = NaT return conv_dates # Delta days relative to base elif fmt.startswith(("%td", "td", "%d", "d")): @@ -425,8 +426,7 @@ def parse_dates_safe(dates, delta=False, year=False, days=False): d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: - from warnings import warn - warn("Stata Internal Format tC not supported.") + warnings.warn("Stata Internal Format tC not supported.") conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) @@ -580,8 +580,6 @@ def _cast_to_stata_types(data): raise ValueError(msg.format(col, value, float64_max)) if ws: - import warnings - warnings.warn(ws, PossiblePrecisionLoss) return data @@ -627,7 +625,6 @@ def __init__(self, catarray): category = vl[1] if not isinstance(category, string_types): category = str(category) - import warnings warnings.warn(value_label_mismatch_doc.format(catarray.name), ValueLabelTypeMismatch) @@ -1425,7 +1422,6 @@ def _read_strls(self): @Appender(_data_method_doc) def data(self, **kwargs): - import warnings warnings.warn("'data' is deprecated, use 'read' instead") if self._data_read: @@ -2105,7 +2101,6 @@ def _check_column_names(self, data): del self._convert_dates[o] if converted_names: - import warnings conversion_warning = [] for orig_name, name in converted_names.items(): # need to possibly encode the orig name if its unicode diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 97e0d0b4608ae..beebf84b8a033 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -11,6 +11,9 @@ from matplotlib.ticker import Formatter, AutoLocator, Locator from matplotlib.transforms import nonsingular +from pandas._libs import tslibs +from pandas._libs.tslibs import resolution + from pandas.core.dtypes.common import ( is_float, is_integer, 
is_integer_dtype, @@ -23,13 +26,11 @@ from pandas.compat import lrange import pandas.compat as compat -from pandas._libs import tslibs import pandas.core.common as com from pandas.core.index import Index from pandas.core.indexes.datetimes import date_range import pandas.core.tools.datetimes as tools -from pandas._libs.tslibs import resolution import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import FreqGroup from pandas.core.indexes.period import Period, PeriodIndex diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 842da838b4b83..06020bdfd5d1d 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -9,10 +9,15 @@ import numpy as np -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import cache_readonly, Appender +from pandas.compat import range, lrange, map, zip, string_types +import pandas.compat as compat + import pandas.core.common as com from pandas.core.base import PandasObject from pandas.core.config import get_option +from pandas.core.generic import _shared_docs, _shared_doc_kwargs + from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.dtypes.common import ( is_list_like, @@ -20,16 +25,10 @@ is_number, is_hashable, is_iterator) -from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame +from pandas.core.dtypes.generic import ( + ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCMultiIndex, ABCIndexClass) -from pandas.core.generic import _shared_docs, _shared_doc_kwargs -from pandas.core.index import Index, MultiIndex - -from pandas.core.indexes.period import PeriodIndex -from pandas.compat import range, lrange, map, zip, string_types -import pandas.compat as compat from pandas.io.formats.printing import pprint_thing -from pandas.util._decorators import Appender from pandas.plotting._compat import (_mpl_ge_1_3_1, _mpl_ge_1_5_0, @@ -170,7 +169,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, for 
kw, err in zip(['xerr', 'yerr'], [xerr, yerr]): self.errors[kw] = self._parse_errorbars(kw, err) - if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)): + if not isinstance(secondary_y, (bool, tuple, list, + np.ndarray, ABCIndexClass)): secondary_y = [secondary_y] self.secondary_y = secondary_y @@ -484,7 +484,7 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None): @property def legend_title(self): - if not isinstance(self.data.columns, MultiIndex): + if not isinstance(self.data.columns, ABCMultiIndex): name = self.data.columns.name if name is not None: name = pprint_thing(name) @@ -566,7 +566,7 @@ def _get_xticks(self, convert_period=False): 'datetime64', 'time') if self.use_index: - if convert_period and isinstance(index, PeriodIndex): + if convert_period and isinstance(index, ABCPeriodIndex): self.data = self.data.reindex(index=index.sort_values()) x = self.data.index.to_timestamp()._mpl_repr() elif index.is_numeric(): @@ -596,7 +596,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): y = np.ma.array(y) y = np.ma.masked_where(mask, y) - if isinstance(x, Index): + if isinstance(x, ABCIndexClass): x = x._mpl_repr() if is_errorbar: @@ -615,7 +615,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): return ax.plot(*args, **kwds) def _get_index_name(self): - if isinstance(self.data.index, MultiIndex): + if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names if com._any_not_none(*name): name = ','.join(pprint_thing(x) for x in name) @@ -653,7 +653,8 @@ def on_right(self, i): if isinstance(self.secondary_y, bool): return self.secondary_y - if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)): + if isinstance(self.secondary_y, (tuple, list, + np.ndarray, ABCIndexClass)): return self.data.columns[i] in self.secondary_y def _apply_style_colors(self, colors, kwds, col_num, label): @@ -704,14 +705,12 @@ def _parse_errorbars(self, label, err): if err is None: return None - 
from pandas import DataFrame, Series - def match_labels(data, e): e = e.reindex(data.index) return e # key-matched DataFrame - if isinstance(err, DataFrame): + if isinstance(err, ABCDataFrame): err = match_labels(self.data, err) # key-matched dict @@ -719,7 +718,7 @@ def match_labels(data, e): pass # Series of error values - elif isinstance(err, Series): + elif isinstance(err, ABCSeries): # broadcast error series across data err = match_labels(self.data, err) err = np.atleast_2d(err) @@ -765,14 +764,13 @@ def match_labels(data, e): return err def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): - from pandas import DataFrame errors = {} for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]): if flag: err = self.errors[kw] # user provided label-matched dataframe of errors - if isinstance(err, (DataFrame, dict)): + if isinstance(err, (ABCDataFrame, dict)): if label is not None and label in err.keys(): err = err[label] else: @@ -2196,9 +2194,8 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None, if return_type not in BoxPlot._valid_return_types: raise ValueError("return_type must be {'axes', 'dict', 'both'}") - from pandas import Series, DataFrame - if isinstance(data, Series): - data = DataFrame({'x': data}) + if isinstance(data, ABCSeries): + data = data.to_frame('x') column = 'x' def _get_colors(): @@ -2420,7 +2417,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, return axes if column is not None: - if not isinstance(column, (list, np.ndarray, Index)): + if not isinstance(column, (list, np.ndarray, ABCIndexClass)): column = [column] data = data[column] data = data._get_numeric_data() @@ -2658,7 +2655,6 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, figsize=None, sharex=True, sharey=True, layout=None, rot=0, ax=None, **kwargs): - from pandas import DataFrame if figsize == 'default': # allowed to specify 
mpl default with 'default' @@ -2679,7 +2675,7 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, for i, (key, group) in enumerate(grouped): ax = _axes[i] - if numeric_only and isinstance(group, DataFrame): + if numeric_only and isinstance(group, ABCDataFrame): group = group._get_numeric_data() plotf(group, ax, **kwargs) ax.set_title(pprint_thing(key)) diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 21a03ea388566..0522d7e721b65 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -3,14 +3,16 @@ import functools import numpy as np - from matplotlib import pylab -from pandas.core.indexes.period import Period + +from pandas._libs.tslibs.period import Period + +from pandas.core.dtypes.generic import ( + ABCPeriodIndex, ABCDatetimeIndex, ABCTimedeltaIndex) + from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.period import PeriodIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex + from pandas.io.formats.printing import pprint_thing import pandas.compat as compat @@ -69,7 +71,7 @@ def _maybe_resample(series, ax, kwargs): raise ValueError('Cannot use dynamic axis without frequency info') # Convert DatetimeIndex to PeriodIndex - if isinstance(series.index, DatetimeIndex): + if isinstance(series.index, ABCDatetimeIndex): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: @@ -239,7 +241,7 @@ def _use_dynamic_x(ax, data): return False # hack this for 0.10.1, creating more technical debt...sigh - if isinstance(data.index, DatetimeIndex): + if isinstance(data.index, ABCDatetimeIndex): base = frequencies.get_freq(freq) x = data.index if (base <= frequencies.FreqGroup.FR_DAY): @@ -262,7 +264,7 @@ def _get_index_freq(data): def _maybe_convert_index(ax, data): # tsplot converts automatically, but don't want to convert index # 
over and over for DataFrames - if isinstance(data.index, DatetimeIndex): + if isinstance(data.index, ABCDatetimeIndex): freq = getattr(data.index, 'freq', None) if freq is None: @@ -320,7 +322,7 @@ def format_dateaxis(subplot, freq, index): # handle index specific formatting # Note: DatetimeIndex does not use this # interface. DatetimeIndex uses matplotlib.date directly - if isinstance(index, PeriodIndex): + if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, @@ -343,7 +345,7 @@ def format_dateaxis(subplot, freq, index): # x and y coord info subplot.format_coord = functools.partial(_format_coord, freq) - elif isinstance(index, TimedeltaIndex): + elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter( TimeSeries_TimedeltaFormatter()) else: diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py index 816586fbb82f5..7618afd42010f 100644 --- a/pandas/plotting/_tools.py +++ b/pandas/plotting/_tools.py @@ -8,8 +8,7 @@ import numpy as np from pandas.core.dtypes.common import is_list_like -from pandas.core.dtypes.generic import ABCSeries -from pandas.core.index import Index +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCDataFrame from pandas.compat import range @@ -43,10 +42,9 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): ------- matplotlib table object """ - from pandas import DataFrame if isinstance(data, ABCSeries): - data = DataFrame(data, columns=[data.name]) - elif isinstance(data, DataFrame): + data = data.to_frame() + elif isinstance(data, ABCDataFrame): pass else: raise ValueError('Input data must be DataFrame or Series') @@ -341,7 +339,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): def _flatten(axes): if not is_list_like(axes): return np.array([axes]) - elif isinstance(axes, (np.ndarray, Index)): + elif isinstance(axes, (np.ndarray, ABCIndexClass)): return axes.ravel() return 
np.array(axes)
Parts of the code are made more difficult to reason about by depending on `pandas.io.formats`, which has dependencies all over the code. This PR starts in on this by replacing `import Foo` with `import ABCFoo` where possible. Along the way it cleans up import order.
https://api.github.com/repos/pandas-dev/pandas/pulls/21844
2018-07-10T15:24:47Z
2018-07-11T00:09:43Z
2018-07-11T00:09:43Z
2018-07-11T01:19:27Z
[CLN] cy cleanup, de-duplication
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 41047d9c25c22..e040fd1f52478 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -44,9 +44,9 @@ cdef inline bint is_definitely_invalid_key(object val): or PyList_Check(val) or hasattr(val, '_data')) -def get_value_at(ndarray arr, object loc): +cpdef get_value_at(ndarray arr, object loc, object tz=None): if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_at(arr, loc)) + return Timestamp(util.get_value_at(arr, loc), tz=tz) elif arr.descr.type_num == NPY_TIMEDELTA: return Timedelta(util.get_value_at(arr, loc)) return util.get_value_at(arr, loc) @@ -69,12 +69,7 @@ cpdef object get_value_box(ndarray arr, object loc): if i >= sz or sz == 0 or i < 0: raise IndexError('index out of bounds') - if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_1d(arr, i)) - elif arr.descr.type_num == NPY_TIMEDELTA: - return Timedelta(util.get_value_1d(arr, i)) - else: - return util.get_value_1d(arr, i) + return get_value_at(arr, i, tz=None) # Don't populate hash tables in monotonic indexes larger than this @@ -115,11 +110,7 @@ cdef class IndexEngine: if PySlice_Check(loc) or cnp.PyArray_Check(loc): return arr[loc] else: - if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_at(arr, loc), tz=tz) - elif arr.descr.type_num == NPY_TIMEDELTA: - return Timedelta(util.get_value_at(arr, loc)) - return util.get_value_at(arr, loc) + return get_value_at(arr, loc, tz=tz) cpdef set_value(self, ndarray arr, object key, object value): """ diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index fb707a3c3e5e2..c680706b7b2d2 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -1,10 +1,10 @@ # cython: profile=False cdef class _NDFrameIndexerBase: - ''' + """ A base class for _NDFrameIndexer for fast instantiation and attribute access. 
- ''' + """ cdef public object obj, name, _ndim def __init__(self, name, obj): diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 0901d474d044c..12d35f7ce2f58 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -8,9 +8,7 @@ Cython implementations of functions resembling the stdlib calendar module cimport cython from cython cimport Py_ssize_t -cimport numpy as cnp from numpy cimport int64_t, int32_t -cnp.import_array() from locale import LC_TIME from strptime import LocaleTime diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 9964ca0847ce7..a3b7d6c59200c 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -556,6 +556,9 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): elif treat_tz_as_dateutil(tz): dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) else: + # TODO: this case is never reached in the tests, but get_dst_info + # has a path that returns typ = None and empty deltas. + # --> Is this path possible? 
pass obj.tzinfo = tz @@ -1145,10 +1148,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index d89c06d43ccb9..4ed3e13dc0025 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -960,10 +960,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 8565857fa945f..cfb7e1dce9309 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -103,10 +103,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 74fadbdb64763..b3fab83fef415 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -188,7 +188,7 @@ cdef object get_utc_trans_times_from_dateutil_tz(object tz): return new_trans -cpdef ndarray 
unbox_utcoffsets(object transinfo): +cpdef ndarray[int64_t, ndim=1] unbox_utcoffsets(object transinfo): cdef: Py_ssize_t i, sz ndarray[int64_t] arr @@ -216,6 +216,8 @@ cdef object get_dst_info(object tz): """ cache_key = tz_cache_key(tz) if cache_key is None: + # e.g. pytz.FixedOffset, matplotlib.dates._UTC, + # psycopg2.tz.FixedOffsetTimezone num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 return (np.array([NPY_NAT + 1], dtype=np.int64), np.array([num], dtype=np.int64),
Small cleanups, remove some unnecessary casting, add typing in timezones.
https://api.github.com/repos/pandas-dev/pandas/pulls/21826
2018-07-09T11:55:38Z
2018-07-11T00:03:59Z
2018-07-11T00:03:59Z
2018-07-11T01:20:33Z
ENH: raise a more a useful exception on empty files
diff --git a/doc/source/release.rst b/doc/source/release.rst index df09d2f5a50ba..27299844997cb 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -96,6 +96,8 @@ pandas 0.11.1 explicitly checking a website as a proxy for seeing if there is network connectivity. Plus, new ``optional_args`` decorator factory for decorators. (:issue:`3910`, :issue:`3914`) + - ``read_csv`` will now throw a more informative error message when a file + contains no columns, e.g., all newline characters **API Changes** diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index cc60ce07c2c4d..e7624225853a0 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -240,6 +240,9 @@ I/O Enhancements import os os.remove(path) + - ``read_csv`` will now throw a more informative error message when a file + contains no columns, e.g., all newline characters + Other Enhancements ~~~~~~~~~~~~~~~~~~ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 377ef4ff5a44f..3fa8091feeb15 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -949,6 +949,7 @@ def __init__(self, src, **kwds): # #2442 kwds['allow_leading_cols'] = self.index_col is not False + self._reader = _parser.TextReader(src, **kwds) # XXX diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index f9e956f60dde6..d75dcb6f02bfc 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -2330,6 +2330,19 @@ def test_tokenize_CR_with_quoting(self): expected = self.read_csv(StringIO(data.replace('\r', '\n'))) tm.assert_frame_equal(result, expected) + def test_raise_on_no_columns(self): + # single newline + data = """ +""" + self.assertRaises(ValueError, self.read_csv, StringIO(data)) + + # test with more than a single newline + data = """ + + +""" + self.assertRaises(ValueError, self.read_csv, StringIO(data)) + class TestParseSQL(unittest.TestCase): diff --git a/pandas/parser.pyx b/pandas/parser.pyx index eaa588ef4d150..185cf1a752803 100644 
--- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -476,6 +476,9 @@ cdef class TextReader: self.names = names self.header, self.table_width = self._get_header() + if not self.table_width: + raise ValueError("No columns to parse from file") + # compute buffer_lines as function of table width heuristic = 2**20 // self.table_width self.buffer_lines = 1
https://api.github.com/repos/pandas-dev/pandas/pulls/3989
2013-06-22T03:14:30Z
2013-06-22T22:32:30Z
2013-06-22T22:32:30Z
2014-07-16T08:15:21Z
TST/BUG: fix MANIFEST.in to reflect the change of file type of RELEASE
diff --git a/MANIFEST.in b/MANIFEST.in index 649d96e7b2051..02de7790d11cf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ include MANIFEST.in include LICENSE -include RELEASE.rst +include RELEASE.md include README.rst include TODO.rst include setup.py
https://api.github.com/repos/pandas-dev/pandas/pulls/3987
2013-06-21T23:33:58Z
2013-06-21T23:54:55Z
2013-06-21T23:54:55Z
2014-07-16T08:15:20Z
BUG: Index shift drops index name
diff --git a/doc/source/release.rst b/doc/source/release.rst index afca7511bf11f..07489a140c018 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -270,6 +270,7 @@ pandas 0.11.1 - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was specified (:issue:`3967`), python parser failing with ``chunksize=1`` + - Fix index name not propogating when using ``shift`` .. _Gh3616: https://github.com/pydata/pandas/issues/3616 diff --git a/pandas/core/index.py b/pandas/core/index.py index a5880b9f18670..c06c46cde36c8 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -563,7 +563,7 @@ def shift(self, periods=1, freq=None): return self offset = periods * freq - return Index([idx + offset for idx in self]) + return Index([idx + offset for idx in self], name=self.name) def argsort(self, *args, **kwargs): """ diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 7ce4a11229561..d9808ab48ca41 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -204,6 +204,9 @@ def test_shift(self): shifted = self.dateIndex.shift(1, 'B') self.assert_(np.array_equal(shifted, self.dateIndex + offsets.BDay())) + shifted.name = 'shifted' + self.assertEqual(shifted.name, shifted.shift(1, 'D').name) + def test_intersection(self): first = self.strIndex[:20] second = self.strIndex[:10]
``` >>> idx = pd.Index([pd.to_datetime('2013-06-21')], name='idx') >>> idx.name 'idx' >>> idx.shift(1, freq='B').name == None True ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3986
2013-06-21T21:18:26Z
2013-06-21T23:04:23Z
2013-06-21T23:04:23Z
2014-07-15T16:07:40Z
BUG/TST: catch socket.error in py2/3.2 and ConnectionError in py3.3
diff --git a/doc/source/release.rst b/doc/source/release.rst index 917d91a14441e..c356b6378ce37 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -281,6 +281,8 @@ pandas 0.12 - Fixed flattening of columns when renaming MultiIndex columns DataFrame (:issue:`4004`) - Fix ``Series.clip`` for datetime series. NA/NaN threshold values will now throw ValueError (:issue:`3996`) - Fixed insertion issue into DataFrame, after rename (:issue:`4032`) + - Fixed testing issue where too many sockets where open thus leading to a + connection reset issue (:issue:`3982`, :issue:`3985`) pandas 0.11.0 diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index e146e892722d8..eb41c2dbca82f 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -420,6 +420,8 @@ Bug Fixes explicitly checking a website as a proxy for seeing if there is network connectivity. Plus, new ``optional_args`` decorator factory for decorators. (:issue:`3910`, :issue:`3914`) + - Fixed testing issue where too many sockets where open thus leading to a + connection reset issue (:issue:`3982`, :issue:`3985`) See the :ref:`full release notes <release>` or issue tracker diff --git a/pandas/io/data.py b/pandas/io/data.py index 21f69e2e7daf4..9cf5eeb1fed4e 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -8,9 +8,9 @@ import numpy as np import datetime as dt import urllib -import urllib2 import time -import warnings +from contextlib import closing +from urllib2 import urlopen from zipfile import ZipFile from pandas.util.py3compat import StringIO, BytesIO, bytes_to_str @@ -109,10 +109,11 @@ def get_quote_yahoo(symbols): data = dict(zip(codes.keys(), [[] for i in range(len(codes))])) - urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % ( - sym_list, request) + url_str = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (sym_list, + request) - lines = urllib2.urlopen(urlStr).readlines() + with closing(urlopen(url_str)) as url: + lines = url.readlines() for line in 
lines: fields = line.decode('utf-8').strip().split(',') @@ -151,29 +152,29 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3, yahoo_URL = 'http://ichart.yahoo.com/table.csv?' - url = yahoo_URL + 's=%s' % sym + \ - '&a=%s' % (start.month - 1) + \ - '&b=%s' % start.day + \ - '&c=%s' % start.year + \ - '&d=%s' % (end.month - 1) + \ - '&e=%s' % end.day + \ - '&f=%s' % end.year + \ - '&g=d' + \ - '&ignore=.csv' - - for _ in range(retry_count): - resp = urllib2.urlopen(url) - if resp.code == 200: - lines = resp.read() - rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, - parse_dates=True)[::-1] - - # Yahoo! Finance sometimes does this awesome thing where they - # return 2 rows for the most recent business day - if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover - rs = rs[:-1] - - return rs + url = (yahoo_URL + 's=%s' % sym + + '&a=%s' % (start.month - 1) + + '&b=%s' % start.day + + '&c=%s' % start.year + + '&d=%s' % (end.month - 1) + + '&e=%s' % end.day + + '&f=%s' % end.year + + '&g=d' + + '&ignore=.csv') + + for _ in xrange(retry_count): + with closing(urlopen(url)) as resp: + if resp.code == 200: + lines = resp.read() + rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, + parse_dates=True)[::-1] + + # Yahoo! Finance sometimes does this awesome thing where they + # return 2 rows for the most recent business day + if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover + rs = rs[:-1] + + return rs time.sleep(pause) @@ -198,17 +199,19 @@ def _get_hist_google(sym=None, start=None, end=None, retry_count=3, google_URL = 'http://www.google.com/finance/historical?' 
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv - url = google_URL + urllib.urlencode({"q": sym, \ - "startdate": start.strftime('%b %d, %Y'), \ - "enddate": end.strftime('%b %d, %Y'), "output": "csv" }) - for _ in range(retry_count): - resp = urllib2.urlopen(url) - if resp.code == 200: - lines = resp.read() - rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, - parse_dates=True)[::-1] - - return rs + url = google_URL + urllib.urlencode({"q": sym, + "startdate": start.strftime('%b %d, ' + '%Y'), + "enddate": end.strftime('%b %d, %Y'), + "output": "csv"}) + for _ in xrange(retry_count): + with closing(urlopen(url)) as resp: + if resp.code == 200: + lines = resp.read() + rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, + parse_dates=True)[::-1] + + return rs time.sleep(pause) @@ -280,19 +283,19 @@ def get_components_yahoo(idx_sym): '&e=.csv&h={2}' idx_mod = idx_sym.replace('^', '@%5E') - urlStr = url.format(idx_mod, stats, 1) + url_str = url.format(idx_mod, stats, 1) idx_df = DataFrame() mask = [True] comp_idx = 1 - #LOOP across component index structure, - #break when no new components are found - while (True in mask): - urlStr = url.format(idx_mod, stats, comp_idx) - lines = (urllib.urlopen(urlStr).read().decode('utf-8').strip(). 
- strip('"').split('"\r\n"')) - + # LOOP across component index structure, + # break when no new components are found + while True in mask: + url_str = url.format(idx_mod, stats, comp_idx) + with closing(urlopen(url_str)) as resp: + raw = resp.read() + lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"') lines = [line.strip().split('","') for line in lines] temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange']) @@ -468,11 +471,11 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1), fred_URL = "http://research.stlouisfed.org/fred2/series/" - url = fred_URL + '%s' % name + \ - '/downloaddata/%s' % name + '.csv' - data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True, - header=None, skiprows=1, names=["DATE", name], - na_values='.') + url = fred_URL + '%s' % name + '/downloaddata/%s' % name + '.csv' + with closing(urlopen(url)) as resp: + data = read_csv(resp, index_col=0, parse_dates=True, + header=None, skiprows=1, names=["DATE", name], + na_values='.') try: return data.truncate(start, end) except KeyError: @@ -489,9 +492,9 @@ def get_data_famafrench(name, start=None, end=None): # path of zip files zipFileURL = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/" - url = urllib.urlopen(zipFileURL + name + ".zip") - zipfile = ZipFile(StringIO(url.read())) - data = zipfile.open(name + ".txt").readlines() + with closing(urlopen(zipFileURL + name + ".zip")) as url: + with closing(ZipFile(StringIO(url.read()))) as zf: + data = zf.read(name + ".txt").splitlines() file_edges = np.where(np.array([len(d) for d in data]) == 2)[0] @@ -638,7 +641,7 @@ def get_options_data(self, month=None, year=None, expiry=None): url = str('http://finance.yahoo.com/q/op?s=' + self.symbol + '+Options') - parsed = parse(urllib2.urlopen(url)) + parsed = parse(url) doc = parsed.getroot() tables = doc.findall('.//table') calls = tables[9] @@ -709,7 +712,7 @@ def get_call_data(self, month=None, year=None, expiry=None): url = 
str('http://finance.yahoo.com/q/op?s=' + self.symbol + '+Options') - parsed = parse(urllib2.urlopen(url)) + parsed = parse(url) doc = parsed.getroot() tables = doc.findall('.//table') calls = tables[9] @@ -777,7 +780,7 @@ def get_put_data(self, month=None, year=None, expiry=None): url = str('http://finance.yahoo.com/q/op?s=' + self.symbol + '+Options') - parsed = parse(urllib2.urlopen(url)) + parsed = parse(url) doc = parsed.getroot() tables = doc.findall('.//table') puts = tables[13]
closes #3982.
https://api.github.com/repos/pandas-dev/pandas/pulls/3985
2013-06-21T21:16:40Z
2013-06-26T18:16:36Z
2013-06-26T18:16:36Z
2014-06-13T07:41:23Z
BUG (GH3967) csv parsers would loop infinitely if iterator=True but no chunksize specified
diff --git a/doc/source/release.rst b/doc/source/release.rst index 882826765d057..f16036692c8d3 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -258,6 +258,8 @@ pandas 0.11.1 - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing two integer arrays with at least 10000 cells total (:issue:`3764`) - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) + - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was + specified (:issue:`3967`), python parser failing with ``chunksize=1`` .. _Gh3616: https://github.com/pydata/pandas/issues/3616 diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 054363d8cda06..658532e80682d 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -186,7 +186,7 @@ def _read(filepath_or_buffer, kwds): kwds['parse_dates'] = True # Extract some of the arguments (pass chunksize on). - iterator = kwds.pop('iterator', False) + iterator = kwds.get('iterator', False) nrows = kwds.pop('nrows', None) chunksize = kwds.get('chunksize', None) @@ -569,8 +569,11 @@ def _clean_options(self, options, engine): def __iter__(self): try: - while True: - yield self.read(self.chunksize) + if self.chunksize: + while True: + yield self.read(self.chunksize) + else: + yield self.read() except StopIteration: pass @@ -1594,47 +1597,58 @@ def _rows_to_cols(self, content): def _get_lines(self, rows=None): source = self.data lines = self.buf + new_rows = None # already fetched some number if rows is not None: - rows -= len(self.buf) - if isinstance(source, list): - if self.pos > len(source): - raise StopIteration - if rows is None: - lines.extend(source[self.pos:]) - self.pos = len(source) + # we already have the lines in the buffer + if len(self.buf) >= rows: + new_rows, self.buf = self.buf[:rows], self.buf[rows:] + + # need some lines else: - lines.extend(source[self.pos:self.pos + rows]) - self.pos += rows - else: - 
new_rows = [] - try: - if rows is not None: - for _ in xrange(rows): - new_rows.append(next(source)) - lines.extend(new_rows) + rows -= len(self.buf) + + if new_rows is None: + if isinstance(source, list): + if self.pos > len(source): + raise StopIteration + if rows is None: + lines.extend(source[self.pos:]) + self.pos = len(source) else: - rows = 0 - while True: - try: + lines.extend(source[self.pos:self.pos + rows]) + self.pos += rows + else: + new_rows = [] + try: + if rows is not None: + for _ in xrange(rows): new_rows.append(next(source)) - rows += 1 - except csv.Error, inst: - if 'newline inside string' in str(inst): - row_num = str(self.pos + rows) - msg = ('EOF inside string starting with line ' - + row_num) - raise Exception(msg) - raise - except StopIteration: - lines.extend(new_rows) - if len(lines) == 0: - raise - self.pos += len(new_rows) + lines.extend(new_rows) + else: + rows = 0 + while True: + try: + new_rows.append(next(source)) + rows += 1 + except csv.Error, inst: + if 'newline inside string' in str(inst): + row_num = str(self.pos + rows) + msg = ('EOF inside string starting with line ' + + row_num) + raise Exception(msg) + raise + except StopIteration: + lines.extend(new_rows) + if len(lines) == 0: + raise + self.pos += len(new_rows) - self.buf = [] + self.buf = [] + else: + lines = new_rows if self.skip_footer: lines = lines[:-self.skip_footer] diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index cc2dddd829302..f9e956f60dde6 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1037,6 +1037,24 @@ def test_iterator(self): iterator=True) self.assert_(isinstance(treader, TextFileReader)) + # stopping iteration when on chunksize is specified, GH 3967 + data = """A,B,C +foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + reader = self.read_csv(StringIO(data), iterator=True) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + 
tm.assert_frame_equal(result[0], expected) + + # chunksize = 1 + reader = self.read_csv(StringIO(data), chunksize=1) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + self.assert_(len(result) == 3) + tm.assert_frame_equal(pd.concat(result), expected) + def test_header_not_first_line(self): data = """got,to,ignore,this,line got,to,ignore,this,line
BUG: python parser failing with `chunksize=1` closes #3967
https://api.github.com/repos/pandas-dev/pandas/pulls/3978
2013-06-21T01:00:15Z
2013-06-21T01:22:34Z
2013-06-21T01:22:33Z
2014-07-10T21:01:57Z
DOC: put release notes link to dev until 0.11.1 is released
diff --git a/RELEASE.md b/RELEASE.md index 9e21bbf23948a..b1e2aadf485a8 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -2,5 +2,5 @@ Release Notes ============= The list of changes to pandas between each release can be found -[here](http://pandas.pydata.org/pandas-docs/stable/release.html). For full +[here](http://pandas.pydata.org/pandas-docs/dev/release.html). For full details, see the commit logs at http://github.com/pydata/pandas.
https://api.github.com/repos/pandas-dev/pandas/pulls/3975
2013-06-20T23:39:14Z
2013-06-21T00:34:29Z
2013-06-21T00:34:29Z
2014-07-16T08:15:12Z
ENH: print more detailed truncated sequence if a limit is given
works similar to numpy except that the edgeitems is overriden by threshold rather than the other way around which is what np does. closes #3391 - [x] add release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/3974
2013-06-20T21:52:45Z
2014-03-13T14:37:40Z
2014-03-13T14:37:40Z
2014-07-17T06:09:18Z
CLN: fix grammar in extract_index error message
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 47142daa8b20b..bf9d1cd7d30b9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5713,7 +5713,8 @@ def extract_index(data): raw_lengths.append(len(v)) if not indexes and not raw_lengths: - raise ValueError('If use all scalar values, must pass index') + raise ValueError('If using all scalar values, you must must pass' + ' an index') if have_series or have_dicts: index = _union_indexes(indexes) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 63f92e9fa7a35..8b32b3a641ebb 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2245,8 +2245,9 @@ def test_constructor_error_msgs(self): try: DataFrame({'a': False, 'b': True}) except (Exception), detail: + msg = 'If using all scalar values, you must must pass an index' self.assert_(type(detail) == ValueError) - self.assert_("If use all scalar values, must pass index" in str(detail)) + self.assert_(msg in str(detail)) def test_constructor_subclass_dict(self): # Test for passing dict subclass to constructor
closes #3968.
https://api.github.com/repos/pandas-dev/pandas/pulls/3972
2013-06-20T18:03:40Z
2013-06-20T19:26:51Z
2013-06-20T19:26:50Z
2014-07-16T08:15:11Z
ENH add cython tutorial
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst new file mode 100644 index 0000000000000..db28dfde926bf --- /dev/null +++ b/doc/source/enhancingperf.rst @@ -0,0 +1,273 @@ +.. _enhancingperf: + +.. currentmodule:: pandas + +.. ipython:: python + :suppress: + + import os + import csv + from pandas import DataFrame + import pandas as pd + + import numpy as np + np.random.seed(123456) + randn = np.random.randn + randint = np.random.randint + np.set_printoptions(precision=4, suppress=True) + + +********************* +Enhancing Performance +********************* + +.. _enhancingperf.cython: + +Cython (Writing C extensions for pandas) +---------------------------------------- + +For many use cases writing pandas in pure python and numpy is sufficient. In some +computationally heavy applications however, it can be possible to achieve sizeable +speed-ups by offloading work to `cython <http://cython.org/>`_. + +This tutorial assumes you have refactored as much as possible in python, for example +trying to remove for loops and making use of numpy vectorization, it's always worth +optimising in python first. + +This tutorial walks through a "typical" process of cythonizing a slow computation. +We use an `example from the cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`_ +but in the context of pandas. Our final cythonized solution is around 100 times +faster than the pure python. + +.. _enhancingperf.pure: + +Pure python +~~~~~~~~~~~ + +We have a DataFrame to which we want to apply a function row-wise. + +.. ipython:: python + + df = DataFrame({'a': randn(1000), 'b': randn(1000),'N': randint(100, 1000, (1000)), 'x': 'x'}) + df + +Here's the function in pure python: + +.. ipython:: python + + def f(x): + return x * (x - 1) + + def integrate_f(a, b, N): + s = 0 + dx = (b - a) / N + for i in range(N): + s += f(a + i * dx) + return s * dx + +We achieve our result by by using ``apply`` (row-wise): + +.. 
ipython:: python + + %timeit df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) + +But clearly this isn't fast enough for us. Let's take a look and see where the +time is spent during this operation (limited to the most time consuming +four calls) using the `prun ipython magic function <http://ipython.org/ipython-doc/stable/api/generated/IPython.core.magics.execution.html#IPython.core.magics.execution.ExecutionMagics.prun>`_: + +.. ipython:: python + + %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) + +By far the majority of time is spend inside either ``integrate_f`` or ``f``, +hence we'll concentrate our efforts cythonizing these two functions. + +.. note:: + + In python 2 replacing the ``range`` with its generator counterpart (``xrange``) + would mean the ``range`` line would vanish. In python 3 range is already a generator. + +.. _enhancingperf.plain: + +Plain cython +~~~~~~~~~~~~ + +First we're going to need to import the cython magic function to ipython: + +.. ipython:: python + + %load_ext cythonmagic + + +Now, let's simply copy our functions over to cython as is (the suffix +is here to distinguish between function versions): + +.. ipython:: + + In [2]: %%cython + ...: def f_plain(x): + ...: return x * (x - 1) + ...: def integrate_f_plain(a, b, N): + ...: s = 0 + ...: dx = (b - a) / N + ...: for i in range(N): + ...: s += f_plain(a + i * dx) + ...: return s * dx + ...: + +.. note:: + + If you're having trouble pasting the above into your ipython, you may need + to be using bleeding edge ipython for paste to play well with cell magics. + + +.. ipython:: python + + %timeit df.apply(lambda x: integrate_f_plain(x['a'], x['b'], x['N']), axis=1) + +Already this has shaved a third off, not too bad for a simple copy and paste. + +.. _enhancingperf.type: + +Adding type +~~~~~~~~~~~ + +We get another huge improvement simply by providing type information: + +.. 
ipython:: + + In [3]: %%cython + ...: cdef double f_typed(double x) except? -2: + ...: return x * (x - 1) + ...: cpdef double integrate_f_typed(double a, double b, int N): + ...: cdef int i + ...: cdef double s, dx + ...: s = 0 + ...: dx = (b - a) / N + ...: for i in range(N): + ...: s += f_typed(a + i * dx) + ...: return s * dx + ...: + +.. ipython:: python + + %timeit df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1) + +Now, we're talking! It's now over ten times faster than the original python +implementation, and we haven't *really* modified the code. Let's have another +look at what's eating up time: + +.. ipython:: python + + %prun -l 4 df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1) + +.. _enhancingperf.ndarray: + +Using ndarray +~~~~~~~~~~~~~ + +It's calling series... a lot! It's creating a Series from each row, and get-ting from both +the index and the series (three times for each row). Function calls are expensive +in python, so maybe we could minimise these by cythonizing the apply part. + +.. note:: + + We are now passing ndarrays into the cython function, fortunately cython plays + very nicely with numpy. + +.. ipython:: + + In [4]: %%cython + ...: cimport numpy as np + ...: import numpy as np + ...: cdef double f_typed(double x) except? 
-2: + ...: return x * (x - 1) + ...: cpdef double integrate_f_typed(double a, double b, int N): + ...: cdef int i + ...: cdef double s, dx + ...: s = 0 + ...: dx = (b - a) / N + ...: for i in range(N): + ...: s += f_typed(a + i * dx) + ...: return s * dx + ...: cpdef np.ndarray[double] apply_integrate_f(np.ndarray col_a, np.ndarray col_b, np.ndarray col_N): + ...: assert (col_a.dtype == np.float and col_b.dtype == np.float and col_N.dtype == np.int) + ...: cdef Py_ssize_t i, n = len(col_N) + ...: assert (len(col_a) == len(col_b) == n) + ...: cdef np.ndarray[double] res = np.empty(n) + ...: for i in range(len(col_a)): + ...: res[i] = integrate_f_typed(col_a[i], col_b[i], col_N[i]) + ...: return res + ...: + + +The implementation is simple, it creates an array of zeros and loops over +the rows, applying our ``integrate_f_typed``, and putting this in the zeros array. + + +.. note:: + + Loop like this would be *extremely* slow in python, but in cython looping over + numpy arrays is *fast*. + +.. ipython:: python + + %timeit apply_integrate_f(df['a'], df['b'], df['N']) + +We've gone another three times faster! Let's check again where the time is spent: + +.. ipython:: python + + %prun -l 4 apply_integrate_f(df['a'], df['b'], df['N']) + +As one might expect, the majority of the time is now spent in ``apply_integrate_f``, +so if we wanted to make anymore efficiencies we must continue to concentrate our +efforts here. + +.. _enhancingperf.boundswrap: + +More advanced techniques +~~~~~~~~~~~~~~~~~~~~~~~~ + +There is still scope for improvement, here's an example of using some more +advanced cython techniques: + +.. ipython:: + + In [5]: %%cython + ...: cimport cython + ...: cimport numpy as np + ...: import numpy as np + ...: cdef double f_typed(double x) except? 
-2: + ...: return x * (x - 1) + ...: cpdef double integrate_f_typed(double a, double b, int N): + ...: cdef int i + ...: cdef double s, dx + ...: s = 0 + ...: dx = (b - a) / N + ...: for i in range(N): + ...: s += f_typed(a + i * dx) + ...: return s * dx + ...: @cython.boundscheck(False) + ...: @cython.wraparound(False) + ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, np.ndarray[double] col_b, np.ndarray[Py_ssize_t] col_N): + ...: cdef Py_ssize_t i, n = len(col_N) + ...: assert len(col_a) == len(col_b) == n + ...: cdef np.ndarray[double] res = np.empty(n) + ...: for i in range(n): + ...: res[i] = integrate_f_typed(col_a[i], col_b[i], col_N[i]) + ...: return res + ...: + +.. ipython:: python + + %timeit apply_integrate_f_wrap(df['a'], df['b'], df['N']) + +This shaves another third off! + +Further topics +~~~~~~~~~~~~~~ + +- Loading C modules into cython. + +Read more in the `cython docs <http://docs.cython.org/>`_. \ No newline at end of file diff --git a/doc/source/index.rst b/doc/source/index.rst index 21a79ffdb85fd..67f1a3c1e6312 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -126,6 +126,7 @@ See the package overview for more detail about what's in the library. 
visualization rplot io + performance sparse gotchas r_interface diff --git a/doc/sphinxext/ipython_directive.py b/doc/sphinxext/ipython_directive.py index bc3c46dd5cc93..b237341e81125 100644 --- a/doc/sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_directive.py @@ -296,11 +296,14 @@ def process_input(self, data, input_prompt, lineno): is_savefig = decorator is not None and \ decorator.startswith('@savefig') - input_lines = input.split('\n') + def _remove_first_space_if_any(line): + return line[1:] if line.startswith(' ') else line + + input_lines = map(_remove_first_space_if_any, input.split('\n')) self.datacontent = data - continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) + continuation = ' %s: '%''.join(['.']*(len(str(lineno))+2)) if is_savefig: image_file, image_directive = self.process_image(decorator)
WIP #3923 Please checkout this draft for a pandas cython tutorial, any feedback or ideas appreciated. _Thanks to @cpcloud for patiently working out/explaining how to force ipython sphinx directive to play nicely with %%cython (it's incredibly sensitive!) and fixing the spacing bug. This seems to build now._ :)
https://api.github.com/repos/pandas-dev/pandas/pulls/3965
2013-06-19T23:28:35Z
2013-06-21T00:15:16Z
2013-06-21T00:15:16Z
2014-06-29T19:01:34Z
DOC/CLN: remove gh links and use the new issue format for whatsnew
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85ce838c2b414..7fdb1c53cc15b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -71,10 +71,10 @@ your contribution or address the issue you're having. which fails on python 2.6, use `self.assertRaises(TheException,func,args)` instead. - - RELEASE.rst and doc/source/vx.y.z.txt contain an on-going changelog for each - release as it is worked on. Add entries to these files as needed in - a separate commit in your PR, documenting the fix, enhancement or (unavoidable) - breaking change. + - doc/source/release.rst and doc/source/vx.y.z.txt contain an on-going + changelog for each release as it is worked on. Add entries to these files + as needed in a separate commit in your PR, documenting the fix, enhancement + or (unavoidable) breaking change. - For extra brownie points, use "git rebase -i" to squash and reorder commits in your PR so that the history makes the most sense. Use your own judgment to decide what history needs to be preserved. diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000000..9e21bbf23948a --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,6 @@ +Release Notes +============= + +The list of changes to pandas between each release can be found +[here](http://pandas.pydata.org/pandas-docs/stable/release.html). For full +details, see the commit logs at http://github.com/pydata/pandas. diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index da5b95a2c0647..0000000000000 --- a/RELEASE.rst +++ /dev/null @@ -1,4169 +0,0 @@ - -============= -Release Notes -============= - -This is the list of changes to pandas between each release. For full details, -see the commit logs at http://github.com/pydata/pandas - -What is it ----------- - -pandas is a Python package providing fast, flexible, and expressive data -structures designed to make working with “relational” or “labeled” data both -easy and intuitive. 
It aims to be the fundamental high-level building block for -doing practical, real world data analysis in Python. Additionally, it has the -broader goal of becoming the most powerful and flexible open source data -analysis / manipulation tool available in any language. - -Where to get it ---------------- - -* Source code: http://github.com/pydata/pandas -* Binary installers on PyPI: http://pypi.python.org/pypi/pandas -* Documentation: http://pandas.pydata.org - -pandas 0.11.1 -============= - -**Release date:** not-yet-released - -**New features** - - - ``pd.read_html()`` can now parse HTML strings, files or urls and - returns a list of ``DataFrame`` s courtesy of @cpcloud. (GH3477_, GH3605_, - GH3606_) - - Support for reading Amazon S3 files. (GH3504_) - - Added module for reading and writing Stata files: pandas.io.stata (GH1512_) - includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader - - Added support for writing in ``to_csv`` and reading in ``read_csv``, - multi-index columns. The ``header`` option in ``read_csv`` now accepts a - list of the rows from which to read the index. Added the option, - ``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of - writing and reading multi-index columns via a list of tuples. The default in - 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a - multi-index column. - Note: The default value will change in 0.12 to make the default *to* write and - read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_) - - Add iterator to ``Series.str`` (GH3638_) - - ``pd.set_option()`` now allows N option, value pairs (GH3667_). 
- - Added keyword parameters for different types of scatter_matrix subplots - - A ``filter`` method on grouped Series or DataFrames returns a subset of - the original (GH3680_, GH919_) - - Access to historical Google Finance data in pandas.io.data (GH3814_) - -**Improvements to existing features** - - - Fixed various issues with internal pprinting code, the repr() for various objects - including TimeStamp and Index now produces valid python code strings and - can be used to recreate the object, (GH3038_, GH3379_, GH3251_, GH3460_) - - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``) - - ``HDFStore`` - - - will retain index attributes (freq,tz,name) on recreation (GH3499_) - - will warn with a ``AttributeConflictWarning`` if you are attempting to append - an index with a different frequency than the existing, or attempting - to append an index with a different name than the existing - - support datelike columns with a timezone as data_columns (GH2852_) - - table writing performance improvements. - - support python3 (via ``PyTables 3.0.0``) (GH3750_) - - Add modulo operator to Series, DataFrame - - Add ``date`` method to DatetimeIndex - - Simplified the API and added a describe method to Categorical - - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name`` - to specify custom column names of the returned DataFrame (GH3649_), - thanks @hoechenberger - - clipboard functions use pyperclip (no dependencies on Windows, alternative - dependencies offered for Linux) (GH3837_). - - Plotting functions now raise a ``TypeError`` before trying to plot anything - if the associated objects have have a dtype of ``object`` (GH1818_, - GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to - numeric arrays if possible so that you can still plot, for example, an - object array with floats. This happens before any drawing takes place which - elimnates any spurious plots from showing up. 
- - Added Faq section on repr display options, to help users customize their setup. - - ``where`` operations that result in block splitting are much faster (GH3733_) - - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_) - - DatetimeIndexes no longer try to convert mixed-integer indexes during join - operations (GH3877_) - - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of - integers or floats that are in an epoch unit of ``s, ms, us, ns`` - (e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (GH3540_) - - DataFrame corr method (spearman) is now cythonized. - -**API Changes** - - - ``HDFStore`` - - - When removing an object, ``remove(key)`` raises - ``KeyError`` if the key is not a valid store object. - - raise a ``TypeError`` on passing ``where`` or ``columns`` - to select with a Storer; these are invalid parameters at this time - - can now specify an ``encoding`` option to ``append/put`` - to enable alternate encodings (GH3750_) - - enable support for ``iterator/chunksize`` with ``read_hdf`` - - The repr() for (Multi)Index now obeys display.max_seq_items rather - then numpy threshold print options. (GH3426_, GH3466_) - - Added mangle_dupe_cols option to read_table/csv, allowing users - to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (GH3468_) - Note: The default value will change in 0.12 to the "no mangle" behaviour, - If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True - in your calls. - - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and - ``timedelta64[ns]`` to ``object/int`` (GH3425_) - - The behavior of ``datetime64`` dtypes has changed with respect to certain - so-called reduction operations (GH3726_). 
The following operations now - raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty* - ``Series`` when performed on a ``DataFrame`` similar to performing these - operations on, for example, a ``DataFrame`` of ``slice`` objects: - - sum, prod, mean, std, var, skew, kurt, corr, and cov - - Do not allow datetimelike/timedeltalike creation except with valid types - (e.g. cannot pass ``datetime64[ms]``) (GH3423_) - - Add ``squeeze`` keyword to ``groupby`` to allow reduction from - DataFrame -> Series if groups are unique. Regression from 0.10.1, - partial revert on (GH2893_) with (GH3596_) - - Raise on ``iloc`` when boolean indexing with a label based indexer mask - e.g. a boolean Series, even with integer labels, will raise. Since ``iloc`` - is purely positional based, the labels on the Series are not alignable (GH3631_) - - The ``raise_on_error`` option to plotting methods is obviated by GH3572_, - so it is removed. Plots now always raise when data cannot be plotted or the - object being plotted has a dtype of ``object``. - - ``DataFrame.interpolate()`` is now deprecated. Please use - ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_, - GH3675_, GH3676_). - - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are - deprecated - - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now - performs conversion by default. (GH3907_) - - Deprecated display.height, display.width is now only a formatting option - does not control triggering of summary, similar to < 0.11.0. 
- - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column - to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_) - - io API changes - - - added ``pandas.io.api`` for i/o imports - - removed ``Excel`` support to ``pandas.io.excel`` - - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods - - removed ``clipboard`` support to ``pandas.io.clipboard`` - - replace top-level and instance methods ``save`` and ``load`` with top-level ``read_pickle`` and - ``to_pickle`` instance method, ``save`` and ``load`` will give deprecation warning. - - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are - deprecated - - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are - deprecated - - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_) - - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned - as an int, maxing with ``int64``, to avoid precision issues (GH3733_) - - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions - e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (GH3611_) - - ``read_html`` now defaults to ``None`` when reading, and falls back on - ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try - until success is also valid - - more consistency in the to_datetime return types (give string/array of string inputs) (GH3888_) - -**Bug Fixes** - - - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel - support. Should provide python3 support (for reading) which has been - lacking. 
(GH3164_) - - Allow unioning of date ranges sharing a timezone (GH3491_) - - Fix to_csv issue when having a large number of rows and ``NaT`` in some - columns (GH3437_) - - ``.loc`` was not raising when passed an integer list (GH3449_) - - Unordered time series selection was misbehaving when using label slicing (GH3448_) - - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) - - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) - - Fix regression in a DataFrame apply with axis=1, objects were not being converted back - to base dtypes correctly (GH3480_) - - Fix issue when storing uint dtypes in an HDFStore. (GH3493_) - - Non-unique index support clarified (GH3468_) - - - Addressed handling of dupe columns in df.to_csv new and old (GH3454_, GH3457_) - - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_) - - Fix construction of a DataFrame with a duplicate index - - ref_locs support to allow duplicative indices across dtypes, - allows iget support to always find the index (even across dtypes) (GH2194_) - - applymap on a DataFrame with a non-unique index now works - (removed warning) (GH2786_), and fix (GH3230_) - - Fix to_csv to handle non-unique columns (GH3495_) - - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - and handle missing elements like unique indices (GH3561_) - - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_) - - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_) - - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_) - - Allow insert/delete to non-unique columns (GH3679_) - - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_) - - ``DataFrame.itertuples()`` now works with frames with duplicate column - names (GH3873_) - - Fixed bug in groupby with empty series referencing a variable before 
assignment. (GH3510_) - - Fixed bug in mixed-frame assignment with aligned series (GH3492_) - - Fixed bug in selecting month/quarter/year from a series would not select the time element - on the last day (GH3546_) - - Fixed a couple of MultiIndex rendering bugs in df.to_html() (GH3547_, GH3553_) - - Properly convert np.datetime64 objects in a Series (GH3416_) - - Raise a ``TypeError`` on invalid datetime/timedelta operations - e.g. add datetimes, multiple timedelta x datetime - - Fix ``.diff`` on datelike and timedelta operations (GH3100_) - - ``combine_first`` not returning the same dtype in cases where it can (GH3552_) - - Fixed bug with ``Panel.transpose`` argument aliases (GH3556_) - - Fixed platform bug in ``PeriodIndex.take`` (GH3579_) - - Fixed bud in incorrect conversion of datetime64[ns] in ``combine_first`` (GH3593_) - - Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_) - - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter - is a ``list`` or ``tuple``. - - Fixed bug where a time-series was being selected in preference to an actual column name - in a frame (GH3594_) - - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return - ``np.nan`` or ``np.inf`` as appropriate (GH3590_) - - Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_) - - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]`` - was failing (GH3611_) - - Disable HTML output in qtconsole again. (GH3657_) - - Reworked the new repr display logic, which users found confusing. 
(GH3663_) - - Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_) - - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv`` - when ``parse_dates`` is specified (GH3062_) - - Fix not consolidating before to_csv (GH3624_) - - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_) or - a mixed DataFrame and a Series (GH3668_) - - Fix plotting of unordered DatetimeIndex (GH3601_) - - ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_), - thanks to @stonebig - - Fix pivoting with ``nan`` in the index (GH3558_) - - Fix running of bs4 tests when it is not installed (GH3605_) - - Fix parsing of html table (GH3606_) - - ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_) - - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings - into today's date - - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_) - - ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith - - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for - their first argument (GH3702_) - - Fix file tokenization error with \r delimiter and quoted fields (GH3453_) - - Groupby transform with item-by-item not upcasting correctly (GH3740_) - - Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_) - - ``read_html`` now correctly skips tests (GH3741_) - - PandasObjects raise TypeError when trying to hash (GH3882_) - - Fix incorrect arguments passed to concat that are not list-like (e.g. 
concat(df1,df2)) (GH3481_) - - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) - in ``read_csv`` (GH3795_) - - Fix index name not propogating when using ``loc/ix`` (GH3880_) - - Fix groupby when applying a custom function resulting in a returned DataFrame was - not converting dtypes (GH3911_) - - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression - in the ``to_replace`` argument wasn't working (GH3907_) - - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing - two integer arrays with at least 10000 cells total (GH3764_) - - Indexing with a string with seconds resolution not selecting from a time index (GH3925_) - -.. _GH3164: https://github.com/pydata/pandas/issues/3164 -.. _GH2786: https://github.com/pydata/pandas/issues/2786 -.. _GH2194: https://github.com/pydata/pandas/issues/2194 -.. _GH3230: https://github.com/pydata/pandas/issues/3230 -.. _GH3425: https://github.com/pydata/pandas/issues/3425 -.. _GH3416: https://github.com/pydata/pandas/issues/3416 -.. _GH3423: https://github.com/pydata/pandas/issues/3423 -.. _GH3251: https://github.com/pydata/pandas/issues/3251 -.. _GH3379: https://github.com/pydata/pandas/issues/3379 -.. _GH3480: https://github.com/pydata/pandas/issues/3480 -.. _GH3481: https://github.com/pydata/pandas/issues/3481 -.. _GH2852: https://github.com/pydata/pandas/issues/2852 -.. _GH3100: https://github.com/pydata/pandas/issues/3100 -.. _GH3454: https://github.com/pydata/pandas/issues/3454 -.. _GH3457: https://github.com/pydata/pandas/issues/3457 -.. _GH3491: https://github.com/pydata/pandas/issues/3491 -.. _GH3426: https://github.com/pydata/pandas/issues/3426 -.. _GH3466: https://github.com/pydata/pandas/issues/3466 -.. _GH3038: https://github.com/pydata/pandas/issues/3038 -.. _GH3510: https://github.com/pydata/pandas/issues/3510 -.. _GH3547: https://github.com/pydata/pandas/issues/3547 -.. 
_GH3553: https://github.com/pydata/pandas/issues/3553 -.. _GH3437: https://github.com/pydata/pandas/issues/3437 -.. _GH3468: https://github.com/pydata/pandas/issues/3468 -.. _GH3453: https://github.com/pydata/pandas/issues/3453 -.. _GH3455: https://github.com/pydata/pandas/issues/3455 -.. _GH3457: https://github.com/pydata/pandas/issues/3457 -.. _GH3477: https://github.com/pydata/pandas/issues/3457 -.. _GH3460: https://github.com/pydata/pandas/issues/3460 -.. _GH3461: https://github.com/pydata/pandas/issues/3461 -.. _GH3546: https://github.com/pydata/pandas/issues/3546 -.. _GH3468: https://github.com/pydata/pandas/issues/3468 -.. _GH3448: https://github.com/pydata/pandas/issues/3448 -.. _GH3499: https://github.com/pydata/pandas/issues/3499 -.. _GH3495: https://github.com/pydata/pandas/issues/3495 -.. _GH3492: https://github.com/pydata/pandas/issues/3492 -.. _GH3540: https://github.com/pydata/pandas/issues/3540 -.. _GH3552: https://github.com/pydata/pandas/issues/3552 -.. _GH3562: https://github.com/pydata/pandas/issues/3562 -.. _GH3586: https://github.com/pydata/pandas/issues/3586 -.. _GH3561: https://github.com/pydata/pandas/issues/3561 -.. _GH3493: https://github.com/pydata/pandas/issues/3493 -.. _GH3579: https://github.com/pydata/pandas/issues/3579 -.. _GH3593: https://github.com/pydata/pandas/issues/3593 -.. _GH3556: https://github.com/pydata/pandas/issues/3556 -.. _GH3594: https://github.com/pydata/pandas/issues/3594 -.. _GH3590: https://github.com/pydata/pandas/issues/3590 -.. _GH3610: https://github.com/pydata/pandas/issues/3610 -.. _GH3596: https://github.com/pydata/pandas/issues/3596 -.. _GH3617: https://github.com/pydata/pandas/issues/3617 -.. _GH3435: https://github.com/pydata/pandas/issues/3435 -.. _GH3611: https://github.com/pydata/pandas/issues/3611 -.. _GH3558: https://github.com/pydata/pandas/issues/3558 -.. _GH3062: https://github.com/pydata/pandas/issues/3062 -.. _GH3624: https://github.com/pydata/pandas/issues/3624 -.. 
_GH3626: https://github.com/pydata/pandas/issues/3626 -.. _GH3601: https://github.com/pydata/pandas/issues/3601 -.. _GH3631: https://github.com/pydata/pandas/issues/3631 -.. _GH3602: https://github.com/pydata/pandas/issues/3602 -.. _GH1512: https://github.com/pydata/pandas/issues/1512 -.. _GH3571: https://github.com/pydata/pandas/issues/3571 -.. _GH1651: https://github.com/pydata/pandas/issues/1651 -.. _GH3141: https://github.com/pydata/pandas/issues/3141 -.. _GH3628: https://github.com/pydata/pandas/issues/3628 -.. _GH3638: https://github.com/pydata/pandas/issues/3638 -.. _GH3668: https://github.com/pydata/pandas/issues/3668 -.. _GH3605: https://github.com/pydata/pandas/issues/3605 -.. _GH3606: https://github.com/pydata/pandas/issues/3606 -.. _GH3659: https://github.com/pydata/pandas/issues/3659 -.. _GH3649: https://github.com/pydata/pandas/issues/3649 -.. _GH3679: https://github.com/pydata/pandas/issues/3679 -.. _Gh3616: https://github.com/pydata/pandas/issues/3616 -.. _GH1818: https://github.com/pydata/pandas/issues/1818 -.. _GH3572: https://github.com/pydata/pandas/issues/3572 -.. _GH3582: https://github.com/pydata/pandas/issues/3582 -.. _GH3676: https://github.com/pydata/pandas/issues/3676 -.. _GH3675: https://github.com/pydata/pandas/issues/3675 -.. _GH3682: https://github.com/pydata/pandas/issues/3682 -.. _GH3702: https://github.com/pydata/pandas/issues/3702 -.. _GH3691: https://github.com/pydata/pandas/issues/3691 -.. _GH3696: https://github.com/pydata/pandas/issues/3696 -.. _GH3667: https://github.com/pydata/pandas/issues/3667 -.. _GH3733: https://github.com/pydata/pandas/issues/3733 -.. _GH3740: https://github.com/pydata/pandas/issues/3740 -.. _GH3748: https://github.com/pydata/pandas/issues/3748 -.. _GH3741: https://github.com/pydata/pandas/issues/3741 -.. _GH3750: https://github.com/pydata/pandas/issues/3750 -.. _GH3726: https://github.com/pydata/pandas/issues/3726 -.. _GH3795: https://github.com/pydata/pandas/issues/3795 -.. 
_GH3814: https://github.com/pydata/pandas/issues/3814 -.. _GH3834: https://github.com/pydata/pandas/issues/3834 -.. _GH3873: https://github.com/pydata/pandas/issues/3873 -.. _GH3877: https://github.com/pydata/pandas/issues/3877 -.. _GH3659: https://github.com/pydata/pandas/issues/3659 -.. _GH3679: https://github.com/pydata/pandas/issues/3679 -.. _GH3880: https://github.com/pydata/pandas/issues/3880 -.. _GH3911: https://github.com/pydata/pandas/issues/3911 -.. _GH3907: https://github.com/pydata/pandas/issues/3907 -.. _GH3911: https://github.com/pydata/pandas/issues/3911 -.. _GH3912: https://github.com/pydata/pandas/issues/3912 -.. _GH3764: https://github.com/pydata/pandas/issues/3764 -.. _GH3888: https://github.com/pydata/pandas/issues/3888 -.. _GH3925: https://github.com/pydata/pandas/issues/3925 - -pandas 0.11.0 -============= - -**Release date:** 2013-04-22 - -**New features** - - - New documentation section, ``10 Minutes to Pandas`` - - New documentation section, ``Cookbook`` - - Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in - DataFrames and propogate in operations - - Add function to pandas.io.data for retrieving stock index components from - Yahoo! 
finance (GH2795_) - - Support slicing with time objects (GH2681_) - - Added ``.iloc`` attribute, to support strict integer based indexing, - analogous to ``.ix`` (GH2922_) - - Added ``.loc`` attribute, to support strict label based indexing, analagous - to ``.ix`` (GH3053_) - - Added ``.iat`` attribute, to support fast scalar access via integers - (replaces ``iget_value/iset_value``) - - Added ``.at`` attribute, to support fast scalar access via labels (replaces - ``get_value/set_value``) - - Moved functionaility from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer - (via ``_ixs`` methods in each object) - - Added support for expression evaluation using the ``numexpr`` library - - Added ``convert=boolean`` to ``take`` routines to translate negative - indices to positive, defaults to True - - Added to_series() method to indices, to facilitate the creation of indexeres - (GH3275_) - -**Improvements to existing features** - - - Improved performance of df.to_csv() by up to 10x in some cases. (GH3059_) - - added ``blocks`` attribute to DataFrames, to return a dict of dtypes to - homogeneously dtyped DataFrames - - added keyword ``convert_numeric`` to ``convert_objects()`` to try to - convert object dtypes to numeric types (default is False) - - ``convert_dates`` in ``convert_objects`` can now be ``coerce`` which will - return a datetime64[ns] dtype with non-convertibles set as ``NaT``; will - preserve an all-nan object (e.g. strings), default is True (to perform - soft-conversion - - Series print output now includes the dtype by default - - Optimize internal reindexing routines (GH2819_, GH2867_) - - ``describe_option()`` now reports the default and current value of options. 
- - Add ``format`` option to ``pandas.to_datetime`` with faster conversion of - strings that can be parsed with datetime.strptime - - Add ``axes`` property to ``Series`` for compatibility - - Add ``xs`` function to ``Series`` for compatibility - - Allow setitem in a frame where only mixed numerics are present (e.g. int - and float), (GH3037_) - - ``HDFStore`` - - - Provide dotted attribute access to ``get`` from stores - (e.g. store.df == store['df']) - - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` - are provided to support iteration on ``select`` and - ``select_as_multiple`` (GH3076_) - - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` (GH3222_) - - - Add ``squeeze`` method to possibly remove length 1 dimensions from an - object. - - .. ipython:: python - - p = Panel(randn(3,4,4),items=['ItemA','ItemB','ItemC'], - major_axis=date_range('20010102',periods=4), - minor_axis=['A','B','C','D']) - p - p.reindex(items=['ItemA']).squeeze() - p.reindex(items=['ItemA'],minor=['B']).squeeze() - - - Improvement to Yahoo API access in ``pd.io.data.Options`` (GH2758_) - - added option `display.max_seq_items` to control the number of - elements printed per sequence pprinting it. (GH2979_) - - added option `display.chop_threshold` to control display of small numerical - values. (GH2739_) - - added option `display.max_info_rows` to prevent verbose_info from being - calculated for frames above 1M rows (configurable). (GH2807_, GH2918_) - - value_counts() now accepts a "normalize" argument, for normalized - histograms. (GH2710_). - - DataFrame.from_records now accepts not only dicts but any instance of - the collections.Mapping ABC. - - Allow selection semantics via a string with a datelike index to work in both - Series and DataFrames (GH3070_) - - .. 
ipython:: python - - idx = date_range("2001-10-1", periods=5, freq='M') - ts = Series(np.random.rand(len(idx)),index=idx) - ts['2001'] - - df = DataFrame(dict(A = ts)) - df['2001'] - - added option `display.mpl_style` providing a sleeker visual style - for plots. Based on https://gist.github.com/huyng/816622 (GH3075_). - - - - Improved performance across several core functions by taking memory - ordering of arrays into account. Courtesy of @stephenwlin (GH3130_) - - Improved performance of groupby transform method (GH2121_) - - Handle "ragged" CSV files missing trailing delimiters in rows with missing - fields when also providing explicit list of column names (so the parser - knows how many columns to expect in the result) (GH2981_) - - On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame - on rhs (GH3216_) - - Treat boolean values as integers (values 1 and 0) for numeric - operations. (GH2641_) - - Add ``time`` method to DatetimeIndex (GH3180_) - - Return NA when using Series.str[...] for values that are not long enough - (GH3223_) - - Display cursor coordinate information in time-series plots (GH1670_) - - to_html() now accepts an optional "escape" argument to control reserved - HTML character escaping (enabled by default) and escapes ``&``, in addition - to ``<`` and ``>``. (GH2919_) - -**API Changes** - - - Do not automatically upcast numeric specified dtypes to ``int64`` or - ``float64`` (GH622_ and GH797_) - - DataFrame construction of lists and scalars, with no dtype present, will - result in casting to ``int64`` or ``float64``, regardless of platform. - This is not an apparent change in the API, but noting it. - - Guarantee that ``convert_objects()`` for Series/DataFrame always returns a - copy - - groupby operations will respect dtypes for numeric float operations - (float32/float64); other types will be operated on, and will try to cast - back to the input dtype (e.g. 
if an int is passed, as long as the output - doesn't have nans, then an int will be returned) - - backfill/pad/take/diff/ohlc will now support ``float32/int16/int8`` - operations - - Block types will upcast as needed in where/masking operations (GH2793_) - - Series now automatically will try to set the correct dtype based on passed - datetimelike objects (datetime/Timestamp) - - - timedelta64 are returned in appropriate cases (e.g. Series - Series, - when both are datetime64) - - mixed datetimes and objects (GH2751_) in a constructor will be cast - correctly - - astype on datetimes to object are now handled (as well as NaT - conversions to np.nan) - - all timedelta like objects will be correctly assigned to ``timedelta64`` - with mixed ``NaN`` and/or ``NaT`` allowed - - - arguments to DataFrame.clip were inconsistent to numpy and Series clipping - (GH2747_) - - util.testing.assert_frame_equal now checks the column and index names (GH2964_) - - Constructors will now return a more informative ValueError on failures - when invalid shapes are passed - - Don't suppress TypeError in GroupBy.agg (GH3238_) - - Methods return None when inplace=True (GH1893_) - - ``HDFStore`` - - - added the method ``select_column`` to select a single column from a table as a Series. - - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` - - ``min_itemsize`` parameter will now automatically create data_columns for passed keys - - - Downcast on pivot if possible (GH3283_), adds argument ``downcast`` to ``fillna`` - - Introduced options `display.height/width` for explicitly specifying terminal - height/width in characters. Deprecated display.line_width, now replaced by display.width. - These defaults are in effect for scripts as well, so unless disabled, previously - very wide output will now be output as "expand_repr" style wrapped output. 
- - Various defaults for options (including display.max_rows) have been revised, - after a brief survey concluded they were wrong for everyone. Now at w=80,h=60. - - HTML repr output in IPython qtconsole is once again controlled by the option - `display.notebook_repr_html`, and on by default. - -**Bug Fixes** - - - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill`` - (GH2778_) - - Single element ndarrays of datetimelike objects are handled - (e.g. np.array(datetime(2001,1,1,0,0))), w/o dtype being passed - - 0-dim ndarrays with a passed dtype are handled correctly - (e.g. np.array(0.,dtype='float32')) - - Fix some boolean indexing inconsistencies in Series.__getitem__/__setitem__ - (GH2776_) - - Fix issues with DataFrame and Series constructor with integers that - overflow ``int64`` and some mixed typed type lists (GH2845_) - - - ``HDFStore`` - - - Fix weird PyTables error when using too many selectors in a where - also correctly filter on any number of values in a Term expression - (so not using numexpr filtering, but isin filtering) - - Internally, change all variables to be private-like (now have leading - underscore) - - Fixes for query parsing to correctly interpret boolean and != (GH2849_, GH2973_) - - Fixes for pathological case on SparseSeries with 0-len array and - compression (GH2931_) - - Fixes bug with writing rows if part of a block was all-nan (GH3012_) - - Exceptions are now ValueError or TypeError as needed - - A table will now raise if min_itemsize contains fields which are not queryables - - - Bug showing up in applymap where some object type columns are converted (GH2909_) - had an incorrect default in convert_objects - - - TimeDeltas - - - Series ops with a Timestamp on the rhs was throwing an exception (GH2898_) - added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike - Series on both lhs and rhs - - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (GH3094_) - - Fixed some formatting 
issues on timedelta when negative - - Support null checking on timedelta64, representing (and formatting) with NaT - - Support setitem with np.nan value, converts to NaT - - Support min/max ops in a Dataframe (abs not working, nor do we error on non-supported ops) - - Support idxmin/idxmax/abs/max/min in a Series (GH2989_, GH2982_) - - - Bug on in-place putmasking on an ``integer`` series that needs to be converted to - ``float`` (GH2746_) - - Bug in argsort of ``datetime64[ns]`` Series with ``NaT`` (GH2967_) - - Bug in value_counts of ``datetime64[ns]`` Series (GH3002_) - - Fixed printing of ``NaT` in an index - - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (GH2982__) - - Bug in ``icol, take`` with negative indicies was producing incorrect return - values (see GH2922_, GH2892_), also check for out-of-bounds indices (GH3029_) - - Bug in DataFrame column insertion when the column creation fails, existing frame is left in - an irrecoverable state (GH3010_) - - Bug in DataFrame update, combine_first where non-specified values could cause - dtype changes (GH3016_, GH3041_) - - Bug in groupby with first/last where dtypes could change (GH3041_, GH2763_) - - Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from - other values), (GH2850_) - - Unstack of a frame with no nans would always cause dtype upcasting (GH2929_) - - Fix scalar datetime.datetime parsing bug in read_csv (GH3071_) - - Fixed slow printing of large Dataframes, due to inefficient dtype - reporting (GH2807_) - - Fixed a segfault when using a function as grouper in groupby (GH3035_) - - Fix pretty-printing of infinite data structures (closes GH2978_) - - Fixed exception when plotting timeseries bearing a timezone (closes GH2877_) - - str.contains ignored na argument (GH2806_) - - Substitute warning for segfault when grouping with categorical grouper - of mismatched length (GH3011_) - - Fix exception in SparseSeries.density (GH2083_) - - Fix upsampling bug with 
closed='left' and daily to daily data (GH3020_) - - Fixed missing tick bars on scatter_matrix plot (GH3063_) - - Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (GH2993_) - - series.plot(kind='bar') now respects pylab color schem (GH3115_) - - Fixed bug in reshape if not passed correct input, now raises TypeError (GH2719_) - - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (GH3282_) - - Fix NameError issue on RESO_US (GH2787_) - - Allow selection in an *unordered* timeseries to work similary - to an *ordered* timeseries (GH2437_). - - Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (GH2903_) - - Timestamp now supports the class method fromordinal similar to datetimes (GH3042_) - - Fix issue with indexing a series with a boolean key and specifiying a 1-len list on the rhs (GH2745_) - or a list on the rhs (GH3235_) - - Fixed bug in groupby apply when kernel generate list of arrays having unequal len (GH1738_) - - fixed handling of rolling_corr with center=True which could produce corr>1 (GH3155_) - - Fixed issues where indices can be passed as 'index/column' in addition to 0/1 for the axis parameter - - PeriodIndex.tolist now boxes to Period (GH3178_) - - PeriodIndex.get_loc KeyError now reports Period instead of ordinal (GH3179_) - - df.to_records bug when handling MultiIndex (GH3189) - - Fix Series.__getitem__ segfault when index less than -length (GH3168_) - - Fix bug when using Timestamp as a date parser (GH2932_) - - Fix bug creating date range from Timestamp with time zone and passing same - time zone (GH2926_) - - Add comparison operators to Period object (GH2781_) - - Fix bug when concatenating two Series into a DataFrame when they have the - same name (GH2797_) - - Fix automatic color cycling when plotting consecutive timeseries - without color arguments (GH2816_) - - fixed bug in the pickling of PeriodIndex (GH2891_) - - Upcast/split blocks when needed in a mixed DataFrame 
when setitem - with an indexer (GH3216_) - - Invoking df.applymap on a dataframe with dupe cols now raises a ValueError (GH2786_) - - Apply with invalid returned indices raise correct Exception (GH2808_) - - Fixed a bug in plotting log-scale bar plots (GH3247_) - - df.plot() grid on/off now obeys the mpl default style, just like - series.plot(). (GH3233_) - - Fixed a bug in the legend of plotting.andrews_curves() (GH3278_) - - Produce a series on apply if we only generate a singular series and have - a simple index (GH2893_) - - Fix Python ascii file parsing when integer falls outside of floating point - spacing (GH3258_) - - fixed pretty priniting of sets (GH3294_) - - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (GH3303_) - - DataFrame where with a datetimelike incorrectly selecting (GH3311_) - - Ensure index casts work even in Int64Index - - Fix set_index segfault when passing MultiIndex (GH3308_) - - Ensure pickles created in py2 can be read in py3 - - Insert ellipsis in MultiIndex summary repr (GH3348_) - - Groupby will handle mutation among an input groups columns (and fallback - to non-fast apply) (GH3380_) - - Eliminated unicode errors on FreeBSD when using MPL GTK backend (GH3360_) - - Period.strftime should return unicode strings always (GH3363_) - - Respect passed read_* chunksize in get_chunk function (GH3406_) - -.. _GH3294: https://github.com/pydata/pandas/issues/3294 -.. _GH622: https://github.com/pydata/pandas/issues/622 -.. _GH3348: https://github.com/pydata/pandas/issues/3348 -.. _GH797: https://github.com/pydata/pandas/issues/797 -.. _GH1893: https://github.com/pydata/pandas/issues/1893 -.. _GH1978: https://github.com/pydata/pandas/issues/1978 -.. _GH3360: https://github.com/pydata/pandas/issues/3360 -.. _GH3363: https://github.com/pydata/pandas/issues/3363 -.. _GH2758: https://github.com/pydata/pandas/issues/2758 -.. _GH3275: https://github.com/pydata/pandas/issues/3275 -.. 
_GH2121: https://github.com/pydata/pandas/issues/2121 -.. _GH3247: https://github.com/pydata/pandas/issues/3247 -.. _GH2809: https://github.com/pydata/pandas/issues/2809 -.. _GH2810: https://github.com/pydata/pandas/issues/2810 -.. _GH2837: https://github.com/pydata/pandas/issues/2837 -.. _GH2898: https://github.com/pydata/pandas/issues/2898 -.. _GH3233: https://github.com/pydata/pandas/issues/3233 -.. _GH3035: https://github.com/pydata/pandas/issues/3035 -.. _GH3020: https://github.com/pydata/pandas/issues/3020 -.. _GH2978: https://github.com/pydata/pandas/issues/2978 -.. _GH2877: https://github.com/pydata/pandas/issues/2877 -.. _GH2739: https://github.com/pydata/pandas/issues/2739 -.. _GH2710: https://github.com/pydata/pandas/issues/2710 -.. _GH2806: https://github.com/pydata/pandas/issues/2806 -.. _GH2807: https://github.com/pydata/pandas/issues/2807 -.. _GH3278: https://github.com/pydata/pandas/issues/3278 -.. _GH2891: https://github.com/pydata/pandas/issues/2891 -.. _GH2918: https://github.com/pydata/pandas/issues/2918 -.. _GH3011: https://github.com/pydata/pandas/issues/3011 -.. _GH2745: https://github.com/pydata/pandas/issues/2745 -.. _GH622: https://github.com/pydata/pandas/issues/622 -.. _GH797: https://github.com/pydata/pandas/issues/797 -.. _GH1670: https://github.com/pydata/pandas/issues/1670 -.. _GH2681: https://github.com/pydata/pandas/issues/2681 -.. _GH2719: https://github.com/pydata/pandas/issues/2719 -.. _GH2746: https://github.com/pydata/pandas/issues/2746 -.. _GH2747: https://github.com/pydata/pandas/issues/2747 -.. _GH2751: https://github.com/pydata/pandas/issues/2751 -.. _GH2763: https://github.com/pydata/pandas/issues/2763 -.. _GH2776: https://github.com/pydata/pandas/issues/2776 -.. _GH2778: https://github.com/pydata/pandas/issues/2778 -.. _GH2781: https://github.com/pydata/pandas/issues/2781 -.. _GH2786: https://github.com/pydata/pandas/issues/2786 -.. _GH2787: https://github.com/pydata/pandas/issues/2787 -.. 
_GH3282: https://github.com/pydata/pandas/issues/3282 -.. _GH2437: https://github.com/pydata/pandas/issues/2437 -.. _GH2753: https://github.com/pydata/pandas/issues/2753 -.. _GH2793: https://github.com/pydata/pandas/issues/2793 -.. _GH2795: https://github.com/pydata/pandas/issues/2795 -.. _GH2797: https://github.com/pydata/pandas/issues/2797 -.. _GH2819: https://github.com/pydata/pandas/issues/2819 -.. _GH2845: https://github.com/pydata/pandas/issues/2845 -.. _GH2867: https://github.com/pydata/pandas/issues/2867 -.. _GH2803: https://github.com/pydata/pandas/issues/2803 -.. _GH2807: https://github.com/pydata/pandas/issues/2807 -.. _GH2808: https://github.com/pydata/pandas/issues/2808 -.. _GH2849: https://github.com/pydata/pandas/issues/2849 -.. _GH2850: https://github.com/pydata/pandas/issues/2850 -.. _GH2898: https://github.com/pydata/pandas/issues/2898 -.. _GH2892: https://github.com/pydata/pandas/issues/2892 -.. _GH2893: https://github.com/pydata/pandas/issues/2893 -.. _GH2902: https://github.com/pydata/pandas/issues/2902 -.. _GH2903: https://github.com/pydata/pandas/issues/2903 -.. _GH2909: https://github.com/pydata/pandas/issues/2909 -.. _GH2922: https://github.com/pydata/pandas/issues/2922 -.. _GH2926: https://github.com/pydata/pandas/issues/2926 -.. _GH2929: https://github.com/pydata/pandas/issues/2929 -.. _GH2931: https://github.com/pydata/pandas/issues/2931 -.. _GH2932: https://github.com/pydata/pandas/issues/2932 -.. _GH2973: https://github.com/pydata/pandas/issues/2973 -.. _GH2967: https://github.com/pydata/pandas/issues/2967 -.. _GH2981: https://github.com/pydata/pandas/issues/2981 -.. _GH2982: https://github.com/pydata/pandas/issues/2982 -.. _GH2989: https://github.com/pydata/pandas/issues/2989 -.. _GH2993: https://github.com/pydata/pandas/issues/2993 -.. _GH3002: https://github.com/pydata/pandas/issues/3002 -.. _GH3155: https://github.com/pydata/pandas/issues/3155 -.. _GH3010: https://github.com/pydata/pandas/issues/3010 -.. 
_GH1738: https://github.com/pydata/pandas/issues/1738 -.. _GH3012: https://github.com/pydata/pandas/issues/3012 -.. _GH3029: https://github.com/pydata/pandas/issues/3029 -.. _GH3037: https://github.com/pydata/pandas/issues/3037 -.. _GH3041: https://github.com/pydata/pandas/issues/3041 -.. _GH3042: https://github.com/pydata/pandas/issues/3042 -.. _GH3053: https://github.com/pydata/pandas/issues/3053 -.. _GH3070: https://github.com/pydata/pandas/issues/3070 -.. _GH3076: https://github.com/pydata/pandas/issues/3076 -.. _GH3063: https://github.com/pydata/pandas/issues/3063 -.. _GH3059: https://github.com/pydata/pandas/issues/3059 -.. _GH2993: https://github.com/pydata/pandas/issues/2993 -.. _GH3115: https://github.com/pydata/pandas/issues/3115 -.. _GH3070: https://github.com/pydata/pandas/issues/3070 -.. _GH3075: https://github.com/pydata/pandas/issues/3075 -.. _GH3094: https://github.com/pydata/pandas/issues/3094 -.. _GH3130: https://github.com/pydata/pandas/issues/3130 -.. _GH3168: https://github.com/pydata/pandas/issues/3168 -.. _GH3178: https://github.com/pydata/pandas/issues/3178 -.. _GH3179: https://github.com/pydata/pandas/issues/3179 -.. _GH3189: https://github.com/pydata/pandas/issues/3189 -.. _GH2751: https://github.com/pydata/pandas/issues/2751 -.. _GH2747: https://github.com/pydata/pandas/issues/2747 -.. _GH2816: https://github.com/pydata/pandas/issues/2816 -.. _GH3216: https://github.com/pydata/pandas/issues/3216 -.. _GH3222: https://github.com/pydata/pandas/issues/3222 -.. _GH2641: https://github.com/pydata/pandas/issues/2641 -.. _GH3223: https://github.com/pydata/pandas/issues/3223 -.. _GH3238: https://github.com/pydata/pandas/issues/3238 -.. _GH3258: https://github.com/pydata/pandas/issues/3258 -.. _GH3283: https://github.com/pydata/pandas/issues/3283 -.. _GH2919: https://github.com/pydata/pandas/issues/2919 -.. _GH3308: https://github.com/pydata/pandas/issues/3308 -.. _GH3311: https://github.com/pydata/pandas/issues/3311 -.. 
_GH3380: https://github.com/pydata/pandas/issues/3380 -.. _GH3406: https://github.com/pydata/pandas/issues/3406 - -pandas 0.10.1 -============= - -**Release date:** 2013-01-22 - -**New features** - - - Add data interface to World Bank WDI pandas.io.wb (GH2592_) - -**API Changes** - - - Restored inplace=True behavior returning self (same object) with - deprecation warning until 0.11 (GH1893_) - - ``HDFStore`` - - - refactored HDFStore to deal with non-table stores as objects, will allow future enhancements - - removed keyword ``compression`` from ``put`` (replaced by keyword - ``complib`` to be consistent across library) - - warn `PerformanceWarning` if you are attempting to store types that will be pickled by PyTables - -**Improvements to existing features** - - - ``HDFStore`` - - - enables storing of multi-index dataframes (closes GH1277_) - - support data column indexing and selection, via ``data_columns`` keyword - in append - - support write chunking to reduce memory footprint, via ``chunksize`` - keyword to append - - support automagic indexing via ``index`` keyword to append - - support ``expectedrows`` keyword in append to inform ``PyTables`` about - the expected tablesize - - support ``start`` and ``stop`` keywords in select to limit the row - selection space - - added ``get_store`` context manager to automatically import with pandas - - added column filtering via ``columns`` keyword in select - - added methods append_to_multiple/select_as_multiple/select_as_coordinates - to do multiple-table append/selection - - added support for datetime64 in columns - - added method ``unique`` to select the unique values in an indexable or - data column - - added method ``copy`` to copy an existing store (and possibly upgrade) - - show the shape of the data on disk for non-table stores when printing the - store - - added ability to read PyTables flavor tables (allows compatibility to - other HDF5 systems) - - Add ``logx`` option to DataFrame/Series.plot (GH2327_, GH2565_) 
- - Support reading gzipped data from file-like object - - ``pivot_table`` aggfunc can be anything used in GroupBy.aggregate (GH2643_) - - Implement DataFrame merges in case where set cardinalities might overflow - 64-bit integer (GH2690_) - - Raise exception in C file parser if integer dtype specified and have NA - values. (GH2631_) - - Attempt to parse ISO8601 format dates when parse_dates=True in read_csv for - major performance boost in such cases (GH2698_) - - Add methods ``neg`` and ``inv`` to Series - - Implement ``kind`` option in ``ExcelFile`` to indicate whether it's an XLS - or XLSX file (GH2613_) - -**Bug fixes** - - - Fix read_csv/read_table multithreading issues (GH2608_) - - ``HDFStore`` - - - correctly handle ``nan`` elements in string columns; serialize via the - ``nan_rep`` keyword to append - - raise correctly on non-implemented column types (unicode/date) - - handle correctly ``Term`` passed types (e.g. ``index<1000``, when index - is ``Int64``), (closes GH512_) - - handle Timestamp correctly in data_columns (closes GH2637_) - - contains correctly matches on non-natural names - - correctly store ``float32`` dtypes in tables (if not other float types in - the same table) - - Fix DataFrame.info bug with UTF8-encoded columns. 
(GH2576_) - - Fix DatetimeIndex handling of FixedOffset tz (GH2604_) - - More robust detection of being in IPython session for wide DataFrame - console formatting (GH2585_) - - Fix platform issues with ``file:///`` in unit test (GH2564_) - - Fix bug and possible segfault when grouping by hierarchical level that - contains NA values (GH2616_) - - Ensure that MultiIndex tuples can be constructed with NAs (GH2616_) - - Fix int64 overflow issue when unstacking MultiIndex with many levels - (GH2616_) - - Exclude non-numeric data from DataFrame.quantile by default (GH2625_) - - Fix a Cython C int64 boxing issue causing read_csv to return incorrect - results (GH2599_) - - Fix groupby summing performance issue on boolean data (GH2692_) - - Don't bork Series containing datetime64 values with to_datetime (GH2699_) - - Fix DataFrame.from_records corner case when passed columns, index column, - but empty record list (GH2633_) - - Fix C parser-tokenizer bug with trailing fields. (GH2668_) - - Don't exclude non-numeric data from GroupBy.max/min (GH2700_) - - Don't lose time zone when calling DatetimeIndex.drop (GH2621_) - - Fix setitem on a Series with a boolean key and a non-scalar as value - (GH2686_) - - Box datetime64 values in Series.apply/map (GH2627_, GH2689_) - - Upconvert datetime + datetime64 values when concatenating frames (GH2624_) - - Raise a more helpful error message in merge operations when one DataFrame - has duplicate columns (GH2649_) - - Fix partial date parsing issue occurring only when code is run at EOM - (GH2618_) - - Prevent MemoryError when using counting sort in sortlevel with - high-cardinality MultiIndex objects (GH2684_) - - Fix Period resampling bug when all values fall into a single bin (GH2070_) - - Fix buggy interaction with usecols argument in read_csv when there is an - implicit first index column (GH2654_) - -.. _GH512: https://github.com/pydata/pandas/issues/512 -.. _GH1277: https://github.com/pydata/pandas/issues/1277 -.. 
_GH2070: https://github.com/pydata/pandas/issues/2070 -.. _GH2327: https://github.com/pydata/pandas/issues/2327 -.. _GH2565: https://github.com/pydata/pandas/issues/2565 -.. _GH2585: https://github.com/pydata/pandas/issues/2585 -.. _GH2599: https://github.com/pydata/pandas/issues/2599 -.. _GH2604: https://github.com/pydata/pandas/issues/2604 -.. _GH2576: https://github.com/pydata/pandas/issues/2576 -.. _GH2608: https://github.com/pydata/pandas/issues/2608 -.. _GH2613: https://github.com/pydata/pandas/issues/2613 -.. _GH2616: https://github.com/pydata/pandas/issues/2616 -.. _GH2621: https://github.com/pydata/pandas/issues/2621 -.. _GH2624: https://github.com/pydata/pandas/issues/2624 -.. _GH2625: https://github.com/pydata/pandas/issues/2625 -.. _GH2627: https://github.com/pydata/pandas/issues/2627 -.. _GH2631: https://github.com/pydata/pandas/issues/2631 -.. _GH2633: https://github.com/pydata/pandas/issues/2633 -.. _GH2637: https://github.com/pydata/pandas/issues/2637 -.. _GH2643: https://github.com/pydata/pandas/issues/2643 -.. _GH2649: https://github.com/pydata/pandas/issues/2649 -.. _GH2654: https://github.com/pydata/pandas/issues/2654 -.. _GH2668: https://github.com/pydata/pandas/issues/2668 -.. _GH2684: https://github.com/pydata/pandas/issues/2684 -.. _GH2689: https://github.com/pydata/pandas/issues/2689 -.. _GH2690: https://github.com/pydata/pandas/issues/2690 -.. _GH2692: https://github.com/pydata/pandas/issues/2692 -.. _GH2698: https://github.com/pydata/pandas/issues/2698 -.. _GH2699: https://github.com/pydata/pandas/issues/2699 -.. _GH2700: https://github.com/pydata/pandas/issues/2700 -.. _GH2686: https://github.com/pydata/pandas/issues/2686 -.. _GH2618: https://github.com/pydata/pandas/issues/2618 -.. _GH2592: https://github.com/pydata/pandas/issues/2592 -.. _GH2564: https://github.com/pydata/pandas/issues/2564 -.. 
_GH2616: https://github.com/pydata/pandas/issues/2616 - -pandas 0.10.0 -============= - -**Release date:** 2012-12-17 - -**New features** - - - Brand new high-performance delimited file parsing engine written in C and - Cython. 50% or better performance in many standard use cases with a - fraction as much memory usage. (GH407_, GH821_) - - Many new file parser (read_csv, read_table) features: - - - Support for on-the-fly gzip or bz2 decompression (`compression` option) - - Ability to get back numpy.recarray instead of DataFrame - (`as_recarray=True`) - - `dtype` option: explicit column dtypes - - `usecols` option: specify list of columns to be read from a file. Good - for reading very wide files with many irrelevant columns (GH1216_ GH926_, GH2465_) - - Enhanced unicode decoding support via `encoding` option - - `skipinitialspace` dialect option - - Can specify strings to be recognized as True (`true_values`) or False - (`false_values`) - - High-performance `delim_whitespace` option for whitespace-delimited - files; a preferred alternative to the '\s+' regular expression delimiter - - Option to skip "bad" lines (wrong number of fields) that would otherwise - have caused an error in the past (`error_bad_lines` and `warn_bad_lines` - options) - - Substantially improved performance in the parsing of integers with - thousands markers and lines with comments - - Easy handling of European (and other) decimal formats (`decimal` option) (GH584_, GH2466_) - - Custom line terminators (e.g. 
lineterminator='~') (GH2457_) - - Handling of no trailing commas in CSV files (GH2333_) - - Ability to handle fractional seconds in date_converters (GH2209_) - - read_csv allow scalar arg to na_values (GH1944_) - - Explicit column dtype specification in read_* functions (GH1858_) - - Easier CSV dialect specification (GH1743_) - - Improve parser performance when handling special characters (GH1204_) - - - Google Analytics API integration with easy oauth2 workflow (GH2283_) - - Add error handling to Series.str.encode/decode (GH2276_) - - Add ``where`` and ``mask`` to Series (GH2337_) - - Grouped histogram via `by` keyword in Series/DataFrame.hist (GH2186_) - - Support optional ``min_periods`` keyword in ``corr`` and ``cov`` - for both Series and DataFrame (GH2002_) - - Add ``duplicated`` and ``drop_duplicates`` functions to Series (GH1923_) - - Add docs for ``HDFStore table`` format - - 'density' property in `SparseSeries` (GH2384_) - - Add ``ffill`` and ``bfill`` convenience functions for forward- and - backfilling time series data (GH2284_) - - New option configuration system and functions `set_option`, `get_option`, - `describe_option`, and `reset_option`. Deprecate `set_printoptions` and - `reset_printoptions` (GH2393_). - You can also access options as attributes via ``pandas.options.X`` - - Wide DataFrames can be viewed more easily in the console with new - `expand_frame_repr` and `line_width` configuration options. This is on by - default now (GH2436_) - - Scikits.timeseries-like moving window functions via ``rolling_window`` (GH1270_) - -**Experimental Features** - - - Add support for Panel4D, a named 4 Dimensional structure - - Add support for ndpanel factory functions, to create custom, - domain-specific N-Dimensional containers - -**API Changes** - - - The default binning/labeling behavior for ``resample`` has been changed to - `closed='left', label='left'` for daily and lower frequencies. This had - been a large source of confusion for users. 
See "what's new" page for more - on this. (GH2410_) - - Methods with ``inplace`` option now return None instead of the calling - (modified) object (GH1893_) - - The special case DataFrame - TimeSeries doing column-by-column broadcasting - has been deprecated. Users should explicitly do e.g. df.sub(ts, axis=0) - instead. This is a legacy hack and can lead to subtle bugs. - - inf/-inf are no longer considered as NA by isnull/notnull. To be clear, this - is legacy cruft from early pandas. This behavior can be globally re-enabled - using the new option ``mode.use_inf_as_null`` (GH2050_, GH1919_) - - ``pandas.merge`` will now default to ``sort=False``. For many use cases - sorting the join keys is not necessary, and doing it by default is wasteful - - Specify ``header=0`` explicitly to replace existing column names in file in - read_* functions. - - Default column names for header-less parsed files (yielded by read_csv, - etc.) are now the integers 0, 1, .... A new argument `prefix` has been - added; to get the v0.9.x behavior specify ``prefix='X'`` (GH2034_). This API - change was made to make the default column names more consistent with the - DataFrame constructor's default column names when none are specified. - - DataFrame selection using a boolean frame now preserves input shape - - If function passed to Series.apply yields a Series, result will be a - DataFrame (GH2316_) - - Values like YES/NO/yes/no will not be considered as boolean by default any - longer in the file parsers. This can be customized using the new - ``true_values`` and ``false_values`` options (GH2360_) - - `obj.fillna()` is no longer valid; make `method='pad'` no longer the - default option, to be more explicit about what kind of filling to - perform. Add `ffill/bfill` convenience functions per above (GH2284_) - - `HDFStore.keys()` now returns an absolute path-name for each key - - `to_string()` now always returns a unicode string. 
(GH2224_) - - File parsers will not handle NA sentinel values arising from passed - converter functions - -**Improvements to existing features** - - - Add ``nrows`` option to DataFrame.from_records for iterators (GH1794_) - - Unstack/reshape algorithm rewrite to avoid high memory use in cases where - the number of observed key-tuples is much smaller than the total possible - number that could occur (GH2278_). Also improves performance in most cases. - - Support duplicate columns in DataFrame.from_records (GH2179_) - - Add ``normalize`` option to Series/DataFrame.asfreq (GH2137_) - - SparseSeries and SparseDataFrame construction from empty and scalar - values now no longer create dense ndarrays unnecessarily (GH2322_) - - ``HDFStore`` now supports hierarchical keys (GH2397_) - - Support multiple query selection formats for ``HDFStore tables`` (GH1996_) - - Support ``del store['df']`` syntax to delete HDFStores - - Add multi-dtype support for ``HDFStore tables`` - - ``min_itemsize`` parameter can be specified in ``HDFStore table`` creation - - Indexing support in ``HDFStore tables`` (GH698_) - - Add `line_terminator` option to DataFrame.to_csv (GH2383_) - - added implementation of str(x)/unicode(x)/bytes(x) to major pandas data - structures, which should do the right thing on both py2.x and py3.x. 
(GH2224_) - - Reduce groupby.apply overhead substantially by low-level manipulation of - internal NumPy arrays in DataFrames (GH535_) - - Implement ``value_vars`` in ``melt`` and add ``melt`` to pandas namespace - (GH2412_) - - Added boolean comparison operators to Panel - - Enable ``Series.str.strip/lstrip/rstrip`` methods to take an argument (GH2411_) - - The DataFrame ctor now respects column ordering when given - an OrderedDict (GH2455_) - - Assigning DatetimeIndex to Series changes the class to TimeSeries (GH2139_) - - Improve performance of .value_counts method on non-integer data (GH2480_) - - ``get_level_values`` method for MultiIndex return Index instead of ndarray (GH2449_) - - ``convert_to_r_dataframe`` conversion for datetime values (GH2351_) - - Allow ``DataFrame.to_csv`` to represent inf and nan differently (GH2026_) - - Add ``min_i`` argument to ``nancorr`` to specify minimum required observations (GH2002_) - - Add ``inplace`` option to ``sortlevel`` / ``sort`` functions on DataFrame (GH1873_) - - Enable DataFrame to accept scalar constructor values like Series (GH1856_) - - DataFrame.from_records now takes optional ``size`` parameter (GH1794_) - - include iris dataset (GH1709_) - - No datetime64 DataFrame column conversion of datetime.datetime with tzinfo (GH1581_) - - Micro-optimizations in DataFrame for tracking state of internal consolidation (GH217_) - - Format parameter in DataFrame.to_csv (GH1525_) - - Partial string slicing for ``DatetimeIndex`` for daily and higher frequencies (GH2306_) - - Implement ``col_space`` parameter in ``to_html`` and ``to_string`` in DataFrame (GH1000_) - - Override ``Series.tolist`` and box datetime64 types (GH2447_) - - Optimize ``unstack`` memory usage by compressing indices (GH2278_) - - Fix HTML repr in IPython qtconsole if opening window is small (GH2275_) - - Escape more special characters in console output (GH2492_) - - df.select now invokes bool on the result of crit(x) (GH2487_) - -**Bug fixes** - - - Fix 
major performance regression in DataFrame.iteritems (GH2273_) - - Fixes bug when negative period passed to Series/DataFrame.diff (GH2266_) - - Escape tabs in console output to avoid alignment issues (GH2038_) - - Properly box datetime64 values when retrieving cross-section from - mixed-dtype DataFrame (GH2272_) - - Fix concatenation bug leading to GH2057_, GH2257_ - - Fix regression in Index console formatting (GH2319_) - - Box Period data when assigning PeriodIndex to frame column (GH2243_, GH2281_) - - Raise exception on calling reset_index on Series with inplace=True (GH2277_) - - Enable setting multiple columns in DataFrame with hierarchical columns - (GH2295_) - - Respect dtype=object in DataFrame constructor (GH2291_) - - Fix DatetimeIndex.join bug with tz-aware indexes and how='outer' (GH2317_) - - pop(...) and del works with DataFrame with duplicate columns (GH2349_) - - Treat empty strings as NA in date parsing (rather than let dateutil do - something weird) (GH2263_) - - Prevent uint64 -> int64 overflows (GH2355_) - - Enable joins between MultiIndex and regular Index (GH2024_) - - Fix time zone metadata issue when unioning non-overlapping DatetimeIndex - objects (GH2367_) - - Raise/handle int64 overflows in parsers (GH2247_) - - Deleting of consecutive rows in ``HDFStore tables`` is much faster than before - - Appending on a HDFStore would fail if the table was not first created via ``put`` - - Use `col_space` argument as minimum column width in DataFrame.to_html (GH2328_) - - Fix tz-aware DatetimeIndex.to_period (GH2232_) - - Fix DataFrame row indexing case with MultiIndex (GH2314_) - - Fix to_excel exporting issues with Timestamp objects in index (GH2294_) - - Fixes assigning scalars and array to hierarchical column chunk (GH1803_) - - Fixed a UnicodeDecodeError with series tidy_repr (GH2225_) - - Fixed issues with duplicate keys in an index (GH2347_, GH2380_) - - Fixed issues re: Hash randomization, default on starting w/ py3.3 (GH2331_) - - Fixed 
issue with missing attributes after loading a pickled dataframe (GH2431_) - - Fix Timestamp formatting with tzoffset time zone in dateutil 2.1 (GH2443_) - - Fix GroupBy.apply issue when using BinGrouper to do ts binning (GH2300_) - - Fix issues resulting from datetime.datetime columns being converted to - datetime64 when calling DataFrame.apply. (GH2374_) - - Raise exception when calling to_panel on non uniquely-indexed frame (GH2441_) - - Improved detection of console encoding on IPython zmq frontends (GH2458_) - - Preserve time zone when .append-ing two time series (GH2260_) - - Box timestamps when calling reset_index on time-zone-aware index rather - than creating a tz-less datetime64 column (GH2262_) - - Enable searching non-string columns in DataFrame.filter(like=...) (GH2467_) - - Fixed issue with losing nanosecond precision upon conversion to DatetimeIndex(GH2252_) - - Handle timezones in Datetime.normalize (GH2338_) - - Fix test case where dtype specification with endianness causes - failures on big endian machines (GH2318_) - - Fix plotting bug where upsampling causes data to appear shifted in time (GH2448_) - - Fix ``read_csv`` failure for UTF-16 with BOM and skiprows(GH2298_) - - read_csv with names arg not implicitly setting header=None(GH2459_) - - Unrecognized compression mode causes segfault in read_csv(GH2474_) - - In read_csv, header=0 and passed names should discard first row(GH2269_) - - Correctly route to stdout/stderr in read_table (GH2071_) - - Fix exception when Timestamp.to_datetime is called on a Timestamp with tzoffset (GH2471_) - - Fixed unintentional conversion of datetime64 to long in groupby.first() (GH2133_) - - Union of empty DataFrames now return empty with concatenated index (GH2307_) - - DataFrame.sort_index raises more helpful exception if sorting by column - with duplicates (GH2488_) - - DataFrame.to_string formatters can be list, too (GH2520_) - - DataFrame.combine_first will always result in the union of the index and - 
columns, even if one DataFrame is length-zero (GH2525_) - - Fix several DataFrame.icol/irow with duplicate indices issues (GH2228_, GH2259_) - - Use Series names for column names when using concat with axis=1 (GH2489_) - - Raise Exception if start, end, periods all passed to date_range (GH2538_) - - Fix Panel resampling issue (GH2537_) - -.. _GH407: https://github.com/pydata/pandas/issues/407 -.. _GH821: https://github.com/pydata/pandas/issues/821 -.. _GH1216: https://github.com/pydata/pandas/issues/1216 -.. _GH926: https://github.com/pydata/pandas/issues/926 -.. _GH2465: https://github.com/pydata/pandas/issues/2465 -.. _GH584: https://github.com/pydata/pandas/issues/584 -.. _GH2466: https://github.com/pydata/pandas/issues/2466 -.. _GH2457: https://github.com/pydata/pandas/issues/2457 -.. _GH2333: https://github.com/pydata/pandas/issues/2333 -.. _GH2209: https://github.com/pydata/pandas/issues/2209 -.. _GH1944: https://github.com/pydata/pandas/issues/1944 -.. _GH1858: https://github.com/pydata/pandas/issues/1858 -.. _GH1743: https://github.com/pydata/pandas/issues/1743 -.. _GH1204: https://github.com/pydata/pandas/issues/1204 -.. _GH2283: https://github.com/pydata/pandas/issues/2283 -.. _GH2276: https://github.com/pydata/pandas/issues/2276 -.. _GH2337: https://github.com/pydata/pandas/issues/2337 -.. _GH2186: https://github.com/pydata/pandas/issues/2186 -.. _GH2002: https://github.com/pydata/pandas/issues/2002 -.. _GH1923: https://github.com/pydata/pandas/issues/1923 -.. _GH2384: https://github.com/pydata/pandas/issues/2384 -.. _GH2284: https://github.com/pydata/pandas/issues/2284 -.. _GH2393: https://github.com/pydata/pandas/issues/2393 -.. _GH2436: https://github.com/pydata/pandas/issues/2436 -.. _GH1270: https://github.com/pydata/pandas/issues/1270 -.. _GH2410: https://github.com/pydata/pandas/issues/2410 -.. _GH1893: https://github.com/pydata/pandas/issues/1893 -.. _GH2050: https://github.com/pydata/pandas/issues/2050 -.. 
_GH1919: https://github.com/pydata/pandas/issues/1919 -.. _GH2034: https://github.com/pydata/pandas/issues/2034 -.. _GH2316: https://github.com/pydata/pandas/issues/2316 -.. _GH2360: https://github.com/pydata/pandas/issues/2360 -.. _GH2224: https://github.com/pydata/pandas/issues/2224 -.. _GH1794: https://github.com/pydata/pandas/issues/1794 -.. _GH2278: https://github.com/pydata/pandas/issues/2278 -.. _GH2179: https://github.com/pydata/pandas/issues/2179 -.. _GH2137: https://github.com/pydata/pandas/issues/2137 -.. _GH2322: https://github.com/pydata/pandas/issues/2322 -.. _GH2397: https://github.com/pydata/pandas/issues/2397 -.. _GH1996: https://github.com/pydata/pandas/issues/1996 -.. _GH698: https://github.com/pydata/pandas/issues/698 -.. _GH2383: https://github.com/pydata/pandas/issues/2383 -.. _GH535: https://github.com/pydata/pandas/issues/535 -.. _GH2412: https://github.com/pydata/pandas/issues/2412 -.. _GH2411: https://github.com/pydata/pandas/issues/2411 -.. _GH2455: https://github.com/pydata/pandas/issues/2455 -.. _GH2139: https://github.com/pydata/pandas/issues/2139 -.. _GH2480: https://github.com/pydata/pandas/issues/2480 -.. _GH2449: https://github.com/pydata/pandas/issues/2449 -.. _GH2351: https://github.com/pydata/pandas/issues/2351 -.. _GH2026: https://github.com/pydata/pandas/issues/2026 -.. _GH1873: https://github.com/pydata/pandas/issues/1873 -.. _GH1856: https://github.com/pydata/pandas/issues/1856 -.. _GH1709: https://github.com/pydata/pandas/issues/1709 -.. _GH1581: https://github.com/pydata/pandas/issues/1581 -.. _GH217: https://github.com/pydata/pandas/issues/217 -.. _GH1525: https://github.com/pydata/pandas/issues/1525 -.. _GH2306: https://github.com/pydata/pandas/issues/2306 -.. _GH1000: https://github.com/pydata/pandas/issues/1000 -.. _GH2447: https://github.com/pydata/pandas/issues/2447 -.. _GH2275: https://github.com/pydata/pandas/issues/2275 -.. _GH2492: https://github.com/pydata/pandas/issues/2492 -.. 
_GH2487: https://github.com/pydata/pandas/issues/2487 -.. _GH2273: https://github.com/pydata/pandas/issues/2273 -.. _GH2266: https://github.com/pydata/pandas/issues/2266 -.. _GH2038: https://github.com/pydata/pandas/issues/2038 -.. _GH2272: https://github.com/pydata/pandas/issues/2272 -.. _GH2057: https://github.com/pydata/pandas/issues/2057 -.. _GH2257: https://github.com/pydata/pandas/issues/2257 -.. _GH2319: https://github.com/pydata/pandas/issues/2319 -.. _GH2243: https://github.com/pydata/pandas/issues/2243 -.. _GH2281: https://github.com/pydata/pandas/issues/2281 -.. _GH2277: https://github.com/pydata/pandas/issues/2277 -.. _GH2295: https://github.com/pydata/pandas/issues/2295 -.. _GH2291: https://github.com/pydata/pandas/issues/2291 -.. _GH2317: https://github.com/pydata/pandas/issues/2317 -.. _GH2349: https://github.com/pydata/pandas/issues/2349 -.. _GH2263: https://github.com/pydata/pandas/issues/2263 -.. _GH2355: https://github.com/pydata/pandas/issues/2355 -.. _GH2024: https://github.com/pydata/pandas/issues/2024 -.. _GH2367: https://github.com/pydata/pandas/issues/2367 -.. _GH2247: https://github.com/pydata/pandas/issues/2247 -.. _GH2328: https://github.com/pydata/pandas/issues/2328 -.. _GH2232: https://github.com/pydata/pandas/issues/2232 -.. _GH2314: https://github.com/pydata/pandas/issues/2314 -.. _GH2294: https://github.com/pydata/pandas/issues/2294 -.. _GH1803: https://github.com/pydata/pandas/issues/1803 -.. _GH2225: https://github.com/pydata/pandas/issues/2225 -.. _GH2347: https://github.com/pydata/pandas/issues/2347 -.. _GH2380: https://github.com/pydata/pandas/issues/2380 -.. _GH2331: https://github.com/pydata/pandas/issues/2331 -.. _GH2431: https://github.com/pydata/pandas/issues/2431 -.. _GH2443: https://github.com/pydata/pandas/issues/2443 -.. _GH2300: https://github.com/pydata/pandas/issues/2300 -.. _GH2374: https://github.com/pydata/pandas/issues/2374 -.. _GH2441: https://github.com/pydata/pandas/issues/2441 -.. 
_GH2458: https://github.com/pydata/pandas/issues/2458 -.. _GH2260: https://github.com/pydata/pandas/issues/2260 -.. _GH2262: https://github.com/pydata/pandas/issues/2262 -.. _GH2467: https://github.com/pydata/pandas/issues/2467 -.. _GH2252: https://github.com/pydata/pandas/issues/2252 -.. _GH2338: https://github.com/pydata/pandas/issues/2338 -.. _GH2318: https://github.com/pydata/pandas/issues/2318 -.. _GH2448: https://github.com/pydata/pandas/issues/2448 -.. _GH2298: https://github.com/pydata/pandas/issues/2298 -.. _GH2459: https://github.com/pydata/pandas/issues/2459 -.. _GH2474: https://github.com/pydata/pandas/issues/2474 -.. _GH2269: https://github.com/pydata/pandas/issues/2269 -.. _GH2071: https://github.com/pydata/pandas/issues/2071 -.. _GH2471: https://github.com/pydata/pandas/issues/2471 -.. _GH2133: https://github.com/pydata/pandas/issues/2133 -.. _GH2307: https://github.com/pydata/pandas/issues/2307 -.. _GH2488: https://github.com/pydata/pandas/issues/2488 -.. _GH2520: https://github.com/pydata/pandas/issues/2520 -.. _GH2525: https://github.com/pydata/pandas/issues/2525 -.. _GH2228: https://github.com/pydata/pandas/issues/2228 -.. _GH2259: https://github.com/pydata/pandas/issues/2259 -.. _GH2489: https://github.com/pydata/pandas/issues/2489 -.. _GH2538: https://github.com/pydata/pandas/issues/2538 -.. _GH2537: https://github.com/pydata/pandas/issues/2537 - - -pandas 0.9.1 -============ - -**Release date:** 2012-11-14 - -**New features** - - - Can specify multiple sort orders in DataFrame/Series.sort/sort_index (GH928_) - - New `top` and `bottom` options for handling NAs in rank (GH1508_, GH2159_) - - Add `where` and `mask` functions to DataFrame (GH2109_, GH2151_) - - Add `at_time` and `between_time` functions to DataFrame (GH2149_) - - Add flexible `pow` and `rpow` methods to DataFrame (GH2190_) - -**API Changes** - - - Upsampling period index "spans" intervals. 
Example: annual periods - upsampled to monthly will span all months in each year - - Period.end_time will yield timestamp at last nanosecond in the interval - (GH2124_, GH2125_, GH1764_) - - File parsers no longer coerce to float or bool for columns that have custom - converters specified (GH2184_) - -**Improvements to existing features** - - - Time rule inference for week-of-month (e.g. WOM-2FRI) rules (GH2140_) - - Improve performance of datetime + business day offset with large number of - offset periods - - Improve HTML display of DataFrame objects with hierarchical columns - - Enable referencing of Excel columns by their column names (GH1936_) - - DataFrame.dot can accept ndarrays (GH2042_) - - Support negative periods in Panel.shift (GH2164_) - - Make .drop(...) work with non-unique indexes (GH2101_) - - Improve performance of Series/DataFrame.diff (re: GH2087_) - - Support unary ~ (__invert__) in DataFrame (GH2110_) - - Turn off pandas-style tick locators and formatters (GH2205_) - - DataFrame[DataFrame] uses DataFrame.where to compute masked frame (GH2230_) - -**Bug fixes** - - - Fix some duplicate-column DataFrame constructor issues (GH2079_) - - Fix bar plot color cycle issues (GH2082_) - - Fix off-center grid for stacked bar plots (GH2157_) - - Fix plotting bug if inferred frequency is offset with N > 1 (GH2126_) - - Implement comparisons on date offsets with fixed delta (GH2078_) - - Handle inf/-inf correctly in read_* parser functions (GH2041_) - - Fix matplotlib unicode interaction bug - - Make WLS r-squared match statsmodels 0.5.0 fixed value - - Fix zero-trimming DataFrame formatting bug - - Correctly compute/box datetime64 min/max values from Series.min/max (GH2083_) - - Fix unstacking edge case with unrepresented groups (GH2100_) - - Fix Series.str failures when using pipe pattern '|' (GH2119_) - - Fix pretty-printing of dict entries in Series, DataFrame (GH2144_) - - Cast other datetime64 values to nanoseconds in DataFrame ctor (GH2095_) - - 
Alias Timestamp.astimezone to tz_convert, so will yield Timestamp (GH2060_) - - Fix timedelta64 formatting from Series (GH2165_, GH2146_) - - Handle None values gracefully in dict passed to Panel constructor (GH2075_) - - Box datetime64 values as Timestamp objects in Series/DataFrame.iget (GH2148_) - - Fix Timestamp indexing bug in DatetimeIndex.insert (GH2155_) - - Use index name(s) (if any) in DataFrame.to_records (GH2161_) - - Don't lose index names in Panel.to_frame/DataFrame.to_panel (GH2163_) - - Work around length-0 boolean indexing NumPy bug (GH2096_) - - Fix partial integer indexing bug in DataFrame.xs (GH2107_) - - Fix variety of cut/qcut string-bin formatting bugs (GH1978_, GH1979_) - - Raise Exception when xs view not possible of MultiIndex'd DataFrame (GH2117_) - - Fix groupby(...).first() issue with datetime64 (GH2133_) - - Better floating point error robustness in some rolling_* functions - (GH2114_, GH2527_) - - Fix ewma NA handling in the middle of Series (GH2128_) - - Fix numerical precision issues in diff with integer data (GH2087_) - - Fix bug in MultiIndex.__getitem__ with NA values (GH2008_) - - Fix DataFrame.from_records dict-arg bug when passing columns (GH2179_) - - Fix Series and DataFrame.diff for integer dtypes (GH2087_, GH2174_) - - Fix bug when taking intersection of DatetimeIndex with empty index (GH2129_) - - Pass through timezone information when calling DataFrame.align (GH2127_) - - Properly sort when joining on datetime64 values (GH2196_) - - Fix indexing bug in which False/True were being coerced to 0/1 (GH2199_) - - Many unicode formatting fixes (GH2201_) - - Fix improper MultiIndex conversion issue when assigning - e.g. 
DataFrame.index (GH2200_) - - Fix conversion of mixed-type DataFrame to ndarray with dup columns (GH2236_) - - Fix duplicate columns issue (GH2218_, GH2219_) - - Fix SparseSeries.__pow__ issue with NA input (GH2220_) - - Fix icol with integer sequence failure (GH2228_) - - Fixed resampling tz-aware time series issue (GH2245_) - - SparseDataFrame.icol was not returning SparseSeries (GH2227_, GH2229_) - - Enable ExcelWriter to handle PeriodIndex (GH2240_) - - Fix issue constructing DataFrame from empty Series with name (GH2234_) - - Use console-width detection in interactive sessions only (GH1610_) - - Fix parallel_coordinates legend bug with mpl 1.2.0 (GH2237_) - - Make tz_localize work in corner case of empty Series (GH2248_) - -.. _GH928: https://github.com/pydata/pandas/issues/928 -.. _GH1508: https://github.com/pydata/pandas/issues/1508 -.. _GH2159: https://github.com/pydata/pandas/issues/2159 -.. _GH2109: https://github.com/pydata/pandas/issues/2109 -.. _GH2151: https://github.com/pydata/pandas/issues/2151 -.. _GH2149: https://github.com/pydata/pandas/issues/2149 -.. _GH2190: https://github.com/pydata/pandas/issues/2190 -.. _GH2124: https://github.com/pydata/pandas/issues/2124 -.. _GH2125: https://github.com/pydata/pandas/issues/2125 -.. _GH1764: https://github.com/pydata/pandas/issues/1764 -.. _GH2184: https://github.com/pydata/pandas/issues/2184 -.. _GH2140: https://github.com/pydata/pandas/issues/2140 -.. _GH1936: https://github.com/pydata/pandas/issues/1936 -.. _GH2042: https://github.com/pydata/pandas/issues/2042 -.. _GH2164: https://github.com/pydata/pandas/issues/2164 -.. _GH2101: https://github.com/pydata/pandas/issues/2101 -.. _GH2087: https://github.com/pydata/pandas/issues/2087 -.. _GH2110: https://github.com/pydata/pandas/issues/2110 -.. _GH2205: https://github.com/pydata/pandas/issues/2205 -.. _GH2230: https://github.com/pydata/pandas/issues/2230 -.. _GH2079: https://github.com/pydata/pandas/issues/2079 -.. 
_GH2082: https://github.com/pydata/pandas/issues/2082 -.. _GH2157: https://github.com/pydata/pandas/issues/2157 -.. _GH2126: https://github.com/pydata/pandas/issues/2126 -.. _GH2078: https://github.com/pydata/pandas/issues/2078 -.. _GH2041: https://github.com/pydata/pandas/issues/2041 -.. _GH2083: https://github.com/pydata/pandas/issues/2083 -.. _GH2100: https://github.com/pydata/pandas/issues/2100 -.. _GH2119: https://github.com/pydata/pandas/issues/2119 -.. _GH2144: https://github.com/pydata/pandas/issues/2144 -.. _GH2095: https://github.com/pydata/pandas/issues/2095 -.. _GH2060: https://github.com/pydata/pandas/issues/2060 -.. _GH2165: https://github.com/pydata/pandas/issues/2165 -.. _GH2146: https://github.com/pydata/pandas/issues/2146 -.. _GH2075: https://github.com/pydata/pandas/issues/2075 -.. _GH2148: https://github.com/pydata/pandas/issues/2148 -.. _GH2155: https://github.com/pydata/pandas/issues/2155 -.. _GH2161: https://github.com/pydata/pandas/issues/2161 -.. _GH2163: https://github.com/pydata/pandas/issues/2163 -.. _GH2096: https://github.com/pydata/pandas/issues/2096 -.. _GH2107: https://github.com/pydata/pandas/issues/2107 -.. _GH1978: https://github.com/pydata/pandas/issues/1978 -.. _GH1979: https://github.com/pydata/pandas/issues/1979 -.. _GH2117: https://github.com/pydata/pandas/issues/2117 -.. _GH2133: https://github.com/pydata/pandas/issues/2133 -.. _GH2114: https://github.com/pydata/pandas/issues/2114 -.. _GH2527: https://github.com/pydata/pandas/issues/2527 -.. _GH2128: https://github.com/pydata/pandas/issues/2128 -.. _GH2008: https://github.com/pydata/pandas/issues/2008 -.. _GH2179: https://github.com/pydata/pandas/issues/2179 -.. _GH2174: https://github.com/pydata/pandas/issues/2174 -.. _GH2129: https://github.com/pydata/pandas/issues/2129 -.. _GH2127: https://github.com/pydata/pandas/issues/2127 -.. _GH2196: https://github.com/pydata/pandas/issues/2196 -.. _GH2199: https://github.com/pydata/pandas/issues/2199 -.. 
_GH2201: https://github.com/pydata/pandas/issues/2201 -.. _GH2200: https://github.com/pydata/pandas/issues/2200 -.. _GH2236: https://github.com/pydata/pandas/issues/2236 -.. _GH2218: https://github.com/pydata/pandas/issues/2218 -.. _GH2219: https://github.com/pydata/pandas/issues/2219 -.. _GH2220: https://github.com/pydata/pandas/issues/2220 -.. _GH2228: https://github.com/pydata/pandas/issues/2228 -.. _GH2245: https://github.com/pydata/pandas/issues/2245 -.. _GH2227: https://github.com/pydata/pandas/issues/2227 -.. _GH2229: https://github.com/pydata/pandas/issues/2229 -.. _GH2240: https://github.com/pydata/pandas/issues/2240 -.. _GH2234: https://github.com/pydata/pandas/issues/2234 -.. _GH1610: https://github.com/pydata/pandas/issues/1610 -.. _GH2237: https://github.com/pydata/pandas/issues/2237 -.. _GH2248: https://github.com/pydata/pandas/issues/2248 - - -pandas 0.9.0 -============ - -**Release date:** 10/7/2012 - -**New features** - - - Add ``str.encode`` and ``str.decode`` to Series (GH1706_) - - Add `to_latex` method to DataFrame (GH1735_) - - Add convenient expanding window equivalents of all rolling_* ops (GH1785_) - - Add Options class to pandas.io.data for fetching options data from Yahoo! 
- Finance (GH1748_, GH1739_) - - Recognize and convert more boolean values in file parsing (Yes, No, TRUE, - FALSE, variants thereof) (GH1691_, GH1295_) - - Add Panel.update method, analogous to DataFrame.update (GH1999_, GH1988_) - -**Improvements to existing features** - - - Proper handling of NA values in merge operations (GH1990_) - - Add ``flags`` option for ``re.compile`` in some Series.str methods (GH1659_) - - Parsing of UTC date strings in read_* functions (GH1693_) - - Handle generator input to Series (GH1679_) - - Add `na_action='ignore'` to Series.map to quietly propagate NAs (GH1661_) - - Add args/kwds options to Series.apply (GH1829_) - - Add inplace option to Series/DataFrame.reset_index (GH1797_) - - Add ``level`` parameter to ``Series.reset_index`` - - Add quoting option for DataFrame.to_csv (GH1902_) - - Indicate long column value truncation in DataFrame output with ... (GH1854_) - - DataFrame.dot will not do data alignment, and also work with Series (GH1915_) - - Add ``na`` option for missing data handling in some vectorized string - methods (GH1689_) - - If index_label=False in DataFrame.to_csv, do not print fields/commas in the - text output. Results in easier importing into R (GH1583_) - - Can pass tuple/list of axes to DataFrame.dropna to simplify repeated calls - (dropping both columns and rows) (GH924_) - - Improve DataFrame.to_html output for hierarchically-indexed rows (do not - repeat levels) (GH1929_) - - TimeSeries.between_time can now select times across midnight (GH1871_) - - Enable `skip_footer` parameter in `ExcelFile.parse` (GH1843_) - -**API Changes** - - - Change default header names in read_* functions to more Pythonic X0, X1, - etc. instead of X.1, X.2. (GH2000_) - - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear`` - (GH1723_) - - Don't modify NumPy suppress printoption at import time - - The internal HDF5 data arrangement for DataFrames has been - transposed. 
Legacy files will still be readable by HDFStore (GH1834_, GH1824_) - - Legacy cruft removed: pandas.stats.misc.quantileTS - - Use ISO8601 format for Period repr: monthly, daily, and on down (GH1776_) - - Empty DataFrame columns are now created as object dtype. This will prevent - a class of TypeErrors that was occurring in code where the dtype of a - column would depend on the presence of data or not (e.g. a SQL query having - results) (GH1783_) - - Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame - (GH1630_) - - `first` and `last` methods in `GroupBy` no longer drop non-numeric columns - (GH1809_) - - Resolved inconsistencies in specifying custom NA values in text parser. - `na_values` of type dict no longer override default NAs unless - `keep_default_na` is set to false explicitly (GH1657_) - - Enable `skipfooter` parameter in text parsers as an alias for `skip_footer` - -**Bug fixes** - - - Perform arithmetic column-by-column in mixed-type DataFrame to avoid type - upcasting issues. Caused downstream DataFrame.diff bug (GH1896_) - - Fix matplotlib auto-color assignment when no custom spectrum passed. 
Also - respect passed color keyword argument (GH1711_) - - Fix resampling logical error with closed='left' (GH1726_) - - Fix critical DatetimeIndex.union bugs (GH1730_, GH1719_, GH1745_, GH1702_, GH1753_) - - Fix critical DatetimeIndex.intersection bug with unanchored offsets (GH1708_) - - Fix MM-YYYY time series indexing case (GH1672_) - - Fix case where Categorical group key was not being passed into index in - GroupBy result (GH1701_) - - Handle Ellipsis in Series.__getitem__/__setitem__ (GH1721_) - - Fix some bugs with handling datetime64 scalars of other units in NumPy 1.6 - and 1.7 (GH1717_) - - Fix performance issue in MultiIndex.format (GH1746_) - - Fixed GroupBy bugs interacting with DatetimeIndex asof / map methods (GH1677_) - - Handle factors with NAs in pandas.rpy (GH1615_) - - Fix statsmodels import in pandas.stats.var (GH1734_) - - Fix DataFrame repr/info summary with non-unique columns (GH1700_) - - Fix Series.iget_value for non-unique indexes (GH1694_) - - Don't lose tzinfo when passing DatetimeIndex as DataFrame column (GH1682_) - - Fix tz conversion with time zones that haven't had any DST transitions since - first date in the array (GH1673_) - - Fix field access with UTC->local conversion on unsorted arrays (GH1756_) - - Fix isnull handling of array-like (list) inputs (GH1755_) - - Fix regression in handling of Series in Series constructor (GH1671_) - - Fix comparison of Int64Index with DatetimeIndex (GH1681_) - - Fix min_periods handling in new rolling_max/min at array start (GH1695_) - - Fix errors with how='median' and generic NumPy resampling in some cases - caused by SeriesBinGrouper (GH1648_, GH1688_) - - When grouping by level, exclude unobserved levels (GH1697_) - - Don't lose tzinfo in DatetimeIndex when shifting by different offset (GH1683_) - - Hack to support storing data with a zero-length axis in HDFStore (GH1707_) - - Fix DatetimeIndex tz-aware range generation issue (GH1674_) - - Fix method='time' interpolation with intraday data 
(GH1698_) - - Don't plot all-NA DataFrame columns as zeros (GH1696_) - - Fix bug in scatter_plot with by option (GH1716_) - - Fix performance problem in infer_freq with lots of non-unique stamps (GH1686_) - - Fix handling of PeriodIndex as argument to create MultiIndex (GH1705_) - - Fix re: unicode MultiIndex level names in Series/DataFrame repr (GH1736_) - - Handle PeriodIndex in to_datetime instance method (GH1703_) - - Support StaticTzInfo in DatetimeIndex infrastructure (GH1692_) - - Allow MultiIndex setops with length-0 other type indexes (GH1727_) - - Fix handling of DatetimeIndex in DataFrame.to_records (GH1720_) - - Fix handling of general objects in isnull on which bool(...) fails (GH1749_) - - Fix .ix indexing with MultiIndex ambiguity (GH1678_) - - Fix .ix setting logic error with non-unique MultiIndex (GH1750_) - - Basic indexing now works on MultiIndex with > 1000000 elements, regression - from earlier version of pandas (GH1757_) - - Handle non-float64 dtypes in fast DataFrame.corr/cov code paths (GH1761_) - - Fix DatetimeIndex.isin to function properly (GH1763_) - - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with - right time zone (GH1777_) - - Fix DST issues with generating ancxhored date ranges (GH1778_) - - Fix issue calling sort on result of Series.unique (GH1807_) - - Fix numerical issue leading to square root of negative number in - rolling_std (GH1840_) - - Let Series.str.split accept no arguments (like str.split) (GH1859_) - - Allow user to have dateutil 2.1 installed on a Python 2 system (GH1851_) - - Catch ImportError less aggressively in pandas/__init__.py (GH1845_) - - Fix pip source installation bug when installing from GitHub (GH1805_) - - Fix error when window size > array size in rolling_apply (GH1850_) - - Fix pip source installation issues via SSH from GitHub - - Fix OLS.summary when column is a tuple (GH1837_) - - Fix bug in __doc__ patching when -OO passed to interpreter - (GH1792_ GH1741_ GH1774_) - - 
Fix unicode console encoding issue in IPython notebook (GH1782_, GH1768_) - - Fix unicode formatting issue with Series.name (GH1782_) - - Fix bug in DataFrame.duplicated with datetime64 columns (GH1833_) - - Fix bug in Panel internals resulting in error when doing fillna after - truncate not changing size of panel (GH1823_) - - Prevent segfault due to MultiIndex not being supported in HDFStore table - format (GH1848_) - - Fix UnboundLocalError in Panel.__setitem__ and add better error (GH1826_) - - Fix to_csv issues with list of string entries. Isnull works on list of - strings now too (GH1791_) - - Fix Timestamp comparisons with datetime values outside the nanosecond range - (1677-2262) - - Revert to prior behavior of normalize_date with datetime.date objects - (return datetime) - - Fix broken interaction between np.nansum and Series.any/all - - Fix bug with multiple column date parsers (GH1866_) - - DatetimeIndex.union(Int64Index) was broken - - Make plot x vs y interface consistent with integer indexing (GH1842_) - - set_index inplace modified data even if unique check fails (GH1831_) - - Only use Q-OCT/NOV/DEC in quarterly frequency inference (GH1789_) - - Upcast to dtype=object when unstacking boolean DataFrame (GH1820_) - - Fix float64/float32 merging bug (GH1849_) - - Fixes to Period.start_time for non-daily frequencies (GH1857_) - - Fix failure when converter used on index_col in read_csv (GH1835_) - - Implement PeriodIndex.append so that pandas.concat works correctly (GH1815_) - - Avoid Cython out-of-bounds access causing segfault sometimes in pad_2d, - backfill_2d - - Fix resampling error with intraday times and anchored target time (like - AS-DEC) (GH1772_) - - Fix .ix indexing bugs with mixed-integer indexes (GH1799_) - - Respect passed color keyword argument in Series.plot (GH1890_) - - Fix rolling_min/max when the window is larger than the size of the input - array. 
Check other malformed inputs (GH1899_, GH1897_) - - Rolling variance / standard deviation with only a single observation in - window (GH1884_) - - Fix unicode sheet name failure in to_excel (GH1828_) - - Override DatetimeIndex.min/max to return Timestamp objects (GH1895_) - - Fix column name formatting issue in length-truncated column (GH1906_) - - Fix broken handling of copying Index metadata to new instances created by - view(...) calls inside the NumPy infrastructure - - Support datetime.date again in DateOffset.rollback/rollforward - - Raise Exception if set passed to Series constructor (GH1913_) - - Add TypeError when appending HDFStore table w/ wrong index type (GH1881_) - - Don't raise exception on empty inputs in EW functions (e.g. ewma) (GH1900_) - - Make asof work correctly with PeriodIndex (GH1883_) - - Fix extlinks in doc build - - Fill boolean DataFrame with NaN when calling shift (GH1814_) - - Fix setuptools bug causing pip not to Cythonize .pyx files sometimes - - Fix negative integer indexing regression in .ix from 0.7.x (GH1888_) - - Fix error while retrieving timezone and utc offset from subclasses of - datetime.tzinfo without .zone and ._utcoffset attributes (GH1922_) - - Fix DataFrame formatting of small, non-zero FP numbers (GH1911_) - - Various fixes by upcasting of date -> datetime (GH1395_) - - Raise better exception when passing multiple functions with the same name, - such as lambdas, to GroupBy.aggregate - - Fix DataFrame.apply with axis=1 on a non-unique index (GH1878_) - - Proper handling of Index subclasses in pandas.unique (GH1759_) - - Set index names in DataFrame.from_records (GH1744_) - - Fix time series indexing error with duplicates, under and over hash table - size cutoff (GH1821_) - - Handle list keys in addition to tuples in DataFrame.xs when - partial-indexing a hierarchically-indexed DataFrame (GH1796_) - - Support multiple column selection in DataFrame.__getitem__ with duplicate - columns (GH1943_) - - Fix time zone 
localization bug causing improper fields (e.g. hours) in time - zones that have not had a UTC transition in a long time (GH1946_) - - Fix errors when parsing and working with with fixed offset timezones - (GH1922_, GH1928_) - - Fix text parser bug when handling UTC datetime objects generated by - dateutil (GH1693_) - - Fix plotting bug when 'B' is the inferred frequency but index actually - contains weekends (GH1668_, GH1669_) - - Fix plot styling bugs (GH1666_, GH1665_, GH1658_) - - Fix plotting bug with index/columns with unicode (GH1685_) - - Fix DataFrame constructor bug when passed Series with datetime64 dtype - in a dict (GH1680_) - - Fixed regression in generating DatetimeIndex using timezone aware - datetime.datetime (GH1676_) - - Fix DataFrame bug when printing concatenated DataFrames with duplicated - columns (GH1675_) - - Fixed bug when plotting time series with multiple intraday frequencies - (GH1732_) - - Fix bug in DataFrame.duplicated to enable iterables other than list-types - as input argument (GH1773_) - - Fix resample bug when passed list of lambdas as `how` argument (GH1808_) - - Repr fix for MultiIndex level with all NAs (GH1971_) - - Fix PeriodIndex slicing bug when slice start/end are out-of-bounds (GH1977_) - - Fix read_table bug when parsing unicode (GH1975_) - - Fix BlockManager.iget bug when dealing with non-unique MultiIndex as columns - (GH1970_) - - Fix reset_index bug if both drop and level are specified (GH1957_) - - Work around unsafe NumPy object->int casting with Cython function (GH1987_) - - Fix datetime64 formatting bug in DataFrame.to_csv (GH1993_) - - Default start date in pandas.io.data to 1/1/2000 as the docs say (GH2011_) - - -.. _GH1706: https://github.com/pydata/pandas/issues/1706 -.. _GH1735: https://github.com/pydata/pandas/issues/1735 -.. _GH1785: https://github.com/pydata/pandas/issues/1785 -.. _GH1748: https://github.com/pydata/pandas/issues/1748 -.. _GH1739: https://github.com/pydata/pandas/issues/1739 -.. 
_GH1691: https://github.com/pydata/pandas/issues/1691 -.. _GH1295: https://github.com/pydata/pandas/issues/1295 -.. _GH1999: https://github.com/pydata/pandas/issues/1999 -.. _GH1988: https://github.com/pydata/pandas/issues/1988 -.. _GH1990: https://github.com/pydata/pandas/issues/1990 -.. _GH1659: https://github.com/pydata/pandas/issues/1659 -.. _GH1693: https://github.com/pydata/pandas/issues/1693 -.. _GH1679: https://github.com/pydata/pandas/issues/1679 -.. _GH1661: https://github.com/pydata/pandas/issues/1661 -.. _GH1829: https://github.com/pydata/pandas/issues/1829 -.. _GH1797: https://github.com/pydata/pandas/issues/1797 -.. _GH1902: https://github.com/pydata/pandas/issues/1902 -.. _GH1854: https://github.com/pydata/pandas/issues/1854 -.. _GH1915: https://github.com/pydata/pandas/issues/1915 -.. _GH1689: https://github.com/pydata/pandas/issues/1689 -.. _GH1583: https://github.com/pydata/pandas/issues/1583 -.. _GH924: https://github.com/pydata/pandas/issues/924 -.. _GH1929: https://github.com/pydata/pandas/issues/1929 -.. _GH1871: https://github.com/pydata/pandas/issues/1871 -.. _GH1843: https://github.com/pydata/pandas/issues/1843 -.. _GH2000: https://github.com/pydata/pandas/issues/2000 -.. _GH1723: https://github.com/pydata/pandas/issues/1723 -.. _GH1834: https://github.com/pydata/pandas/issues/1834 -.. _GH1824: https://github.com/pydata/pandas/issues/1824 -.. _GH1776: https://github.com/pydata/pandas/issues/1776 -.. _GH1783: https://github.com/pydata/pandas/issues/1783 -.. _GH1630: https://github.com/pydata/pandas/issues/1630 -.. _GH1809: https://github.com/pydata/pandas/issues/1809 -.. _GH1657: https://github.com/pydata/pandas/issues/1657 -.. _GH1896: https://github.com/pydata/pandas/issues/1896 -.. _GH1711: https://github.com/pydata/pandas/issues/1711 -.. _GH1726: https://github.com/pydata/pandas/issues/1726 -.. _GH1730: https://github.com/pydata/pandas/issues/1730 -.. _GH1719: https://github.com/pydata/pandas/issues/1719 -.. 
_GH1745: https://github.com/pydata/pandas/issues/1745 -.. _GH1702: https://github.com/pydata/pandas/issues/1702 -.. _GH1753: https://github.com/pydata/pandas/issues/1753 -.. _GH1708: https://github.com/pydata/pandas/issues/1708 -.. _GH1672: https://github.com/pydata/pandas/issues/1672 -.. _GH1701: https://github.com/pydata/pandas/issues/1701 -.. _GH1721: https://github.com/pydata/pandas/issues/1721 -.. _GH1717: https://github.com/pydata/pandas/issues/1717 -.. _GH1746: https://github.com/pydata/pandas/issues/1746 -.. _GH1677: https://github.com/pydata/pandas/issues/1677 -.. _GH1615: https://github.com/pydata/pandas/issues/1615 -.. _GH1734: https://github.com/pydata/pandas/issues/1734 -.. _GH1700: https://github.com/pydata/pandas/issues/1700 -.. _GH1694: https://github.com/pydata/pandas/issues/1694 -.. _GH1682: https://github.com/pydata/pandas/issues/1682 -.. _GH1673: https://github.com/pydata/pandas/issues/1673 -.. _GH1756: https://github.com/pydata/pandas/issues/1756 -.. _GH1755: https://github.com/pydata/pandas/issues/1755 -.. _GH1671: https://github.com/pydata/pandas/issues/1671 -.. _GH1681: https://github.com/pydata/pandas/issues/1681 -.. _GH1695: https://github.com/pydata/pandas/issues/1695 -.. _GH1648: https://github.com/pydata/pandas/issues/1648 -.. _GH1688: https://github.com/pydata/pandas/issues/1688 -.. _GH1697: https://github.com/pydata/pandas/issues/1697 -.. _GH1683: https://github.com/pydata/pandas/issues/1683 -.. _GH1707: https://github.com/pydata/pandas/issues/1707 -.. _GH1674: https://github.com/pydata/pandas/issues/1674 -.. _GH1698: https://github.com/pydata/pandas/issues/1698 -.. _GH1696: https://github.com/pydata/pandas/issues/1696 -.. _GH1716: https://github.com/pydata/pandas/issues/1716 -.. _GH1686: https://github.com/pydata/pandas/issues/1686 -.. _GH1705: https://github.com/pydata/pandas/issues/1705 -.. _GH1736: https://github.com/pydata/pandas/issues/1736 -.. _GH1703: https://github.com/pydata/pandas/issues/1703 -.. 
_GH1692: https://github.com/pydata/pandas/issues/1692 -.. _GH1727: https://github.com/pydata/pandas/issues/1727 -.. _GH1720: https://github.com/pydata/pandas/issues/1720 -.. _GH1749: https://github.com/pydata/pandas/issues/1749 -.. _GH1678: https://github.com/pydata/pandas/issues/1678 -.. _GH1750: https://github.com/pydata/pandas/issues/1750 -.. _GH1757: https://github.com/pydata/pandas/issues/1757 -.. _GH1761: https://github.com/pydata/pandas/issues/1761 -.. _GH1763: https://github.com/pydata/pandas/issues/1763 -.. _GH1777: https://github.com/pydata/pandas/issues/1777 -.. _GH1778: https://github.com/pydata/pandas/issues/1778 -.. _GH1807: https://github.com/pydata/pandas/issues/1807 -.. _GH1840: https://github.com/pydata/pandas/issues/1840 -.. _GH1859: https://github.com/pydata/pandas/issues/1859 -.. _GH1851: https://github.com/pydata/pandas/issues/1851 -.. _GH1845: https://github.com/pydata/pandas/issues/1845 -.. _GH1805: https://github.com/pydata/pandas/issues/1805 -.. _GH1850: https://github.com/pydata/pandas/issues/1850 -.. _GH1837: https://github.com/pydata/pandas/issues/1837 -.. _GH1792: https://github.com/pydata/pandas/issues/1792 -.. _GH1741: https://github.com/pydata/pandas/issues/1741 -.. _GH1774: https://github.com/pydata/pandas/issues/1774 -.. _GH1782: https://github.com/pydata/pandas/issues/1782 -.. _GH1768: https://github.com/pydata/pandas/issues/1768 -.. _GH1833: https://github.com/pydata/pandas/issues/1833 -.. _GH1823: https://github.com/pydata/pandas/issues/1823 -.. _GH1848: https://github.com/pydata/pandas/issues/1848 -.. _GH1826: https://github.com/pydata/pandas/issues/1826 -.. _GH1791: https://github.com/pydata/pandas/issues/1791 -.. _GH1866: https://github.com/pydata/pandas/issues/1866 -.. _GH1842: https://github.com/pydata/pandas/issues/1842 -.. _GH1831: https://github.com/pydata/pandas/issues/1831 -.. _GH1789: https://github.com/pydata/pandas/issues/1789 -.. _GH1820: https://github.com/pydata/pandas/issues/1820 -.. 
_GH1849: https://github.com/pydata/pandas/issues/1849 -.. _GH1857: https://github.com/pydata/pandas/issues/1857 -.. _GH1835: https://github.com/pydata/pandas/issues/1835 -.. _GH1815: https://github.com/pydata/pandas/issues/1815 -.. _GH1772: https://github.com/pydata/pandas/issues/1772 -.. _GH1799: https://github.com/pydata/pandas/issues/1799 -.. _GH1890: https://github.com/pydata/pandas/issues/1890 -.. _GH1899: https://github.com/pydata/pandas/issues/1899 -.. _GH1897: https://github.com/pydata/pandas/issues/1897 -.. _GH1884: https://github.com/pydata/pandas/issues/1884 -.. _GH1828: https://github.com/pydata/pandas/issues/1828 -.. _GH1895: https://github.com/pydata/pandas/issues/1895 -.. _GH1906: https://github.com/pydata/pandas/issues/1906 -.. _GH1913: https://github.com/pydata/pandas/issues/1913 -.. _GH1881: https://github.com/pydata/pandas/issues/1881 -.. _GH1900: https://github.com/pydata/pandas/issues/1900 -.. _GH1883: https://github.com/pydata/pandas/issues/1883 -.. _GH1814: https://github.com/pydata/pandas/issues/1814 -.. _GH1888: https://github.com/pydata/pandas/issues/1888 -.. _GH1922: https://github.com/pydata/pandas/issues/1922 -.. _GH1911: https://github.com/pydata/pandas/issues/1911 -.. _GH1395: https://github.com/pydata/pandas/issues/1395 -.. _GH1878: https://github.com/pydata/pandas/issues/1878 -.. _GH1759: https://github.com/pydata/pandas/issues/1759 -.. _GH1744: https://github.com/pydata/pandas/issues/1744 -.. _GH1821: https://github.com/pydata/pandas/issues/1821 -.. _GH1796: https://github.com/pydata/pandas/issues/1796 -.. _GH1943: https://github.com/pydata/pandas/issues/1943 -.. _GH1946: https://github.com/pydata/pandas/issues/1946 -.. _GH1928: https://github.com/pydata/pandas/issues/1928 -.. _GH1668: https://github.com/pydata/pandas/issues/1668 -.. _GH1669: https://github.com/pydata/pandas/issues/1669 -.. _GH1666: https://github.com/pydata/pandas/issues/1666 -.. _GH1665: https://github.com/pydata/pandas/issues/1665 -.. 
_GH1658: https://github.com/pydata/pandas/issues/1658 -.. _GH1685: https://github.com/pydata/pandas/issues/1685 -.. _GH1680: https://github.com/pydata/pandas/issues/1680 -.. _GH1676: https://github.com/pydata/pandas/issues/1676 -.. _GH1675: https://github.com/pydata/pandas/issues/1675 -.. _GH1732: https://github.com/pydata/pandas/issues/1732 -.. _GH1773: https://github.com/pydata/pandas/issues/1773 -.. _GH1808: https://github.com/pydata/pandas/issues/1808 -.. _GH1971: https://github.com/pydata/pandas/issues/1971 -.. _GH1977: https://github.com/pydata/pandas/issues/1977 -.. _GH1975: https://github.com/pydata/pandas/issues/1975 -.. _GH1970: https://github.com/pydata/pandas/issues/1970 -.. _GH1957: https://github.com/pydata/pandas/issues/1957 -.. _GH1987: https://github.com/pydata/pandas/issues/1987 -.. _GH1993: https://github.com/pydata/pandas/issues/1993 -.. _GH2011: https://github.com/pydata/pandas/issues/2011 - - -pandas 0.8.1 -============ - -**Release date:** July 22, 2012 - -**New features** - - - Add vectorized, NA-friendly string methods to Series (GH1621_, GH620_) - - Can pass dict of per-column line styles to DataFrame.plot (GH1559_) - - Selective plotting to secondary y-axis on same subplot (GH1640_) - - Add new ``bootstrap_plot`` plot function - - Add new ``parallel_coordinates`` plot function (GH1488_) - - Add ``radviz`` plot function (GH1566_) - - Add ``multi_sparse`` option to ``set_printoptions`` to modify display of - hierarchical indexes (GH1538_) - - Add ``dropna`` method to Panel (GH171_) - -**Improvements to existing features** - - - Use moving min/max algorithms from Bottleneck in rolling_min/rolling_max - for > 100x speedup. 
(GH1504_, GH50_) - - Add Cython group median method for >15x speedup (GH1358_) - - Drastically improve ``to_datetime`` performance on ISO8601 datetime strings - (with no time zones) (GH1571_) - - Improve single-key groupby performance on large data sets, accelerate use of - groupby with a Categorical variable - - Add ability to append hierarchical index levels with ``set_index`` and to - drop single levels with ``reset_index`` (GH1569_, GH1577_) - - Always apply passed functions in ``resample``, even if upsampling (GH1596_) - - Avoid unnecessary copies in DataFrame constructor with explicit dtype (GH1572_) - - Cleaner DatetimeIndex string representation with 1 or 2 elements (GH1611_) - - Improve performance of array-of-Period to PeriodIndex, convert such arrays - to PeriodIndex inside Index (GH1215_) - - More informative string representation for weekly Period objects (GH1503_) - - Accelerate 3-axis multi data selection from homogeneous Panel (GH979_) - - Add ``adjust`` option to ewma to disable adjustment factor (GH1584_) - - Add new matplotlib converters for high frequency time series plotting (GH1599_) - - Handling of tz-aware datetime.datetime objects in to_datetime; raise - Exception unless utc=True given (GH1581_) - -**Bug fixes** - - - Fix NA handling in DataFrame.to_panel (GH1582_) - - Handle TypeError issues inside PyObject_RichCompareBool calls in khash - (GH1318_) - - Fix resampling bug to lower case daily frequency (GH1588_) - - Fix kendall/spearman DataFrame.corr bug with no overlap (GH1595_) - - Fix bug in DataFrame.set_index (GH1592_) - - Don't ignore axes in boxplot if by specified (GH1565_) - - Fix Panel .ix indexing with integers bug (GH1603_) - - Fix Partial indexing bugs (years, months, ...) 
with PeriodIndex (GH1601_) - - Fix MultiIndex console formatting issue (GH1606_) - - Unordered index with duplicates doesn't yield scalar location for single - entry (GH1586_) - - Fix resampling of tz-aware time series with "anchored" freq (GH1591_) - - Fix DataFrame.rank error on integer data (GH1589_) - - Selection of multiple SparseDataFrame columns by list in __getitem__ (GH1585_) - - Override Index.tolist for compatibility with MultiIndex (GH1576_) - - Fix hierarchical summing bug with MultiIndex of length 1 (GH1568_) - - Work around numpy.concatenate use/bug in Series.set_value (GH1561_) - - Ensure Series/DataFrame are sorted before resampling (GH1580_) - - Fix unhandled IndexError when indexing very large time series (GH1562_) - - Fix DatetimeIndex intersection logic error with irregular indexes (GH1551_) - - Fix unit test errors on Python 3 (GH1550_) - - Fix .ix indexing bugs in duplicate DataFrame index (GH1201_) - - Better handle errors with non-existing objects in HDFStore (GH1254_) - - Don't copy int64 array data in DatetimeIndex when copy=False (GH1624_) - - Fix resampling of conforming periods quarterly to annual (GH1622_) - - Don't lose index name on resampling (GH1631_) - - Support python-dateutil version 2.1 (GH1637_) - - Fix broken scatter_matrix axis labeling, esp. 
with time series (GH1625_) - - Fix cases where extra keywords weren't being passed on to matplotlib from - Series.plot (GH1636_) - - Fix BusinessMonthBegin logic for dates before 1st bday of month (GH1645_) - - Ensure string alias converted (valid in DatetimeIndex.get_loc) in - DataFrame.xs / __getitem__ (GH1644_) - - Fix use of string alias timestamps with tz-aware time series (GH1647_) - - Fix Series.max/min and Series.describe on len-0 series (GH1650_) - - Handle None values in dict passed to concat (GH1649_) - - Fix Series.interpolate with method='values' and DatetimeIndex (GH1646_) - - Fix IndexError in left merges on a DataFrame with 0-length (GH1628_) - - Fix DataFrame column width display with UTF-8 encoded characters (GH1620_) - - Handle case in pandas.io.data.get_data_yahoo where Yahoo! returns duplicate - dates for most recent business day - - Avoid downsampling when plotting mixed frequencies on the same subplot (GH1619_) - - Fix read_csv bug when reading a single line (GH1553_) - - Fix bug in C code causing monthly periods prior to December 1969 to be off (GH1570_) - -.. _GH1621: https://github.com/pydata/pandas/issues/1621 -.. _GH620: https://github.com/pydata/pandas/issues/620 -.. _GH1559: https://github.com/pydata/pandas/issues/1559 -.. _GH1640: https://github.com/pydata/pandas/issues/1640 -.. _GH1488: https://github.com/pydata/pandas/issues/1488 -.. _GH1566: https://github.com/pydata/pandas/issues/1566 -.. _GH1538: https://github.com/pydata/pandas/issues/1538 -.. _GH171: https://github.com/pydata/pandas/issues/171 -.. _GH1504: https://github.com/pydata/pandas/issues/1504 -.. _GH50: https://github.com/pydata/pandas/issues/50 -.. _GH1358: https://github.com/pydata/pandas/issues/1358 -.. _GH1571: https://github.com/pydata/pandas/issues/1571 -.. _GH1569: https://github.com/pydata/pandas/issues/1569 -.. _GH1577: https://github.com/pydata/pandas/issues/1577 -.. _GH1596: https://github.com/pydata/pandas/issues/1596 -.. 
_GH1572: https://github.com/pydata/pandas/issues/1572 -.. _GH1611: https://github.com/pydata/pandas/issues/1611 -.. _GH1215: https://github.com/pydata/pandas/issues/1215 -.. _GH1503: https://github.com/pydata/pandas/issues/1503 -.. _GH979: https://github.com/pydata/pandas/issues/979 -.. _GH1584: https://github.com/pydata/pandas/issues/1584 -.. _GH1599: https://github.com/pydata/pandas/issues/1599 -.. _GH1581: https://github.com/pydata/pandas/issues/1581 -.. _GH1582: https://github.com/pydata/pandas/issues/1582 -.. _GH1318: https://github.com/pydata/pandas/issues/1318 -.. _GH1588: https://github.com/pydata/pandas/issues/1588 -.. _GH1595: https://github.com/pydata/pandas/issues/1595 -.. _GH1592: https://github.com/pydata/pandas/issues/1592 -.. _GH1565: https://github.com/pydata/pandas/issues/1565 -.. _GH1603: https://github.com/pydata/pandas/issues/1603 -.. _GH1601: https://github.com/pydata/pandas/issues/1601 -.. _GH1606: https://github.com/pydata/pandas/issues/1606 -.. _GH1586: https://github.com/pydata/pandas/issues/1586 -.. _GH1591: https://github.com/pydata/pandas/issues/1591 -.. _GH1589: https://github.com/pydata/pandas/issues/1589 -.. _GH1585: https://github.com/pydata/pandas/issues/1585 -.. _GH1576: https://github.com/pydata/pandas/issues/1576 -.. _GH1568: https://github.com/pydata/pandas/issues/1568 -.. _GH1561: https://github.com/pydata/pandas/issues/1561 -.. _GH1580: https://github.com/pydata/pandas/issues/1580 -.. _GH1562: https://github.com/pydata/pandas/issues/1562 -.. _GH1551: https://github.com/pydata/pandas/issues/1551 -.. _GH1550: https://github.com/pydata/pandas/issues/1550 -.. _GH1201: https://github.com/pydata/pandas/issues/1201 -.. _GH1254: https://github.com/pydata/pandas/issues/1254 -.. _GH1624: https://github.com/pydata/pandas/issues/1624 -.. _GH1622: https://github.com/pydata/pandas/issues/1622 -.. _GH1631: https://github.com/pydata/pandas/issues/1631 -.. _GH1637: https://github.com/pydata/pandas/issues/1637 -.. 
_GH1625: https://github.com/pydata/pandas/issues/1625 -.. _GH1636: https://github.com/pydata/pandas/issues/1636 -.. _GH1645: https://github.com/pydata/pandas/issues/1645 -.. _GH1644: https://github.com/pydata/pandas/issues/1644 -.. _GH1647: https://github.com/pydata/pandas/issues/1647 -.. _GH1650: https://github.com/pydata/pandas/issues/1650 -.. _GH1649: https://github.com/pydata/pandas/issues/1649 -.. _GH1646: https://github.com/pydata/pandas/issues/1646 -.. _GH1628: https://github.com/pydata/pandas/issues/1628 -.. _GH1620: https://github.com/pydata/pandas/issues/1620 -.. _GH1619: https://github.com/pydata/pandas/issues/1619 -.. _GH1553: https://github.com/pydata/pandas/issues/1553 -.. _GH1570: https://github.com/pydata/pandas/issues/1570 - - -pandas 0.8.0 -============ - -**Release date:** 6/29/2012 - -**New features** - - - New unified DatetimeIndex class for nanosecond-level timestamp data - - New Timestamp datetime.datetime subclass with easy time zone conversions, - and support for nanoseconds - - New PeriodIndex class for timespans, calendar logic, and Period scalar object - - High performance resampling of timestamp and period data. New `resample` - method of all pandas data structures - - New frequency names plus shortcut string aliases like '15h', '1h30min' - - Time series string indexing shorthand (GH222_) - - Add week, dayofyear array and other timestamp array-valued field accessor - functions to DatetimeIndex - - Add GroupBy.prod optimized aggregation function and 'prod' fast time series - conversion method (GH1018_) - - Implement robust frequency inference function and `inferred_freq` attribute - on DatetimeIndex (GH391_) - - New ``tz_convert`` and ``tz_localize`` methods in Series / DataFrame - - Convert DatetimeIndexes to UTC if time zones are different in join/setops - (GH864_) - - Add limit argument for forward/backward filling to reindex, fillna, - etc. 
(GH825_ and others) - - Add support for indexes (dates or otherwise) with duplicates and common - sense indexing/selection functionality - - Series/DataFrame.update methods, in-place variant of combine_first (GH961_) - - Add ``match`` function to API (GH502_) - - Add Cython-optimized first, last, min, max, prod functions to GroupBy (GH994_, - GH1043_) - - Dates can be split across multiple columns (GH1227_, GH1186_) - - Add experimental support for converting pandas DataFrame to R data.frame - via rpy2 (GH350_, GH1212_) - - Can pass list of (name, function) to GroupBy.aggregate to get aggregates in - a particular order (GH610_) - - Can pass dicts with lists of functions or dicts to GroupBy aggregate to do - much more flexible multiple function aggregation (GH642_, GH610_) - - New ordered_merge functions for merging DataFrames with ordered - data. Also supports group-wise merging for panel data (GH813_) - - Add keys() method to DataFrame - - Add flexible replace method for replacing potentially values to Series and - DataFrame (GH929_, GH1241_) - - Add 'kde' plot kind for Series/DataFrame.plot (GH1059_) - - More flexible multiple function aggregation with GroupBy - - Add pct_change function to Series/DataFrame - - Add option to interpolate by Index values in Series.interpolate (GH1206_) - - Add ``max_colwidth`` option for DataFrame, defaulting to 50 - - Conversion of DataFrame through rpy2 to R data.frame (GH1282_, ) - - Add keys() method on DataFrame (GH1240_) - - Add new ``match`` function to API (similar to R) (GH502_) - - Add dayfirst option to parsers (GH854_) - - Add ``method`` argument to ``align`` method for forward/backward fillin - (GH216_) - - Add Panel.transpose method for rearranging axes (GH695_) - - Add new ``cut`` function (patterned after R) for discretizing data into - equal range-length bins or arbitrary breaks of your choosing (GH415_) - - Add new ``qcut`` for cutting with quantiles (GH1378_) - - Add ``value_counts`` top level array method 
(GH1392_) - - Added Andrews curves plot tupe (GH1325_) - - Add lag plot (GH1440_) - - Add autocorrelation_plot (GH1425_) - - Add support for tox and Travis CI (GH1382_) - - Add support for Categorical use in GroupBy (GH292_) - - Add ``any`` and ``all`` methods to DataFrame (GH1416_) - - Add ``secondary_y`` option to Series.plot - - Add experimental ``lreshape`` function for reshaping wide to long - -**Improvements to existing features** - - - Switch to klib/khash-based hash tables in Index classes for better - performance in many cases and lower memory footprint - - Shipping some functions from scipy.stats to reduce dependency, - e.g. Series.describe and DataFrame.describe (GH1092_) - - Can create MultiIndex by passing list of lists or list of arrays to Series, - DataFrame constructor, etc. (GH831_) - - Can pass arrays in addition to column names to DataFrame.set_index (GH402_) - - Improve the speed of "square" reindexing of homogeneous DataFrame objects - by significant margin (GH836_) - - Handle more dtypes when passed MaskedArrays in DataFrame constructor (GH406_) - - Improved performance of join operations on integer keys (GH682_) - - Can pass multiple columns to GroupBy object, e.g. 
grouped[[col1, col2]] to - only aggregate a subset of the value columns (GH383_) - - Add histogram / kde plot options for scatter_matrix diagonals (GH1237_) - - Add inplace option to Series/DataFrame.rename and sort_index, - DataFrame.drop_duplicates (GH805_, GH207_) - - More helpful error message when nothing passed to Series.reindex (GH1267_) - - Can mix array and scalars as dict-value inputs to DataFrame ctor (GH1329_) - - Use DataFrame columns' name for legend title in plots - - Preserve frequency in DatetimeIndex when possible in boolean indexing - operations - - Promote datetime.date values in data alignment operations (GH867_) - - Add ``order`` method to Index classes (GH1028_) - - Avoid hash table creation in large monotonic hash table indexes (GH1160_) - - Store time zones in HDFStore (GH1232_) - - Enable storage of sparse data structures in HDFStore (GH85_) - - Enable Series.asof to work with arrays of timestamp inputs - - Cython implementation of DataFrame.corr speeds up by > 100x (GH1349_, GH1354_) - - Exclude "nuisance" columns automatically in GroupBy.transform (GH1364_) - - Support functions-as-strings in GroupBy.transform (GH1362_) - - Use index name as xlabel/ylabel in plots (GH1415_) - - Add ``convert_dtype`` option to Series.apply to be able to leave data as - dtype=object (GH1414_) - - Can specify all index level names in concat (GH1419_) - - Add ``dialect`` keyword to parsers for quoting conventions (GH1363_) - - Enable DataFrame[bool_DataFrame] += value (GH1366_) - - Add ``retries`` argument to ``get_data_yahoo`` to try to prevent Yahoo! 
API - 404s (GH826_) - - Improve performance of reshaping by using O(N) categorical sorting - - Series names will be used for index of DataFrame if no index passed (GH1494_) - - Header argument in DataFrame.to_csv can accept a list of column names to - use instead of the object's columns (GH921_) - - Add ``raise_conflict`` argument to DataFrame.update (GH1526_) - - Support file-like objects in ExcelFile (GH1529_) - -**API Changes** - - - Rename `pandas._tseries` to `pandas.lib` - - Rename Factor to Categorical and add improvements. Numerous Categorical bug - fixes - - Frequency name overhaul, WEEKDAY/EOM and rules with @ - deprecated. get_legacy_offset_name backwards compatibility function added - - Raise ValueError in DataFrame.__nonzero__, so "if df" no longer works - (GH1073_) - - Change BDay (business day) to not normalize dates by default (GH506_) - - Remove deprecated DataMatrix name - - Default merge suffixes for overlap now have underscores instead of periods - to facilitate tab completion, etc. 
(GH1239_) - - Deprecation of offset, time_rule timeRule parameters throughout codebase - - Series.append and DataFrame.append no longer check for duplicate indexes - by default, add verify_integrity parameter (GH1394_) - - Refactor Factor class, old constructor moved to Factor.from_array - - Modified internals of MultiIndex to use less memory (no longer represented - as array of tuples) internally, speed up construction time and many methods - which construct intermediate hierarchical indexes (GH1467_) - -**Bug fixes** - - - Fix OverflowError from storing pre-1970 dates in HDFStore by switching to - datetime64 (GH179_) - - Fix logical error with February leap year end in YearEnd offset - - Series([False, nan]) was getting casted to float64 (GH1074_) - - Fix binary operations between boolean Series and object Series with - booleans and NAs (GH1074_, GH1079_) - - Couldn't assign whole array to column in mixed-type DataFrame via .ix - (GH1142_) - - Fix label slicing issues with float index values (GH1167_) - - Fix segfault caused by empty groups passed to groupby (GH1048_) - - Fix occasionally misbehaved reindexing in the presence of NaN labels (GH522_) - - Fix imprecise logic causing weird Series results from .apply (GH1183_) - - Unstack multiple levels in one shot, avoiding empty columns in some - cases. 
Fix pivot table bug (GH1181_) - - Fix formatting of MultiIndex on Series/DataFrame when index name coincides - with label (GH1217_) - - Handle Excel 2003 #N/A as NaN from xlrd (GH1213_, GH1225_) - - Fix timestamp locale-related deserialization issues with HDFStore by moving - to datetime64 representation (GH1081_, GH809_) - - Fix DataFrame.duplicated/drop_duplicates NA value handling (GH557_) - - Actually raise exceptions in fast reducer (GH1243_) - - Fix various timezone-handling bugs from 0.7.3 (GH969_) - - GroupBy on level=0 discarded index name (GH1313_) - - Better error message with unmergeable DataFrames (GH1307_) - - Series.__repr__ alignment fix with unicode index values (GH1279_) - - Better error message if nothing passed to reindex (GH1267_) - - More robust NA handling in DataFrame.drop_duplicates (GH557_) - - Resolve locale-based and pre-epoch HDF5 timestamp deserialization issues - (GH973_, GH1081_, GH179_) - - Implement Series.repeat (GH1229_) - - Fix indexing with namedtuple and other tuple subclasses (GH1026_) - - Fix float64 slicing bug (GH1167_) - - Parsing integers with commas (GH796_) - - Fix groupby improper data type when group consists of one value (GH1065_) - - Fix negative variance possibility in nanvar resulting from floating point - error (GH1090_) - - Consistently set name on groupby pieces (GH184_) - - Treat dict return values as Series in GroupBy.apply (GH823_) - - Respect column selection for DataFrame in in GroupBy.transform (GH1365_) - - Fix MultiIndex partial indexing bug (GH1352_) - - Enable assignment of rows in mixed-type DataFrame via .ix (GH1432_) - - Reset index mapping when grouping Series in Cython (GH1423_) - - Fix outer/inner DataFrame.join with non-unique indexes (GH1421_) - - Fix MultiIndex groupby bugs with empty lower levels (GH1401_) - - Calling fillna with a Series will have same behavior as with dict (GH1486_) - - SparseSeries reduction bug (GH1375_) - - Fix unicode serialization issue in HDFStore (GH1361_) - - Pass 
keywords to pyplot.boxplot in DataFrame.boxplot (GH1493_) - - Bug fixes in MonthBegin (GH1483_) - - Preserve MultiIndex names in drop (GH1513_) - - Fix Panel DataFrame slice-assignment bug (GH1533_) - - Don't use locals() in read_* functions (GH1547_) - -.. _GH222: https://github.com/pydata/pandas/issues/222 -.. _GH1018: https://github.com/pydata/pandas/issues/1018 -.. _GH391: https://github.com/pydata/pandas/issues/391 -.. _GH864: https://github.com/pydata/pandas/issues/864 -.. _GH825: https://github.com/pydata/pandas/issues/825 -.. _GH961: https://github.com/pydata/pandas/issues/961 -.. _GH502: https://github.com/pydata/pandas/issues/502 -.. _GH994: https://github.com/pydata/pandas/issues/994 -.. _GH1043: https://github.com/pydata/pandas/issues/1043 -.. _GH1227: https://github.com/pydata/pandas/issues/1227 -.. _GH1186: https://github.com/pydata/pandas/issues/1186 -.. _GH350: https://github.com/pydata/pandas/issues/350 -.. _GH1212: https://github.com/pydata/pandas/issues/1212 -.. _GH610: https://github.com/pydata/pandas/issues/610 -.. _GH642: https://github.com/pydata/pandas/issues/642 -.. _GH813: https://github.com/pydata/pandas/issues/813 -.. _GH929: https://github.com/pydata/pandas/issues/929 -.. _GH1241: https://github.com/pydata/pandas/issues/1241 -.. _GH1059: https://github.com/pydata/pandas/issues/1059 -.. _GH1206: https://github.com/pydata/pandas/issues/1206 -.. _GH1282: https://github.com/pydata/pandas/issues/1282 -.. _GH1240: https://github.com/pydata/pandas/issues/1240 -.. _GH854: https://github.com/pydata/pandas/issues/854 -.. _GH216: https://github.com/pydata/pandas/issues/216 -.. _GH695: https://github.com/pydata/pandas/issues/695 -.. _GH415: https://github.com/pydata/pandas/issues/415 -.. _GH1378: https://github.com/pydata/pandas/issues/1378 -.. _GH1392: https://github.com/pydata/pandas/issues/1392 -.. _GH1325: https://github.com/pydata/pandas/issues/1325 -.. _GH1440: https://github.com/pydata/pandas/issues/1440 -.. 
_GH1425: https://github.com/pydata/pandas/issues/1425 -.. _GH1382: https://github.com/pydata/pandas/issues/1382 -.. _GH292: https://github.com/pydata/pandas/issues/292 -.. _GH1416: https://github.com/pydata/pandas/issues/1416 -.. _GH1092: https://github.com/pydata/pandas/issues/1092 -.. _GH831: https://github.com/pydata/pandas/issues/831 -.. _GH402: https://github.com/pydata/pandas/issues/402 -.. _GH836: https://github.com/pydata/pandas/issues/836 -.. _GH406: https://github.com/pydata/pandas/issues/406 -.. _GH682: https://github.com/pydata/pandas/issues/682 -.. _GH383: https://github.com/pydata/pandas/issues/383 -.. _GH1237: https://github.com/pydata/pandas/issues/1237 -.. _GH805: https://github.com/pydata/pandas/issues/805 -.. _GH207: https://github.com/pydata/pandas/issues/207 -.. _GH1267: https://github.com/pydata/pandas/issues/1267 -.. _GH1329: https://github.com/pydata/pandas/issues/1329 -.. _GH867: https://github.com/pydata/pandas/issues/867 -.. _GH1028: https://github.com/pydata/pandas/issues/1028 -.. _GH1160: https://github.com/pydata/pandas/issues/1160 -.. _GH1232: https://github.com/pydata/pandas/issues/1232 -.. _GH1349: https://github.com/pydata/pandas/issues/1349 -.. _GH1354: https://github.com/pydata/pandas/issues/1354 -.. _GH1364: https://github.com/pydata/pandas/issues/1364 -.. _GH1362: https://github.com/pydata/pandas/issues/1362 -.. _GH1415: https://github.com/pydata/pandas/issues/1415 -.. _GH1414: https://github.com/pydata/pandas/issues/1414 -.. _GH1419: https://github.com/pydata/pandas/issues/1419 -.. _GH1363: https://github.com/pydata/pandas/issues/1363 -.. _GH1366: https://github.com/pydata/pandas/issues/1366 -.. _GH826: https://github.com/pydata/pandas/issues/826 -.. _GH1494: https://github.com/pydata/pandas/issues/1494 -.. _GH921: https://github.com/pydata/pandas/issues/921 -.. _GH1526: https://github.com/pydata/pandas/issues/1526 -.. _GH1529: https://github.com/pydata/pandas/issues/1529 -.. 
_GH1073: https://github.com/pydata/pandas/issues/1073 -.. _GH506: https://github.com/pydata/pandas/issues/506 -.. _GH1239: https://github.com/pydata/pandas/issues/1239 -.. _GH1394: https://github.com/pydata/pandas/issues/1394 -.. _GH1467: https://github.com/pydata/pandas/issues/1467 -.. _GH179: https://github.com/pydata/pandas/issues/179 -.. _GH1074: https://github.com/pydata/pandas/issues/1074 -.. _GH1079: https://github.com/pydata/pandas/issues/1079 -.. _GH1142: https://github.com/pydata/pandas/issues/1142 -.. _GH1167: https://github.com/pydata/pandas/issues/1167 -.. _GH1048: https://github.com/pydata/pandas/issues/1048 -.. _GH522: https://github.com/pydata/pandas/issues/522 -.. _GH1183: https://github.com/pydata/pandas/issues/1183 -.. _GH1181: https://github.com/pydata/pandas/issues/1181 -.. _GH1217: https://github.com/pydata/pandas/issues/1217 -.. _GH1213: https://github.com/pydata/pandas/issues/1213 -.. _GH1225: https://github.com/pydata/pandas/issues/1225 -.. _GH1081: https://github.com/pydata/pandas/issues/1081 -.. _GH809: https://github.com/pydata/pandas/issues/809 -.. _GH557: https://github.com/pydata/pandas/issues/557 -.. _GH1243: https://github.com/pydata/pandas/issues/1243 -.. _GH969: https://github.com/pydata/pandas/issues/969 -.. _GH1313: https://github.com/pydata/pandas/issues/1313 -.. _GH1307: https://github.com/pydata/pandas/issues/1307 -.. _GH1279: https://github.com/pydata/pandas/issues/1279 -.. _GH973: https://github.com/pydata/pandas/issues/973 -.. _GH1229: https://github.com/pydata/pandas/issues/1229 -.. _GH1026: https://github.com/pydata/pandas/issues/1026 -.. _GH796: https://github.com/pydata/pandas/issues/796 -.. _GH1065: https://github.com/pydata/pandas/issues/1065 -.. _GH1090: https://github.com/pydata/pandas/issues/1090 -.. _GH184: https://github.com/pydata/pandas/issues/184 -.. _GH823: https://github.com/pydata/pandas/issues/823 -.. _GH1365: https://github.com/pydata/pandas/issues/1365 -.. 
_GH1352: https://github.com/pydata/pandas/issues/1352 -.. _GH1432: https://github.com/pydata/pandas/issues/1432 -.. _GH1423: https://github.com/pydata/pandas/issues/1423 -.. _GH1421: https://github.com/pydata/pandas/issues/1421 -.. _GH1401: https://github.com/pydata/pandas/issues/1401 -.. _GH1486: https://github.com/pydata/pandas/issues/1486 -.. _GH1375: https://github.com/pydata/pandas/issues/1375 -.. _GH1361: https://github.com/pydata/pandas/issues/1361 -.. _GH1493: https://github.com/pydata/pandas/issues/1493 -.. _GH1483: https://github.com/pydata/pandas/issues/1483 -.. _GH1513: https://github.com/pydata/pandas/issues/1513 -.. _GH1533: https://github.com/pydata/pandas/issues/1533 -.. _GH1547: https://github.com/pydata/pandas/issues/1547 -.. _GH85: https://github.com/pydata/pandas/issues/85 - - -pandas 0.7.3 -============ - -**Release date:** April 12, 2012 - -**New features / modules** - - - Support for non-unique indexes: indexing and selection, many-to-one and - many-to-many joins (GH1306_) - - Added fixed-width file reader, read_fwf (GH952_) - - Add group_keys argument to groupby to not add group names to MultiIndex in - result of apply (GH938_) - - DataFrame can now accept non-integer label slicing (GH946_). Previously - only DataFrame.ix was able to do so. - - DataFrame.apply now retains name attributes on Series objects (GH983_) - - Numeric DataFrame comparisons with non-numeric values now raises proper - TypeError (GH943_). Previously raise "PandasError: DataFrame constructor - not properly called!" 
- - Add ``kurt`` methods to Series and DataFrame (GH964_) - - Can pass dict of column -> list/set NA values for text parsers (GH754_) - - Allows users specified NA values in text parsers (GH754_) - - Parsers checks for openpyxl dependency and raises ImportError if not found - (GH1007_) - - New factory function to create HDFStore objects that can be used in a with - statement so users do not have to explicitly call HDFStore.close (GH1005_) - - pivot_table is now more flexible with same parameters as groupby (GH941_) - - Added stacked bar plots (GH987_) - - scatter_matrix method in pandas/tools/plotting.py (GH935_) - - DataFrame.boxplot returns plot results for ex-post styling (GH985_) - - Short version number accessible as pandas.version.short_version (GH930_) - - Additional documentation in panel.to_frame (GH942_) - - More informative Series.apply docstring regarding element-wise apply - (GH977_) - - Notes on rpy2 installation (GH1006_) - - Add rotation and font size options to hist method (GH1012_) - - Use exogenous / X variable index in result of OLS.y_predict. Add - OLS.predict method (GH1027_, GH1008_) - -**API Changes** - - - Calling apply on grouped Series, e.g. describe(), will no longer yield - DataFrame by default. Will have to call unstack() to get prior behavior - - NA handling in non-numeric comparisons has been tightened up (GH933_, GH953_) - - No longer assign dummy names key_0, key_1, etc. to groupby index (GH1291_) - -**Bug fixes** - - - Fix logic error when selecting part of a row in a DataFrame with a - MultiIndex index (GH1013_) - - Series comparison with Series of differing length causes crash (GH1016_). - - Fix bug in indexing when selecting section of hierarchically-indexed row - (GH1013_) - - DataFrame.plot(logy=True) has no effect (GH1011_). 
- - Broken arithmetic operations between SparsePanel-Panel (GH1015_) - - Unicode repr issues in MultiIndex with non-ascii characters (GH1010_) - - DataFrame.lookup() returns inconsistent results if exact match not present - (GH1001_) - - DataFrame arithmetic operations not treating None as NA (GH992_) - - DataFrameGroupBy.apply returns incorrect result (GH991_) - - Series.reshape returns incorrect result for multiple dimensions (GH989_) - - Series.std and Series.var ignores ddof parameter (GH934_) - - DataFrame.append loses index names (GH980_) - - DataFrame.plot(kind='bar') ignores color argument (GH958_) - - Inconsistent Index comparison results (GH948_) - - Improper int dtype DataFrame construction from data with NaN (GH846_) - - Removes default 'result' name in grouby results (GH995_) - - DataFrame.from_records no longer mutate input columns (GH975_) - - Use Index name when grouping by it (GH1313_) - -.. _GH1306: https://github.com/pydata/pandas/issues/1306 -.. _GH952: https://github.com/pydata/pandas/issues/952 -.. _GH938: https://github.com/pydata/pandas/issues/938 -.. _GH946: https://github.com/pydata/pandas/issues/946 -.. _GH983: https://github.com/pydata/pandas/issues/983 -.. _GH943: https://github.com/pydata/pandas/issues/943 -.. _GH964: https://github.com/pydata/pandas/issues/964 -.. _GH754: https://github.com/pydata/pandas/issues/754 -.. _GH1007: https://github.com/pydata/pandas/issues/1007 -.. _GH1005: https://github.com/pydata/pandas/issues/1005 -.. _GH941: https://github.com/pydata/pandas/issues/941 -.. _GH987: https://github.com/pydata/pandas/issues/987 -.. _GH935: https://github.com/pydata/pandas/issues/935 -.. _GH985: https://github.com/pydata/pandas/issues/985 -.. _GH930: https://github.com/pydata/pandas/issues/930 -.. _GH942: https://github.com/pydata/pandas/issues/942 -.. _GH977: https://github.com/pydata/pandas/issues/977 -.. _GH1006: https://github.com/pydata/pandas/issues/1006 -.. _GH1012: https://github.com/pydata/pandas/issues/1012 -.. 
_GH1027: https://github.com/pydata/pandas/issues/1027 -.. _GH1008: https://github.com/pydata/pandas/issues/1008 -.. _GH933: https://github.com/pydata/pandas/issues/933 -.. _GH953: https://github.com/pydata/pandas/issues/953 -.. _GH1291: https://github.com/pydata/pandas/issues/1291 -.. _GH1013: https://github.com/pydata/pandas/issues/1013 -.. _GH1016: https://github.com/pydata/pandas/issues/1016 -.. _GH1011: https://github.com/pydata/pandas/issues/1011 -.. _GH1015: https://github.com/pydata/pandas/issues/1015 -.. _GH1010: https://github.com/pydata/pandas/issues/1010 -.. _GH1001: https://github.com/pydata/pandas/issues/1001 -.. _GH992: https://github.com/pydata/pandas/issues/992 -.. _GH991: https://github.com/pydata/pandas/issues/991 -.. _GH989: https://github.com/pydata/pandas/issues/989 -.. _GH934: https://github.com/pydata/pandas/issues/934 -.. _GH980: https://github.com/pydata/pandas/issues/980 -.. _GH958: https://github.com/pydata/pandas/issues/958 -.. _GH948: https://github.com/pydata/pandas/issues/948 -.. _GH846: https://github.com/pydata/pandas/issues/846 -.. _GH995: https://github.com/pydata/pandas/issues/995 -.. _GH975: https://github.com/pydata/pandas/issues/975 -.. _GH1313: https://github.com/pydata/pandas/issues/1313 - - -pandas 0.7.2 -============ - -**Release date:** March 16, 2012 - -**New features / modules** - - - Add additional tie-breaking methods in DataFrame.rank (GH874_) - - Add ascending parameter to rank in Series, DataFrame (GH875_) - - Add sort_columns parameter to allow unsorted plots (GH918_) - - IPython tab completion on GroupBy objects - -**API Changes** - - - Series.sum returns 0 instead of NA when called on an empty - series. 
Analogously for a DataFrame whose rows or columns are length 0 - (GH844_) - -**Improvements to existing features** - - - Don't use groups dict in Grouper.size (GH860_) - - Use khash for Series.value_counts, add raw function to algorithms.py (GH861_) - - Enable column access via attributes on GroupBy (GH882_) - - Enable setting existing columns (only) via attributes on DataFrame, Panel - (GH883_) - - Intercept __builtin__.sum in groupby (GH885_) - - Can pass dict to DataFrame.fillna to use different values per column (GH661_) - - Can select multiple hierarchical groups by passing list of values in .ix - (GH134_) - - Add level keyword to ``drop`` for dropping values from a level (GH159_) - - Add ``coerce_float`` option on DataFrame.from_records (GH893_) - - Raise exception if passed date_parser fails in ``read_csv`` - - Add ``axis`` option to DataFrame.fillna (GH174_) - - Fixes to Panel to make it easier to subclass (GH888_) - -**Bug fixes** - - - Fix overflow-related bugs in groupby (GH850_, GH851_) - - Fix unhelpful error message in parsers (GH856_) - - Better err msg for failed boolean slicing of dataframe (GH859_) - - Series.count cannot accept a string (level name) in the level argument (GH869_) - - Group index platform int check (GH870_) - - concat on axis=1 and ignore_index=True raises TypeError (GH871_) - - Further unicode handling issues resolved (GH795_) - - Fix failure in multiindex-based access in Panel (GH880_) - - Fix DataFrame boolean slice assignment failure (GH881_) - - Fix combineAdd NotImplementedError for SparseDataFrame (GH887_) - - Fix DataFrame.to_html encoding and columns (GH890_, GH891_, GH909_) - - Fix na-filling handling in mixed-type DataFrame (GH910_) - - Fix to DataFrame.set_value with non-existant row/col (GH911_) - - Fix malformed block in groupby when excluding nuisance columns (GH916_) - - Fix inconsistant NA handling in dtype=object arrays (GH925_) - - Fix missing center-of-mass computation in ewmcov (GH862_) - - Don't raise 
exception when opening read-only HDF5 file (GH847_) - - Fix possible out-of-bounds memory access in 0-length Series (GH917_) - -.. _GH874: https://github.com/pydata/pandas/issues/874 -.. _GH875: https://github.com/pydata/pandas/issues/875 -.. _GH893: https://github.com/pydata/pandas/issues/893 -.. _GH918: https://github.com/pydata/pandas/issues/918 -.. _GH844: https://github.com/pydata/pandas/issues/844 -.. _GH860: https://github.com/pydata/pandas/issues/860 -.. _GH861: https://github.com/pydata/pandas/issues/861 -.. _GH882: https://github.com/pydata/pandas/issues/882 -.. _GH883: https://github.com/pydata/pandas/issues/883 -.. _GH885: https://github.com/pydata/pandas/issues/885 -.. _GH661: https://github.com/pydata/pandas/issues/661 -.. _GH134: https://github.com/pydata/pandas/issues/134 -.. _GH159: https://github.com/pydata/pandas/issues/159 -.. _GH174: https://github.com/pydata/pandas/issues/174 -.. _GH888: https://github.com/pydata/pandas/issues/888 -.. _GH850: https://github.com/pydata/pandas/issues/850 -.. _GH851: https://github.com/pydata/pandas/issues/851 -.. _GH856: https://github.com/pydata/pandas/issues/856 -.. _GH859: https://github.com/pydata/pandas/issues/859 -.. _GH869: https://github.com/pydata/pandas/issues/869 -.. _GH870: https://github.com/pydata/pandas/issues/870 -.. _GH871: https://github.com/pydata/pandas/issues/871 -.. _GH795: https://github.com/pydata/pandas/issues/795 -.. _GH880: https://github.com/pydata/pandas/issues/880 -.. _GH881: https://github.com/pydata/pandas/issues/881 -.. _GH887: https://github.com/pydata/pandas/issues/887 -.. _GH890: https://github.com/pydata/pandas/issues/890 -.. _GH891: https://github.com/pydata/pandas/issues/891 -.. _GH909: https://github.com/pydata/pandas/issues/909 -.. _GH910: https://github.com/pydata/pandas/issues/910 -.. _GH911: https://github.com/pydata/pandas/issues/911 -.. _GH916: https://github.com/pydata/pandas/issues/916 -.. _GH925: https://github.com/pydata/pandas/issues/925 -.. 
_GH862: https://github.com/pydata/pandas/issues/862 -.. _GH847: https://github.com/pydata/pandas/issues/847 -.. _GH917: https://github.com/pydata/pandas/issues/917 - - -pandas 0.7.1 -============ - -**Release date:** February 29, 2012 - -**New features / modules** - - - Add ``to_clipboard`` function to pandas namespace for writing objects to - the system clipboard (GH774_) - - Add ``itertuples`` method to DataFrame for iterating through the rows of a - dataframe as tuples (GH818_) - - Add ability to pass fill_value and method to DataFrame and Series align - method (GH806_, GH807_) - - Add fill_value option to reindex, align methods (GH784_) - - Enable concat to produce DataFrame from Series (GH787_) - - Add ``between`` method to Series (GH802_) - - Add HTML representation hook to DataFrame for the IPython HTML notebook - (GH773_) - - Support for reading Excel 2007 XML documents using openpyxl - -**Improvements to existing features** - - - Improve performance and memory usage of fillna on DataFrame - - Can concatenate a list of Series along axis=1 to obtain a DataFrame (GH787_) - -**Bug fixes** - - - Fix memory leak when inserting large number of columns into a single - DataFrame (GH790_) - - Appending length-0 DataFrame with new columns would not result in those new - columns being part of the resulting concatenated DataFrame (GH782_) - - Fixed groupby corner case when passing dictionary grouper and as_index is - False (GH819_) - - Fixed bug whereby bool array sometimes had object dtype (GH820_) - - Fix exception thrown on np.diff (GH816_) - - Fix to_records where columns are non-strings (GH822_) - - Fix Index.intersection where indices have incomparable types (GH811_) - - Fix ExcelFile throwing an exception for two-line file (GH837_) - - Add clearer error message in csv parser (GH835_) - - Fix loss of fractional seconds in HDFStore (GH513_) - - Fix DataFrame join where columns have datetimes (GH787_) - - Work around numpy performance issue in take (GH817_) - - 
Improve comparison operations for NA-friendliness (GH801_) - - Fix indexing operation for floating point values (GH780_, GH798_) - - Fix groupby case resulting in malformed dataframe (GH814_) - - Fix behavior of reindex of Series dropping name (GH812_) - - Improve on redudant groupby computation (GH775_) - - Catch possible NA assignment to int/bool series with exception (GH839_) - -.. _GH774: https://github.com/pydata/pandas/issues/774 -.. _GH818: https://github.com/pydata/pandas/issues/818 -.. _GH806: https://github.com/pydata/pandas/issues/806 -.. _GH807: https://github.com/pydata/pandas/issues/807 -.. _GH784: https://github.com/pydata/pandas/issues/784 -.. _GH787: https://github.com/pydata/pandas/issues/787 -.. _GH802: https://github.com/pydata/pandas/issues/802 -.. _GH773: https://github.com/pydata/pandas/issues/773 -.. _GH790: https://github.com/pydata/pandas/issues/790 -.. _GH782: https://github.com/pydata/pandas/issues/782 -.. _GH819: https://github.com/pydata/pandas/issues/819 -.. _GH820: https://github.com/pydata/pandas/issues/820 -.. _GH816: https://github.com/pydata/pandas/issues/816 -.. _GH822: https://github.com/pydata/pandas/issues/822 -.. _GH811: https://github.com/pydata/pandas/issues/811 -.. _GH837: https://github.com/pydata/pandas/issues/837 -.. _GH835: https://github.com/pydata/pandas/issues/835 -.. _GH513: https://github.com/pydata/pandas/issues/513 -.. _GH817: https://github.com/pydata/pandas/issues/817 -.. _GH801: https://github.com/pydata/pandas/issues/801 -.. _GH780: https://github.com/pydata/pandas/issues/780 -.. _GH798: https://github.com/pydata/pandas/issues/798 -.. _GH814: https://github.com/pydata/pandas/issues/814 -.. _GH812: https://github.com/pydata/pandas/issues/812 -.. _GH775: https://github.com/pydata/pandas/issues/775 -.. 
_GH839: https://github.com/pydata/pandas/issues/839 - - -pandas 0.7.0 -============ - -**Release date:** 2/9/2012 - -**New features / modules** - - - New ``merge`` function for efficiently performing full gamut of database / - relational-algebra operations. Refactored existing join methods to use the - new infrastructure, resulting in substantial performance gains (GH220_, - GH249_, GH267_) - - New ``concat`` function for concatenating DataFrame or Panel objects along - an axis. Can form union or intersection of the other axes. Improves - performance of ``DataFrame.append`` (GH468_, GH479_, GH273_) - - Handle differently-indexed output values in ``DataFrame.apply`` (GH498_) - - Can pass list of dicts (e.g., a list of shallow JSON objects) to DataFrame - constructor (GH526_) - - Add ``reorder_levels`` method to Series and DataFrame (GH534_) - - Add dict-like ``get`` function to DataFrame and Panel (GH521_) - - ``DataFrame.iterrows`` method for efficiently iterating through the rows of - a DataFrame - - Added ``DataFrame.to_panel`` with code adapted from ``LongPanel.to_long`` - - ``reindex_axis`` method added to DataFrame - - Add ``level`` option to binary arithmetic functions on ``DataFrame`` and - ``Series`` - - Add ``level`` option to the ``reindex`` and ``align`` methods on Series and - DataFrame for broadcasting values across a level (GH542_, GH552_, others) - - Add attribute-based item access to ``Panel`` and add IPython completion (PR - GH554_) - - Add ``logy`` option to ``Series.plot`` for log-scaling on the Y axis - - Add ``index``, ``header``, and ``justify`` options to - ``DataFrame.to_string``. 
Add option to (GH570_, GH571_) - - Can pass multiple DataFrames to ``DataFrame.join`` to join on index (GH115_) - - Can pass multiple Panels to ``Panel.join`` (GH115_) - - Can pass multiple DataFrames to `DataFrame.append` to concatenate (stack) - and multiple Series to ``Series.append`` too - - Added ``justify`` argument to ``DataFrame.to_string`` to allow different - alignment of column headers - - Add ``sort`` option to GroupBy to allow disabling sorting of the group keys - for potential speedups (GH595_) - - Can pass MaskedArray to Series constructor (GH563_) - - Add Panel item access via attributes and IPython completion (GH554_) - - Implement ``DataFrame.lookup``, fancy-indexing analogue for retrieving - values given a sequence of row and column labels (GH338_) - - Add ``verbose`` option to ``read_csv`` and ``read_table`` to show number of - NA values inserted in non-numeric columns (GH614_) - - Can pass a list of dicts or Series to ``DataFrame.append`` to concatenate - multiple rows (GH464_) - - Add ``level`` argument to ``DataFrame.xs`` for selecting data from other - MultiIndex levels. 
Can take one or more levels with potentially a tuple of - keys for flexible retrieval of data (GH371_, GH629_) - - New ``crosstab`` function for easily computing frequency tables (GH170_) - - Can pass a list of functions to aggregate with groupby on a DataFrame, - yielding an aggregated result with hierarchical columns (GH166_) - - Add integer-indexing functions ``iget`` in Series and ``irow`` / ``iget`` - in DataFrame (GH628_) - - Add new ``Series.unique`` function, significantly faster than - ``numpy.unique`` (GH658_) - - Add new ``cummin`` and ``cummax`` instance methods to ``Series`` and - ``DataFrame`` (GH647_) - - Add new ``value_range`` function to return min/max of a dataframe (GH288_) - - Add ``drop`` parameter to ``reset_index`` method of ``DataFrame`` and added - method to ``Series`` as well (GH699_) - - Add ``isin`` method to Index objects, works just like ``Series.isin`` (GH - GH657_) - - Implement array interface on Panel so that ufuncs work (re: GH740_) - - Add ``sort`` option to ``DataFrame.join`` (GH731_) - - Improved handling of NAs (propagation) in binary operations with - dtype=object arrays (GH737_) - - Add ``abs`` method to Pandas objects - - Added ``algorithms`` module to start collecting central algos - -**API Changes** - - - Label-indexing with integer indexes now raises KeyError if a label is not - found instead of falling back on location-based indexing (GH700_) - - Label-based slicing via ``ix`` or ``[]`` on Series will now only work if - exact matches for the labels are found or if the index is monotonic (for - range selections) - - Label-based slicing and sequences of labels can be passed to ``[]`` on a - Series for both getting and setting (GH86_) - - `[]` operator (``__getitem__`` and ``__setitem__``) will raise KeyError - with integer indexes when an index is not contained in the index. The prior - behavior would fall back on position-based indexing if a key was not found - in the index which would lead to subtle bugs. 
This is now consistent with - the behavior of ``.ix`` on DataFrame and friends (GH328_) - - Rename ``DataFrame.delevel`` to ``DataFrame.reset_index`` and add - deprecation warning - - `Series.sort` (an in-place operation) called on a Series which is a view on - a larger array (e.g. a column in a DataFrame) will generate an Exception to - prevent accidentally modifying the data source (GH316_) - - Refactor to remove deprecated ``LongPanel`` class (GH552_) - - Deprecated ``Panel.to_long``, renamed to ``to_frame`` - - Deprecated ``colSpace`` argument in ``DataFrame.to_string``, renamed to - ``col_space`` - - Rename ``precision`` to ``accuracy`` in engineering float formatter (GH - GH395_) - - The default delimiter for ``read_csv`` is comma rather than letting - ``csv.Sniffer`` infer it - - Rename ``col_or_columns`` argument in ``DataFrame.drop_duplicates`` (GH - GH734_) - -**Improvements to existing features** - - - Better error message in DataFrame constructor when passed column labels - don't match data (GH497_) - - Substantially improve performance of multi-GroupBy aggregation when a - Python function is passed, reuse ndarray object in Cython (GH496_) - - Can store objects indexed by tuples and floats in HDFStore (GH492_) - - Don't print length by default in Series.to_string, add `length` option (GH - GH489_) - - Improve Cython code for multi-groupby to aggregate without having to sort - the data (GH93_) - - Improve MultiIndex reindexing speed by storing tuples in the MultiIndex, - test for backwards unpickling compatibility - - Improve column reindexing performance by using specialized Cython take - function - - Further performance tweaking of Series.__getitem__ for standard use cases - - Avoid Index dict creation in some cases (i.e. when getting slices, etc.), - regression from prior versions - - Friendlier error message in setup.py if NumPy not installed - - Use common set of NA-handling operations (sum, mean, etc.) 
in Panel class - also (GH536_) - - Default name assignment when calling ``reset_index`` on DataFrame with a - regular (non-hierarchical) index (GH476_) - - Use Cythonized groupers when possible in Series/DataFrame stat ops with - ``level`` parameter passed (GH545_) - - Ported skiplist data structure to C to speed up ``rolling_median`` by about - 5-10x in most typical use cases (GH374_) - - Some performance enhancements in constructing a Panel from a dict of - DataFrame objects - - Made ``Index._get_duplicates`` a public method by removing the underscore - - Prettier printing of floats, and column spacing fix (GH395_, GH571_) - - Add ``bold_rows`` option to DataFrame.to_html (GH586_) - - Improve the performance of ``DataFrame.sort_index`` by up to 5x or more - when sorting by multiple columns - - Substantially improve performance of DataFrame and Series constructors when - passed a nested dict or dict, respectively (GH540_, GH621_) - - Modified setup.py so that pip / setuptools will install dependencies (GH - GH507_, various pull requests) - - Unstack called on DataFrame with non-MultiIndex will return Series (GH - GH477_) - - Improve DataFrame.to_string and console formatting to be more consistent in - the number of displayed digits (GH395_) - - Use bottleneck if available for performing NaN-friendly statistical - operations that it implemented (GH91_) - - Monkey-patch context to traceback in ``DataFrame.apply`` to indicate which - row/column the function application failed on (GH614_) - - Improved ability of read_table and read_clipboard to parse - console-formatted DataFrames (can read the row of index names, etc.) 
- - Can pass list of group labels (without having to convert to an ndarray - yourself) to ``groupby`` in some cases (GH659_) - - Use ``kind`` argument to Series.order for selecting different sort kinds - (GH668_) - - Add option to Series.to_csv to omit the index (GH684_) - - Add ``delimiter`` as an alternative to ``sep`` in ``read_csv`` and other - parsing functions - - Substantially improved performance of groupby on DataFrames with many - columns by aggregating blocks of columns all at once (GH745_) - - Can pass a file handle or StringIO to Series/DataFrame.to_csv (GH765_) - - Can pass sequence of integers to DataFrame.irow(icol) and Series.iget, (GH - GH654_) - - Prototypes for some vectorized string functions - - Add float64 hash table to solve the Series.unique problem with NAs (GH714_) - - Memoize objects when reading from file to reduce memory footprint - - Can get and set a column of a DataFrame with hierarchical columns - containing "empty" ('') lower levels without passing the empty levels (PR - GH768_) - -**Bug fixes** - - - Raise exception in out-of-bounds indexing of Series instead of - seg-faulting, regression from earlier releases (GH495_) - - Fix error when joining DataFrames of different dtypes within the same - typeclass (e.g. 
float32 and float64) (GH486_) - - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH - GH487_) - - Preserve index names in Index.union (GH501_) - - Fix bug in Index joining causing subclass information (like DateRange type) - to be lost in some cases (GH500_) - - Accept empty list as input to DataFrame constructor, regression from 0.6.0 - (GH491_) - - Can output DataFrame and Series with ndarray objects in a dtype=object - array (GH490_) - - Return empty string from Series.to_string when called on empty Series (GH - GH488_) - - Fix exception passing empty list to DataFrame.from_records - - Fix Index.format bug (excluding name field) with datetimes with time info - - Fix scalar value access in Series to always return NumPy scalars, - regression from prior versions (GH510_) - - Handle rows skipped at beginning of file in read_* functions (GH505_) - - Handle improper dtype casting in ``set_value`` methods - - Unary '-' / __neg__ operator on DataFrame was returning integer values - - Unbox 0-dim ndarrays from certain operators like all, any in Series - - Fix handling of missing columns (was combine_first-specific) in - DataFrame.combine for general case (GH529_) - - Fix type inference logic with boolean lists and arrays in DataFrame indexing - - Use centered sum of squares in R-square computation if entity_effects=True - in panel regression - - Handle all NA case in Series.{corr, cov}, was raising exception (GH548_) - - Aggregating by multiple levels with ``level`` argument to DataFrame, Series - stat method, was broken (GH545_) - - Fix Cython buf when converter passed to read_csv produced a numeric array - (buffer dtype mismatch when passed to Cython type inference function) (GH - GH546_) - - Fix exception when setting scalar value using .ix on a DataFrame with a - MultiIndex (GH551_) - - Fix outer join between two DateRanges with different offsets that returned - an invalid DateRange - - Cleanup DataFrame.from_records failure where index argument 
is an integer - - Fix Data.from_records failure when passed a dictionary - - Fix NA handling in {Series, DataFrame}.rank with non-floating point dtypes - - Fix bug related to integer type-checking in .ix-based indexing - - Handle non-string index name passed to DataFrame.from_records - - DataFrame.insert caused the columns name(s) field to be discarded (GH527_) - - Fix erroneous in monotonic many-to-one left joins - - Fix DataFrame.to_string to remove extra column white space (GH571_) - - Format floats to default to same number of digits (GH395_) - - Added decorator to copy docstring from one function to another (GH449_) - - Fix error in monotonic many-to-one left joins - - Fix __eq__ comparison between DateOffsets with different relativedelta - keywords passed - - Fix exception caused by parser converter returning strings (GH583_) - - Fix MultiIndex formatting bug with integer names (GH601_) - - Fix bug in handling of non-numeric aggregates in Series.groupby (GH612_) - - Fix TypeError with tuple subclasses (e.g. 
namedtuple) in - DataFrame.from_records (GH611_) - - Catch misreported console size when running IPython within Emacs - - Fix minor bug in pivot table margins, loss of index names and length-1 - 'All' tuple in row labels - - Add support for legacy WidePanel objects to be read from HDFStore - - Fix out-of-bounds segfault in pad_object and backfill_object methods when - either source or target array are empty - - Could not create a new column in a DataFrame from a list of tuples - - Fix bugs preventing SparseDataFrame and SparseSeries working with groupby - (GH666_) - - Use sort kind in Series.sort / argsort (GH668_) - - Fix DataFrame operations on non-scalar, non-pandas objects (GH672_) - - Don't convert DataFrame column to integer type when passing integer to - __setitem__ (GH669_) - - Fix downstream bug in pivot_table caused by integer level names in - MultiIndex (GH678_) - - Fix SparseSeries.combine_first when passed a dense Series (GH687_) - - Fix performance regression in HDFStore loading when DataFrame or Panel - stored in table format with datetimes - - Raise Exception in DateRange when offset with n=0 is passed (GH683_) - - Fix get/set inconsistency with .ix property and integer location but - non-integer index (GH707_) - - Use right dropna function for SparseSeries. 
Return dense Series for NA fill - value (GH730_) - - Fix Index.format bug causing incorrectly string-formatted Series with - datetime indexes (GH726_, GH758_) - - Fix errors caused by object dtype arrays passed to ols (GH759_) - - Fix error where column names lost when passing list of labels to - DataFrame.__getitem__, (GH662_) - - Fix error whereby top-level week iterator overwrote week instance - - Fix circular reference causing memory leak in sparse array / series / - frame, (GH663_) - - Fix integer-slicing from integers-as-floats (GH670_) - - Fix zero division errors in nanops from object dtype arrays in all NA case - (GH676_) - - Fix csv encoding when using unicode (GH705_, GH717_, GH738_) - - Fix assumption that each object contains every unique block type in concat, - (GH708_) - - Fix sortedness check of multiindex in to_panel (GH719_, 720) - - Fix that None was not treated as NA in PyObjectHashtable - - Fix hashing dtype because of endianness confusion (GH747_, GH748_) - - Fix SparseSeries.dropna to return dense Series in case of NA fill value (GH - GH730_) - - Use map_infer instead of np.vectorize. 
handle NA sentinels if converter - yields numeric array, (GH753_) - - Fixes and improvements to DataFrame.rank (GH742_) - - Fix catching AttributeError instead of NameError for bottleneck - - Try to cast non-MultiIndex to better dtype when calling reset_index (GH726_ - GH440_) - - Fix #1.QNAN0' float bug on 2.6/win64 - - Allow subclasses of dicts in DataFrame constructor, with tests - - Fix problem whereby set_index destroys column multiindex (GH764_) - - Hack around bug in generating DateRange from naive DateOffset (GH770_) - - Fix bug in DateRange.intersection causing incorrect results with some - overlapping ranges (GH771_) - -Thanks ------- -- Craig Austin -- Chris Billington -- Marius Cobzarenco -- Mario Gamboa-Cavazos -- Hans-Martin Gaudecker -- Arthur Gerigk -- Yaroslav Halchenko -- Jeff Hammerbacher -- Matt Harrison -- Andreas Hilboll -- Luc Kesters -- Adam Klein -- Gregg Lind -- Solomon Negusse -- Wouter Overmeire -- Christian Prinoth -- Jeff Reback -- Sam Reckoner -- Craig Reeson -- Jan Schulz -- Skipper Seabold -- Ted Square -- Graham Taylor -- Aman Thakral -- Chris Uga -- Dieter Vandenbussche -- Texas P. -- Pinxing Ye -- ... and everyone I forgot - -.. _GH220: https://github.com/pydata/pandas/issues/220 -.. _GH249: https://github.com/pydata/pandas/issues/249 -.. _GH267: https://github.com/pydata/pandas/issues/267 -.. _GH468: https://github.com/pydata/pandas/issues/468 -.. _GH479: https://github.com/pydata/pandas/issues/479 -.. _GH273: https://github.com/pydata/pandas/issues/273 -.. _GH498: https://github.com/pydata/pandas/issues/498 -.. _GH526: https://github.com/pydata/pandas/issues/526 -.. _GH534: https://github.com/pydata/pandas/issues/534 -.. _GH521: https://github.com/pydata/pandas/issues/521 -.. _GH542: https://github.com/pydata/pandas/issues/542 -.. _GH552: https://github.com/pydata/pandas/issues/552 -.. _GH554: https://github.com/pydata/pandas/issues/554 -.. _GH570: https://github.com/pydata/pandas/issues/570 -.. 
_GH571: https://github.com/pydata/pandas/issues/571 -.. _GH115: https://github.com/pydata/pandas/issues/115 -.. _GH595: https://github.com/pydata/pandas/issues/595 -.. _GH563: https://github.com/pydata/pandas/issues/563 -.. _GH338: https://github.com/pydata/pandas/issues/338 -.. _GH614: https://github.com/pydata/pandas/issues/614 -.. _GH464: https://github.com/pydata/pandas/issues/464 -.. _GH371: https://github.com/pydata/pandas/issues/371 -.. _GH629: https://github.com/pydata/pandas/issues/629 -.. _GH170: https://github.com/pydata/pandas/issues/170 -.. _GH166: https://github.com/pydata/pandas/issues/166 -.. _GH628: https://github.com/pydata/pandas/issues/628 -.. _GH658: https://github.com/pydata/pandas/issues/658 -.. _GH647: https://github.com/pydata/pandas/issues/647 -.. _GH288: https://github.com/pydata/pandas/issues/288 -.. _GH699: https://github.com/pydata/pandas/issues/699 -.. _GH657: https://github.com/pydata/pandas/issues/657 -.. _GH740: https://github.com/pydata/pandas/issues/740 -.. _GH731: https://github.com/pydata/pandas/issues/731 -.. _GH737: https://github.com/pydata/pandas/issues/737 -.. _GH700: https://github.com/pydata/pandas/issues/700 -.. _GH328: https://github.com/pydata/pandas/issues/328 -.. _GH316: https://github.com/pydata/pandas/issues/316 -.. _GH395: https://github.com/pydata/pandas/issues/395 -.. _GH734: https://github.com/pydata/pandas/issues/734 -.. _GH497: https://github.com/pydata/pandas/issues/497 -.. _GH496: https://github.com/pydata/pandas/issues/496 -.. _GH492: https://github.com/pydata/pandas/issues/492 -.. _GH489: https://github.com/pydata/pandas/issues/489 -.. _GH536: https://github.com/pydata/pandas/issues/536 -.. _GH476: https://github.com/pydata/pandas/issues/476 -.. _GH545: https://github.com/pydata/pandas/issues/545 -.. _GH374: https://github.com/pydata/pandas/issues/374 -.. _GH586: https://github.com/pydata/pandas/issues/586 -.. _GH540: https://github.com/pydata/pandas/issues/540 -.. 
_GH621: https://github.com/pydata/pandas/issues/621 -.. _GH507: https://github.com/pydata/pandas/issues/507 -.. _GH477: https://github.com/pydata/pandas/issues/477 -.. _GH659: https://github.com/pydata/pandas/issues/659 -.. _GH668: https://github.com/pydata/pandas/issues/668 -.. _GH684: https://github.com/pydata/pandas/issues/684 -.. _GH745: https://github.com/pydata/pandas/issues/745 -.. _GH765: https://github.com/pydata/pandas/issues/765 -.. _GH654: https://github.com/pydata/pandas/issues/654 -.. _GH714: https://github.com/pydata/pandas/issues/714 -.. _GH768: https://github.com/pydata/pandas/issues/768 -.. _GH495: https://github.com/pydata/pandas/issues/495 -.. _GH486: https://github.com/pydata/pandas/issues/486 -.. _GH487: https://github.com/pydata/pandas/issues/487 -.. _GH501: https://github.com/pydata/pandas/issues/501 -.. _GH500: https://github.com/pydata/pandas/issues/500 -.. _GH491: https://github.com/pydata/pandas/issues/491 -.. _GH490: https://github.com/pydata/pandas/issues/490 -.. _GH488: https://github.com/pydata/pandas/issues/488 -.. _GH510: https://github.com/pydata/pandas/issues/510 -.. _GH505: https://github.com/pydata/pandas/issues/505 -.. _GH529: https://github.com/pydata/pandas/issues/529 -.. _GH548: https://github.com/pydata/pandas/issues/548 -.. _GH546: https://github.com/pydata/pandas/issues/546 -.. _GH551: https://github.com/pydata/pandas/issues/551 -.. _GH527: https://github.com/pydata/pandas/issues/527 -.. _GH449: https://github.com/pydata/pandas/issues/449 -.. _GH583: https://github.com/pydata/pandas/issues/583 -.. _GH601: https://github.com/pydata/pandas/issues/601 -.. _GH612: https://github.com/pydata/pandas/issues/612 -.. _GH611: https://github.com/pydata/pandas/issues/611 -.. _GH666: https://github.com/pydata/pandas/issues/666 -.. _GH672: https://github.com/pydata/pandas/issues/672 -.. _GH669: https://github.com/pydata/pandas/issues/669 -.. _GH678: https://github.com/pydata/pandas/issues/678 -.. 
_GH687: https://github.com/pydata/pandas/issues/687 -.. _GH683: https://github.com/pydata/pandas/issues/683 -.. _GH707: https://github.com/pydata/pandas/issues/707 -.. _GH730: https://github.com/pydata/pandas/issues/730 -.. _GH759: https://github.com/pydata/pandas/issues/759 -.. _GH662: https://github.com/pydata/pandas/issues/662 -.. _GH663: https://github.com/pydata/pandas/issues/663 -.. _GH670: https://github.com/pydata/pandas/issues/670 -.. _GH676: https://github.com/pydata/pandas/issues/676 -.. _GH705: https://github.com/pydata/pandas/issues/705 -.. _GH717: https://github.com/pydata/pandas/issues/717 -.. _GH738: https://github.com/pydata/pandas/issues/738 -.. _GH708: https://github.com/pydata/pandas/issues/708 -.. _GH719: https://github.com/pydata/pandas/issues/719 -.. _GH747: https://github.com/pydata/pandas/issues/747 -.. _GH748: https://github.com/pydata/pandas/issues/748 -.. _GH753: https://github.com/pydata/pandas/issues/753 -.. _GH742: https://github.com/pydata/pandas/issues/742 -.. _GH726: https://github.com/pydata/pandas/issues/726 -.. _GH440: https://github.com/pydata/pandas/issues/440 -.. _GH764: https://github.com/pydata/pandas/issues/764 -.. _GH770: https://github.com/pydata/pandas/issues/770 -.. _GH771: https://github.com/pydata/pandas/issues/771 -.. _GH758: https://github.com/pydata/pandas/issues/758 -.. _GH86: https://github.com/pydata/pandas/issues/86 -.. _GH91: https://github.com/pydata/pandas/issues/91 -.. _GH93: https://github.com/pydata/pandas/issues/93 - - -pandas 0.6.1 -============ - -**Release date:** 12/13/2011 - -**API Changes** - - - Rename `names` argument in DataFrame.from_records to `columns`. 
Add - deprecation warning - - Boolean get/set operations on Series with boolean Series will reindex - instead of requiring that the indexes be exactly equal (GH429_) - -**New features / modules** - - - Can pass Series to DataFrame.append with ignore_index=True for appending a - single row (GH430_) - - Add Spearman and Kendall correlation options to Series.corr and - DataFrame.corr (GH428_) - - Add new `get_value` and `set_value` methods to Series, DataFrame, and Panel - to very low-overhead access to scalar elements. df.get_value(row, column) - is about 3x faster than df[column][row] by handling fewer cases (GH437_, - GH438_). Add similar methods to sparse data structures for compatibility - - Add Qt table widget to sandbox (GH435_) - - DataFrame.align can accept Series arguments, add axis keyword (GH461_) - - Implement new SparseList and SparseArray data structures. SparseSeries now - derives from SparseArray (GH463_) - - max_columns / max_rows options in set_printoptions (GH453_) - - Implement Series.rank and DataFrame.rank, fast versions of - scipy.stats.rankdata (GH428_) - - Implement DataFrame.from_items alternate constructor (GH444_) - - DataFrame.convert_objects method for inferring better dtypes for object - columns (GH302_) - - Add rolling_corr_pairwise function for computing Panel of correlation - matrices (GH189_) - - Add `margins` option to `pivot_table` for computing subgroup aggregates (GH - GH114_) - - Add `Series.from_csv` function (GH482_) - -**Improvements to existing features** - - - Improve memory usage of `DataFrame.describe` (do not copy data - unnecessarily) (GH425_) - - Use same formatting function for outputting floating point Series to console - as in DataFrame (GH420_) - - DataFrame.delevel will try to infer better dtype for new columns (GH440_) - - Exclude non-numeric types in DataFrame.{corr, cov} - - Override Index.astype to enable dtype casting (GH412_) - - Use same float formatting function for Series.__repr__ (GH420_) - - Use 
available console width to output DataFrame columns (GH453_) - - Accept ndarrays when setting items in Panel (GH452_) - - Infer console width when printing __repr__ of DataFrame to console (PR - GH453_) - - Optimize scalar value lookups in the general case by 25% or more in Series - and DataFrame - - Can pass DataFrame/DataFrame and DataFrame/Series to - rolling_corr/rolling_cov (GH462_) - - Fix performance regression in cross-sectional count in DataFrame, affecting - DataFrame.dropna speed - - Column deletion in DataFrame copies no data (computes views on blocks) (GH - GH158_) - - MultiIndex.get_level_values can take the level name - - More helpful error message when DataFrame.plot fails on one of the columns - (GH478_) - - Improve performance of DataFrame.{index, columns} attribute lookup - -**Bug fixes** - - - Fix O(K^2) memory leak caused by inserting many columns without - consolidating, had been present since 0.4.0 (GH467_) - - `DataFrame.count` should return Series with zero instead of NA with length-0 - axis (GH423_) - - Fix Yahoo! Finance API usage in pandas.io.data (GH419_, GH427_) - - Fix upstream bug causing failure in Series.align with empty Series (GH434_) - - Function passed to DataFrame.apply can return a list, as long as it's the - right length. 
Regression from 0.4 (GH432_) - - Don't "accidentally" upcast scalar values when indexing using .ix (GH431_) - - Fix groupby exception raised with as_index=False and single column selected - (GH421_) - - Implement DateOffset.__ne__ causing downstream bug (GH456_) - - Fix __doc__-related issue when converting py -> pyo with py2exe - - Bug fix in left join Cython code with duplicate monotonic labels - - Fix bug when unstacking multiple levels described in GH451_ - - Exclude NA values in dtype=object arrays, regression from 0.5.0 (GH469_) - - Use Cython map_infer function in DataFrame.applymap to properly infer - output type, handle tuple return values and other things that were breaking - (GH465_) - - Handle floating point index values in HDFStore (GH454_) - - Fixed stale column reference bug (cached Series object) caused by type - change / item deletion in DataFrame (GH473_) - - Index.get_loc should always raise Exception when there are duplicates - - Handle differently-indexed Series input to DataFrame constructor (GH475_) - - Omit nuisance columns in multi-groupby with Python function - - Buglet in handling of single grouping in general apply - - Handle type inference properly when passing list of lists or tuples to - DataFrame constructor (GH484_) - - Preserve Index / MultiIndex names in GroupBy.apply concatenation step (GH - GH481_) - -Thanks ------- -- Ralph Bean -- Luca Beltrame -- Marius Cobzarenco -- Andreas Hilboll -- Jev Kuznetsov -- Adam Lichtenstein -- Wouter Overmeire -- Fernando Perez -- Nathan Pinger -- Christian Prinoth -- Alex Reyfman -- Joon Ro -- Chang She -- Ted Square -- Chris Uga -- Dieter Vandenbussche - -.. _GH429: https://github.com/pydata/pandas/issues/429 -.. _GH430: https://github.com/pydata/pandas/issues/430 -.. _GH428: https://github.com/pydata/pandas/issues/428 -.. _GH437: https://github.com/pydata/pandas/issues/437 -.. _GH438: https://github.com/pydata/pandas/issues/438 -.. _GH435: https://github.com/pydata/pandas/issues/435 -.. 
_GH461: https://github.com/pydata/pandas/issues/461 -.. _GH463: https://github.com/pydata/pandas/issues/463 -.. _GH453: https://github.com/pydata/pandas/issues/453 -.. _GH444: https://github.com/pydata/pandas/issues/444 -.. _GH302: https://github.com/pydata/pandas/issues/302 -.. _GH189: https://github.com/pydata/pandas/issues/189 -.. _GH114: https://github.com/pydata/pandas/issues/114 -.. _GH482: https://github.com/pydata/pandas/issues/482 -.. _GH425: https://github.com/pydata/pandas/issues/425 -.. _GH420: https://github.com/pydata/pandas/issues/420 -.. _GH440: https://github.com/pydata/pandas/issues/440 -.. _GH412: https://github.com/pydata/pandas/issues/412 -.. _GH452: https://github.com/pydata/pandas/issues/452 -.. _GH462: https://github.com/pydata/pandas/issues/462 -.. _GH158: https://github.com/pydata/pandas/issues/158 -.. _GH478: https://github.com/pydata/pandas/issues/478 -.. _GH467: https://github.com/pydata/pandas/issues/467 -.. _GH423: https://github.com/pydata/pandas/issues/423 -.. _GH419: https://github.com/pydata/pandas/issues/419 -.. _GH427: https://github.com/pydata/pandas/issues/427 -.. _GH434: https://github.com/pydata/pandas/issues/434 -.. _GH432: https://github.com/pydata/pandas/issues/432 -.. _GH431: https://github.com/pydata/pandas/issues/431 -.. _GH421: https://github.com/pydata/pandas/issues/421 -.. _GH456: https://github.com/pydata/pandas/issues/456 -.. _GH451: https://github.com/pydata/pandas/issues/451 -.. _GH469: https://github.com/pydata/pandas/issues/469 -.. _GH465: https://github.com/pydata/pandas/issues/465 -.. _GH454: https://github.com/pydata/pandas/issues/454 -.. _GH473: https://github.com/pydata/pandas/issues/473 -.. _GH475: https://github.com/pydata/pandas/issues/475 -.. _GH484: https://github.com/pydata/pandas/issues/484 -.. 
_GH481: https://github.com/pydata/pandas/issues/481 - - -pandas 0.6.0 -============ - -**Release date:** 11/25/2011 - -**API Changes** - - - Arithmetic methods like `sum` will attempt to sum dtype=object values by - default instead of excluding them (GH382_) - -**New features / modules** - - - Add `melt` function to `pandas.core.reshape` - - Add `level` parameter to group by level in Series and DataFrame - descriptive statistics (GH313_) - - Add `head` and `tail` methods to Series, analogous to DataFrame (PR - GH296_) - - Add `Series.isin` function which checks if each value is contained in a - passed sequence (GH289_) - - Add `float_format` option to `Series.to_string` - - Add `skip_footer` (GH291_) and `converters` (GH343_) options to - `read_csv` and `read_table` - - Add proper, tested weighted least squares to standard and panel OLS (GH - GH303_) - - Add `drop_duplicates` and `duplicated` functions for removing duplicate - DataFrame rows and checking for duplicate rows, respectively (GH319_) - - Implement logical (boolean) operators &, |, ^ on DataFrame (GH347_) - - Add `Series.mad`, mean absolute deviation, matching DataFrame - - Add `QuarterEnd` DateOffset (GH321_) - - Add matrix multiplication function `dot` to DataFrame (GH65_) - - Add `orient` option to `Panel.from_dict` to ease creation of mixed-type - Panels (GH359_, GH301_) - - Add `DataFrame.from_dict` with similar `orient` option - - Can now pass list of tuples or list of lists to `DataFrame.from_records` - for fast conversion to DataFrame (GH357_) - - Can pass multiple levels to groupby, e.g. 
`df.groupby(level=[0, 1])` (GH - GH103_) - - Can sort by multiple columns in `DataFrame.sort_index` (GH92_, GH362_) - - Add fast `get_value` and `put_value` methods to DataFrame and - micro-performance tweaks (GH360_) - - Add `cov` instance methods to Series and DataFrame (GH194_, GH362_) - - Add bar plot option to `DataFrame.plot` (GH348_) - - Add `idxmin` and `idxmax` functions to Series and DataFrame for computing - index labels achieving maximum and minimum values (GH286_) - - Add `read_clipboard` function for parsing DataFrame from OS clipboard, - should work across platforms (GH300_) - - Add `nunique` function to Series for counting unique elements (GH297_) - - DataFrame constructor will use Series name if no columns passed (GH373_) - - Support regular expressions and longer delimiters in read_table/read_csv, - but does not handle quoted strings yet (GH364_) - - Add `DataFrame.to_html` for formatting DataFrame to HTML (GH387_) - - MaskedArray can be passed to DataFrame constructor and masked values will be - converted to NaN (GH396_) - - Add `DataFrame.boxplot` function (GH368_, others) - - Can pass extra args, kwds to DataFrame.apply (GH376_) - -**Improvements to existing features** - - - Raise more helpful exception if date parsing fails in DateRange (GH298_) - - Vastly improved performance of GroupBy on axes with a MultiIndex (GH299_) - - Print level names in hierarchical index in Series repr (GH305_) - - Return DataFrame when performing GroupBy on selected column and - as_index=False (GH308_) - - Can pass vector to `on` argument in `DataFrame.join` (GH312_) - - Don't show Series name if it's None in the repr, also omit length for short - Series (GH317_) - - Show legend by default in `DataFrame.plot`, add `legend` boolean flag (GH - GH324_) - - Significantly improved performance of `Series.order`, which also makes - np.unique called on a Series faster (GH327_) - - Faster cythonized count by level in Series and DataFrame (GH341_) - - Raise exception if 
dateutil 2.0 installed on Python 2.x runtime (GH346_) - - Significant GroupBy performance enhancement with multiple keys with many - "empty" combinations - - New Cython vectorized function `map_infer` speeds up `Series.apply` and - `Series.map` significantly when passed elementwise Python function, - motivated by GH355_ - - Cythonized `cache_readonly`, resulting in substantial micro-performance - enhancements throughout the codebase (GH361_) - - Special Cython matrix iterator for applying arbitrary reduction operations - with 3-5x better performance than `np.apply_along_axis` (GH309_) - - Add `raw` option to `DataFrame.apply` for getting better performance when - the passed function only requires an ndarray (GH309_) - - Improve performance of `MultiIndex.from_tuples` - - Can pass multiple levels to `stack` and `unstack` (GH370_) - - Can pass multiple values columns to `pivot_table` (GH381_) - - Can call `DataFrame.delevel` with standard Index with name set (GH393_) - - Use Series name in GroupBy for result index (GH363_) - - Refactor Series/DataFrame stat methods to use common set of NaN-friendly - function - - Handle NumPy scalar integers at C level in Cython conversion routines - -**Bug fixes** - - - Fix bug in `DataFrame.to_csv` when writing a DataFrame with an index - name (GH290_) - - DataFrame should clear its Series caches on consolidation, was causing - "stale" Series to be returned in some corner cases (GH304_) - - DataFrame constructor failed if a column had a list of tuples (GH293_) - - Ensure that `Series.apply` always returns a Series and implement - `Series.round` (GH314_) - - Support boolean columns in Cythonized groupby functions (GH315_) - - `DataFrame.describe` should not fail if there are no numeric columns, - instead return categorical describe (GH323_) - - Fixed bug which could cause columns to be printed in wrong order in - `DataFrame.to_string` if specific list of columns passed (GH325_) - - Fix legend plotting failure if DataFrame columns 
are integers (GH326_) - - Shift start date back by one month for Yahoo! Finance API in pandas.io.data - (GH329_) - - Fix `DataFrame.join` failure on unconsolidated inputs (GH331_) - - DataFrame.min/max will no longer fail on mixed-type DataFrame (GH337_) - - Fix `read_csv` / `read_table` failure when passing list to index_col that is - not in ascending order (GH349_) - - Fix failure passing Int64Index to Index.union when both are monotonic - - Fix error when passing SparseSeries to (dense) DataFrame constructor - - Added missing bang at top of setup.py (GH352_) - - Change `is_monotonic` on MultiIndex so it properly compares the tuples - - Fix MultiIndex outer join logic (GH351_) - - Set index name attribute with single-key groupby (GH358_) - - Bug fix in reflexive binary addition in Series and DataFrame for - non-commutative operations (like string concatenation) (GH353_) - - setupegg.py will invoke Cython (GH192_) - - Fix block consolidation bug after inserting column into MultiIndex (GH366_) - - Fix bug in join operations between Index and Int64Index (GH367_) - - Handle min_periods=0 case in moving window functions (GH365_) - - Fixed corner cases in DataFrame.apply/pivot with empty DataFrame (GH378_) - - Fixed repr exception when Series name is a tuple - - Always return DateRange from `asfreq` (GH390_) - - Pass level names to `swaplevel` (GH379_) - - Don't lose index names in `MultiIndex.droplevel` (GH394_) - - Infer more proper return type in `DataFrame.apply` when no columns or rows - depending on whether the passed function is a reduction (GH389_) - - Always return NA/NaN from Series.min/max and DataFrame.min/max when all of a - row/column/values are NA (GH384_) - - Enable partial setting with .ix / advanced indexing (GH397_) - - Handle mixed-type DataFrames correctly in unstack, do not lose type - information (GH403_) - - Fix integer name formatting bug in Index.format and in Series.__repr__ - - Handle label types other than string passed to groupby (GH405_) 
- - Fix bug in .ix-based indexing with partial retrieval when a label is not - contained in a level - - Index name was not being pickled (GH408_) - - Level name should be passed to result index in GroupBy.apply (GH416_) - -Thanks ------- - -- Craig Austin -- Marius Cobzarenco -- Joel Cross -- Jeff Hammerbacher -- Adam Klein -- Thomas Kluyver -- Jev Kuznetsov -- Kieran O'Mahony -- Wouter Overmeire -- Nathan Pinger -- Christian Prinoth -- Skipper Seabold -- Chang She -- Ted Square -- Aman Thakral -- Chris Uga -- Dieter Vandenbussche -- carljv -- rsamson - -.. _GH382: https://github.com/pydata/pandas/issues/382 -.. _GH313: https://github.com/pydata/pandas/issues/313 -.. _GH296: https://github.com/pydata/pandas/issues/296 -.. _GH289: https://github.com/pydata/pandas/issues/289 -.. _GH291: https://github.com/pydata/pandas/issues/291 -.. _GH343: https://github.com/pydata/pandas/issues/343 -.. _GH303: https://github.com/pydata/pandas/issues/303 -.. _GH319: https://github.com/pydata/pandas/issues/319 -.. _GH347: https://github.com/pydata/pandas/issues/347 -.. _GH321: https://github.com/pydata/pandas/issues/321 -.. _GH359: https://github.com/pydata/pandas/issues/359 -.. _GH301: https://github.com/pydata/pandas/issues/301 -.. _GH357: https://github.com/pydata/pandas/issues/357 -.. _GH103: https://github.com/pydata/pandas/issues/103 -.. _GH362: https://github.com/pydata/pandas/issues/362 -.. _GH360: https://github.com/pydata/pandas/issues/360 -.. _GH194: https://github.com/pydata/pandas/issues/194 -.. _GH348: https://github.com/pydata/pandas/issues/348 -.. _GH286: https://github.com/pydata/pandas/issues/286 -.. _GH300: https://github.com/pydata/pandas/issues/300 -.. _GH297: https://github.com/pydata/pandas/issues/297 -.. _GH373: https://github.com/pydata/pandas/issues/373 -.. _GH364: https://github.com/pydata/pandas/issues/364 -.. _GH387: https://github.com/pydata/pandas/issues/387 -.. _GH396: https://github.com/pydata/pandas/issues/396 -.. 
_GH368: https://github.com/pydata/pandas/issues/368 -.. _GH376: https://github.com/pydata/pandas/issues/376 -.. _GH298: https://github.com/pydata/pandas/issues/298 -.. _GH299: https://github.com/pydata/pandas/issues/299 -.. _GH305: https://github.com/pydata/pandas/issues/305 -.. _GH308: https://github.com/pydata/pandas/issues/308 -.. _GH312: https://github.com/pydata/pandas/issues/312 -.. _GH317: https://github.com/pydata/pandas/issues/317 -.. _GH324: https://github.com/pydata/pandas/issues/324 -.. _GH327: https://github.com/pydata/pandas/issues/327 -.. _GH341: https://github.com/pydata/pandas/issues/341 -.. _GH346: https://github.com/pydata/pandas/issues/346 -.. _GH355: https://github.com/pydata/pandas/issues/355 -.. _GH361: https://github.com/pydata/pandas/issues/361 -.. _GH309: https://github.com/pydata/pandas/issues/309 -.. _GH370: https://github.com/pydata/pandas/issues/370 -.. _GH381: https://github.com/pydata/pandas/issues/381 -.. _GH393: https://github.com/pydata/pandas/issues/393 -.. _GH363: https://github.com/pydata/pandas/issues/363 -.. _GH290: https://github.com/pydata/pandas/issues/290 -.. _GH304: https://github.com/pydata/pandas/issues/304 -.. _GH293: https://github.com/pydata/pandas/issues/293 -.. _GH314: https://github.com/pydata/pandas/issues/314 -.. _GH315: https://github.com/pydata/pandas/issues/315 -.. _GH323: https://github.com/pydata/pandas/issues/323 -.. _GH325: https://github.com/pydata/pandas/issues/325 -.. _GH326: https://github.com/pydata/pandas/issues/326 -.. _GH329: https://github.com/pydata/pandas/issues/329 -.. _GH331: https://github.com/pydata/pandas/issues/331 -.. _GH337: https://github.com/pydata/pandas/issues/337 -.. _GH349: https://github.com/pydata/pandas/issues/349 -.. _GH352: https://github.com/pydata/pandas/issues/352 -.. _GH351: https://github.com/pydata/pandas/issues/351 -.. _GH358: https://github.com/pydata/pandas/issues/358 -.. _GH353: https://github.com/pydata/pandas/issues/353 -.. 
_GH192: https://github.com/pydata/pandas/issues/192 -.. _GH366: https://github.com/pydata/pandas/issues/366 -.. _GH367: https://github.com/pydata/pandas/issues/367 -.. _GH365: https://github.com/pydata/pandas/issues/365 -.. _GH378: https://github.com/pydata/pandas/issues/378 -.. _GH390: https://github.com/pydata/pandas/issues/390 -.. _GH379: https://github.com/pydata/pandas/issues/379 -.. _GH394: https://github.com/pydata/pandas/issues/394 -.. _GH389: https://github.com/pydata/pandas/issues/389 -.. _GH384: https://github.com/pydata/pandas/issues/384 -.. _GH397: https://github.com/pydata/pandas/issues/397 -.. _GH403: https://github.com/pydata/pandas/issues/403 -.. _GH405: https://github.com/pydata/pandas/issues/405 -.. _GH408: https://github.com/pydata/pandas/issues/408 -.. _GH416: https://github.com/pydata/pandas/issues/416 -.. _GH65: https://github.com/pydata/pandas/issues/65 -.. _GH92: https://github.com/pydata/pandas/issues/92 - - -pandas 0.5.0 -============ - -**Release date:** 10/24/2011 - -This release of pandas includes a number of API changes (see below) and cleanup -of deprecated APIs from pre-0.4.0 releases. There are also bug fixes, new -features, numerous significant performance enhancements, and includes a new -IPython completer hook to enable tab completion of DataFrame columns accesses -as attributes (a new feature). - -In addition to the changes listed here from 0.4.3 to 0.5.0, the minor releases -0.4.1, 0.4.2, and 0.4.3 brought some significant new functionality and -performance improvements that are worth taking a look at. - -Thanks to all for bug reports, contributed patches and generally providing -feedback on the library. - -**API Changes** - - - `read_table`, `read_csv`, and `ExcelFile.parse` default arguments for - `index_col` is now None. 
To use one or more of the columns as the resulting - DataFrame's index, these must be explicitly specified now - - Parsing functions like `read_csv` no longer parse dates by default (GH - GH225_) - - Removed `weights` option in panel regression which was not doing anything - principled (GH155_) - - Changed `buffer` argument name in `Series.to_string` to `buf` - - `Series.to_string` and `DataFrame.to_string` now return strings by default - instead of printing to sys.stdout - - Deprecated `nanRep` argument in various `to_string` and `to_csv` functions - in favor of `na_rep`. Will be removed in 0.6 (GH275_) - - Renamed `delimiter` to `sep` in `DataFrame.from_csv` for consistency - - Changed order of `Series.clip` arguments to match those of `numpy.clip` and - added (unimplemented) `out` argument so `numpy.clip` can be called on a - Series (GH272_) - - Series functions renamed (and thus deprecated) in 0.4 series have been - removed: - - * `asOf`, use `asof` - * `toDict`, use `to_dict` - * `toString`, use `to_string` - * `toCSV`, use `to_csv` - * `merge`, use `map` - * `applymap`, use `apply` - * `combineFirst`, use `combine_first` - * `_firstTimeWithValue` use `first_valid_index` - * `_lastTimeWithValue` use `last_valid_index` - - - DataFrame functions renamed / deprecated in 0.4 series have been removed: - - * `asMatrix` method, use `as_matrix` or `values` attribute - * `combineFirst`, use `combine_first` - * `getXS`, use `xs` - * `merge`, use `join` - * `fromRecords`, use `from_records` - * `fromcsv`, use `from_csv` - * `toRecords`, use `to_records` - * `toDict`, use `to_dict` - * `toString`, use `to_string` - * `toCSV`, use `to_csv` - * `_firstTimeWithValue` use `first_valid_index` - * `_lastTimeWithValue` use `last_valid_index` - * `toDataMatrix` is no longer needed - * `rows()` method, use `index` attribute - * `cols()` method, use `columns` attribute - * `dropEmptyRows()`, use `dropna(how='all')` - * `dropIncompleteRows()`, use `dropna()` - * `tapply(f)`, use 
`apply(f, axis=1)` - * `tgroupby(keyfunc, aggfunc)`, use `groupby` with `axis=1` - - - Other outstanding deprecations have been removed: - - * `indexField` argument in `DataFrame.from_records` - * `missingAtEnd` argument in `Series.order`. Use `na_last` instead - * `Series.fromValue` classmethod, use regular `Series` constructor instead - * Functions `parseCSV`, `parseText`, and `parseExcel` methods in - `pandas.io.parsers` have been removed - * `Index.asOfDate` function - * `Panel.getMinorXS` (use `minor_xs`) and `Panel.getMajorXS` (use - `major_xs`) - * `Panel.toWide`, use `Panel.to_wide` instead - -**New features / modules** - - - Added `DataFrame.align` method with standard join options - - Added `parse_dates` option to `read_csv` and `read_table` methods to - optionally try to parse dates in the index columns - - Add `nrows`, `chunksize`, and `iterator` arguments to `read_csv` and - `read_table`. The last two return a new `TextParser` class capable of - lazily iterating through chunks of a flat file (GH242_) - - Added ability to join on multiple columns in `DataFrame.join` (GH214_) - - Added private `_get_duplicates` function to `Index` for identifying - duplicate values more easily - - Added column attribute access to DataFrame, e.g. df.A equivalent to df['A'] - if 'A' is a column in the DataFrame (GH213_) - - Added IPython tab completion hook for DataFrame columns. 
(GH233_, GH230_) - - Implement `Series.describe` for Series containing objects (GH241_) - - Add inner join option to `DataFrame.join` when joining on key(s) (GH248_) - - Can select set of DataFrame columns by passing a list to `__getitem__` (GH - GH253_) - - Can use & and | to intersection / union Index objects, respectively (GH - GH261_) - - Added `pivot_table` convenience function to pandas namespace (GH234_) - - Implemented `Panel.rename_axis` function (GH243_) - - DataFrame will show index level names in console output - - Implemented `Panel.take` - - Add `set_eng_float_format` function for setting alternate DataFrame - floating point string formatting - - Add convenience `set_index` function for creating a DataFrame index from - its existing columns - -**Improvements to existing features** - - - Major performance improvements in file parsing functions `read_csv` and - `read_table` - - Added Cython function for converting tuples to ndarray very fast. Speeds up - many MultiIndex-related operations - - File parsing functions like `read_csv` and `read_table` will explicitly - check if a parsed index has duplicates and raise a more helpful exception - rather than deferring the check until later - - Refactored merging / joining code into a tidy class and disabled unnecessary - computations in the float/object case, thus getting about 10% better - performance (GH211_) - - Improved speed of `DataFrame.xs` on mixed-type DataFrame objects by about - 5x, regression from 0.3.0 (GH215_) - - With new `DataFrame.align` method, speeding up binary operations between - differently-indexed DataFrame objects by 10-25%. 
- - Significantly sped up conversion of nested dict into DataFrame (GH212_) - - Can pass hierarchical index level name to `groupby` instead of the level - number if desired (GH223_) - - Add support for different delimiters in `DataFrame.to_csv` (GH244_) - - Add more helpful error message when importing pandas post-installation from - the source directory (GH250_) - - Significantly speed up DataFrame `__repr__` and `count` on large mixed-type - DataFrame objects - - Better handling of pyx file dependencies in Cython module build (GH271_) - -**Bug fixes** - - - `read_csv` / `read_table` fixes - - Be less aggressive about converting float->int in cases of floating point - representations of integers like 1.0, 2.0, etc. - - "True"/"False" will not get correctly converted to boolean - - Index name attribute will get set when specifying an index column - - Passing column names should force `header=None` (GH257_) - - Don't modify passed column names when `index_col` is not - None (GH258_) - - Can sniff CSV separator in zip file (since seek is not supported, was - failing before) - - Worked around matplotlib "bug" in which series[:, np.newaxis] fails. Should - be reported upstream to matplotlib (GH224_) - - DataFrame.iteritems was not returning Series with the name attribute - set. 
Also neither was DataFrame._series - - Can store datetime.date objects in HDFStore (GH231_) - - Index and Series names are now stored in HDFStore - - Fixed problem in which data would get upcasted to object dtype in - GroupBy.apply operations (GH237_) - - Fixed outer join bug with empty DataFrame (GH238_) - - Can create empty Panel (GH239_) - - Fix join on single key when passing list with 1 entry (GH246_) - - Don't raise Exception on plotting DataFrame with an all-NA column (GH251_, - GH254_) - - Bug min/max errors when called on integer DataFrames (GH241_) - - `DataFrame.iteritems` and `DataFrame._series` not assigning name attribute - - Panel.__repr__ raised exception on length-0 major/minor axes - - `DataFrame.join` on key with empty DataFrame produced incorrect columns - - Implemented `MultiIndex.diff` (GH260_) - - `Int64Index.take` and `MultiIndex.take` lost name field, fix downstream - issue GH262_ - - Can pass list of tuples to `Series` (GH270_) - - Can pass level name to `DataFrame.stack` - - Support set operations between MultiIndex and Index - - Fix many corner cases in MultiIndex set operations - - Fix MultiIndex-handling bug with GroupBy.apply when returned groups are not - indexed the same - - Fix corner case bugs in DataFrame.apply - - Setting DataFrame index did not cause Series cache to get cleared - - Various int32 -> int64 platform-specific issues - - Don't be too aggressive converting to integer when parsing file with - MultiIndex (GH285_) - - Fix bug when slicing Series with negative indices before beginning - -Thanks ------- - -- Thomas Kluyver -- Daniel Fortunov -- Aman Thakral -- Luca Beltrame -- Wouter Overmeire - -.. _GH225: https://github.com/pydata/pandas/issues/225 -.. _GH155: https://github.com/pydata/pandas/issues/155 -.. _GH275: https://github.com/pydata/pandas/issues/275 -.. _GH272: https://github.com/pydata/pandas/issues/272 -.. _GH242: https://github.com/pydata/pandas/issues/242 -.. 
_GH214: https://github.com/pydata/pandas/issues/214 -.. _GH213: https://github.com/pydata/pandas/issues/213 -.. _GH233: https://github.com/pydata/pandas/issues/233 -.. _GH230: https://github.com/pydata/pandas/issues/230 -.. _GH241: https://github.com/pydata/pandas/issues/241 -.. _GH248: https://github.com/pydata/pandas/issues/248 -.. _GH253: https://github.com/pydata/pandas/issues/253 -.. _GH261: https://github.com/pydata/pandas/issues/261 -.. _GH234: https://github.com/pydata/pandas/issues/234 -.. _GH243: https://github.com/pydata/pandas/issues/243 -.. _GH211: https://github.com/pydata/pandas/issues/211 -.. _GH215: https://github.com/pydata/pandas/issues/215 -.. _GH212: https://github.com/pydata/pandas/issues/212 -.. _GH223: https://github.com/pydata/pandas/issues/223 -.. _GH244: https://github.com/pydata/pandas/issues/244 -.. _GH250: https://github.com/pydata/pandas/issues/250 -.. _GH271: https://github.com/pydata/pandas/issues/271 -.. _GH257: https://github.com/pydata/pandas/issues/257 -.. _GH258: https://github.com/pydata/pandas/issues/258 -.. _GH224: https://github.com/pydata/pandas/issues/224 -.. _GH231: https://github.com/pydata/pandas/issues/231 -.. _GH237: https://github.com/pydata/pandas/issues/237 -.. _GH238: https://github.com/pydata/pandas/issues/238 -.. _GH239: https://github.com/pydata/pandas/issues/239 -.. _GH246: https://github.com/pydata/pandas/issues/246 -.. _GH251: https://github.com/pydata/pandas/issues/251 -.. _GH254: https://github.com/pydata/pandas/issues/254 -.. _GH260: https://github.com/pydata/pandas/issues/260 -.. _GH262: https://github.com/pydata/pandas/issues/262 -.. _GH270: https://github.com/pydata/pandas/issues/270 -.. _GH285: https://github.com/pydata/pandas/issues/285 - - -pandas 0.4.3 -============ - -Release notes -------------- - -**Release date:** 10/9/2011 - -This is largely a bugfix release from 0.4.2 but also includes a handful of new -and enhanced features. 
Also, pandas can now be installed and used on Python 3 -(thanks Thomas Kluyver!). - -**New features / modules** - - - Python 3 support using 2to3 (GH200_, Thomas Kluyver) - - Add `name` attribute to `Series` and added relevant logic and tests. Name - now prints as part of `Series.__repr__` - - Add `name` attribute to standard Index so that stacking / unstacking does - not discard names and so that indexed DataFrame objects can be reliably - round-tripped to flat files, pickle, HDF5, etc. - - Add `isnull` and `notnull` as instance methods on Series (GH209_, GH203_) - -**Improvements to existing features** - - - Skip xlrd-related unit tests if not installed - - `Index.append` and `MultiIndex.append` can accept a list of Index objects to - concatenate together - - Altered binary operations on differently-indexed SparseSeries objects to use - the integer-based (dense) alignment logic which is faster with a larger - number of blocks (GH205_) - - Refactored `Series.__repr__` to be a bit more clean and consistent - -**API Changes** - - - `Series.describe` and `DataFrame.describe` now bring the 25% and 75% - quartiles instead of the 10% and 90% deciles. The other outputs have not - changed - - `Series.toString` will print deprecation warning, has been de-camelCased to - `to_string` - -**Bug fixes** - - - Fix broken interaction between `Index` and `Int64Index` when calling - intersection. 
Implement `Int64Index.intersection` - - `MultiIndex.sortlevel` discarded the level names (GH202_) - - Fix bugs in groupby, join, and append due to improper concatenation of - `MultiIndex` objects (GH201_) - - Fix regression from 0.4.1, `isnull` and `notnull` ceased to work on other - kinds of Python scalar objects like `datetime.datetime` - - Raise more helpful exception when attempting to write empty DataFrame or - LongPanel to `HDFStore` (GH204_) - - Use stdlib csv module to properly escape strings with commas in - `DataFrame.to_csv` (GH206_, Thomas Kluyver) - - Fix Python ndarray access in Cython code for sparse blocked index integrity - check - - Fix bug writing Series to CSV in Python 3 (GH209_) - - Miscellaneous Python 3 bugfixes - -Thanks ------- - - - Thomas Kluyver - - rsamson - -.. _GH200: https://github.com/pydata/pandas/issues/200 -.. _GH209: https://github.com/pydata/pandas/issues/209 -.. _GH203: https://github.com/pydata/pandas/issues/203 -.. _GH205: https://github.com/pydata/pandas/issues/205 -.. _GH202: https://github.com/pydata/pandas/issues/202 -.. _GH201: https://github.com/pydata/pandas/issues/201 -.. _GH204: https://github.com/pydata/pandas/issues/204 -.. _GH206: https://github.com/pydata/pandas/issues/206 - - -pandas 0.4.2 -============ - -Release notes -------------- - -**Release date:** 10/3/2011 - -This is a performance optimization release with several bug fixes. The new -Int64Index and new merging / joining Cython code and related Python -infrastructure are the main new additions - -**New features / modules** - - - Added fast `Int64Index` type with specialized join, union, - intersection. Will result in significant performance enhancements for - int64-based time series (e.g. using NumPy's datetime64 one day) and also - faster operations on DataFrame objects storing record array-like data. 
- - Refactored `Index` classes to have a `join` method and associated data - alignment routines throughout the codebase to be able to leverage optimized - joining / merging routines. - - Added `Series.align` method for aligning two series with choice of join - method - - Wrote faster Cython data alignment / merging routines resulting in - substantial speed increases - - Added `is_monotonic` property to `Index` classes with associated Cython - code to evaluate the monotonicity of the `Index` values - - Add method `get_level_values` to `MultiIndex` - - Implemented shallow copy of `BlockManager` object in `DataFrame` internals - -**Improvements to existing features** - - - Improved performance of `isnull` and `notnull`, a regression from v0.3.0 - (GH187_) - - Wrote templating / code generation script to auto-generate Cython code for - various functions which need to be available for the 4 major data types - used in pandas (float64, bool, object, int64) - - Refactored code related to `DataFrame.join` so that intermediate aligned - copies of the data in each `DataFrame` argument do not need to be - created. Substantial performance increases result (GH176_) - - Substantially improved performance of generic `Index.intersection` and - `Index.union` - - Improved performance of `DateRange.union` with overlapping ranges and - non-cacheable offsets (like Minute). Implemented analogous fast - `DateRange.intersection` for overlapping ranges. 
- - Implemented `BlockManager.take` resulting in significantly faster `take` - performance on mixed-type `DataFrame` objects (GH104_) - - Improved performance of `Series.sort_index` - - Significant groupby performance enhancement: removed unnecessary integrity - checks in DataFrame internals that were slowing down slicing operations to - retrieve groups - - Added informative Exception when passing dict to DataFrame groupby - aggregation with axis != 0 - -**API Changes** - -None - -**Bug fixes** - - - Fixed minor unhandled exception in Cython code implementing fast groupby - aggregation operations - - Fixed bug in unstacking code manifesting with more than 3 hierarchical - levels - - Throw exception when step specified in label-based slice (GH185_) - - Fix isnull to correctly work with np.float32. Fix upstream bug described in - GH182_ - - Finish implementation of as_index=False in groupby for DataFrame - aggregation (GH181_) - - Raise SkipTest for pre-epoch HDFStore failure. Real fix will be sorted out - via datetime64 dtype - -Thanks ------- - -- Uri Laserson -- Scott Sinclair - -.. _GH187: https://github.com/pydata/pandas/issues/187 -.. _GH176: https://github.com/pydata/pandas/issues/176 -.. _GH104: https://github.com/pydata/pandas/issues/104 -.. _GH185: https://github.com/pydata/pandas/issues/185 -.. _GH182: https://github.com/pydata/pandas/issues/182 -.. _GH181: https://github.com/pydata/pandas/issues/181 - - -pandas 0.4.1 -============ - -Release notes -------------- - -**Release date:** 9/25/2011 - -This is primarily a bug fix release but includes some new features and -improvements - -**New features / modules** - - - Added new `DataFrame` methods `get_dtype_counts` and property `dtypes` - - Setting of values using ``.ix`` indexing attribute in mixed-type DataFrame - objects has been implemented (fixes GH135_) - - `read_csv` can read multiple columns into a `MultiIndex`. 
DataFrame's - `to_csv` method will properly write out a `MultiIndex` which can be read - back (GH151_, thanks to Skipper Seabold) - - Wrote fast time series merging / joining methods in Cython. Will be - integrated later into DataFrame.join and related functions - - Added `ignore_index` option to `DataFrame.append` for combining unindexed - records stored in a DataFrame - -**Improvements to existing features** - - - Some speed enhancements with internal Index type-checking function - - `DataFrame.rename` has a new `copy` parameter which can rename a DataFrame - in place - - Enable unstacking by level name (GH142_) - - Enable sortlevel to work by level name (GH141_) - - `read_csv` can automatically "sniff" other kinds of delimiters using - `csv.Sniffer` (GH146_) - - Improved speed of unit test suite by about 40% - - Exception will not be raised calling `HDFStore.remove` on non-existent node - with where clause - - Optimized `_ensure_index` function resulting in performance savings in - type-checking Index objects - -**API Changes** - -None - -**Bug fixes** - - - Fixed DataFrame constructor bug causing downstream problems (e.g. .copy() - failing) when passing a Series as the values along with a column name and - index - - Fixed single-key groupby on DataFrame with as_index=False (GH160_) - - `Series.shift` was failing on integer Series (GH154_) - - `unstack` methods were producing incorrect output in the case of duplicate - hierarchical labels. 
An exception will now be raised (GH147_) - - Calling `count` with level argument caused reduceat failure or segfault in - earlier NumPy (GH169_) - - Fixed `DataFrame.corrwith` to automatically exclude non-numeric data (GH - GH144_) - - Unicode handling bug fixes in `DataFrame.to_string` (GH138_) - - Excluding OLS degenerate unit test case that was causing platform specific - failure (GH149_) - - Skip blosc-dependent unit tests for PyTables < 2.2 (GH137_) - - Calling `copy` on `DateRange` did not copy over attributes to the new object - (GH168_) - - Fix bug in `HDFStore` in which Panel data could be appended to a Table with - different item order, thus resulting in an incorrect result read back - -Thanks ------- -- Yaroslav Halchenko -- Jeff Reback -- Skipper Seabold -- Dan Lovell -- Nick Pentreath - -.. _GH135: https://github.com/pydata/pandas/issues/135 -.. _GH151: https://github.com/pydata/pandas/issues/151 -.. _GH142: https://github.com/pydata/pandas/issues/142 -.. _GH141: https://github.com/pydata/pandas/issues/141 -.. _GH146: https://github.com/pydata/pandas/issues/146 -.. _GH160: https://github.com/pydata/pandas/issues/160 -.. _GH154: https://github.com/pydata/pandas/issues/154 -.. _GH147: https://github.com/pydata/pandas/issues/147 -.. _GH169: https://github.com/pydata/pandas/issues/169 -.. _GH144: https://github.com/pydata/pandas/issues/144 -.. _GH138: https://github.com/pydata/pandas/issues/138 -.. _GH149: https://github.com/pydata/pandas/issues/149 -.. _GH137: https://github.com/pydata/pandas/issues/137 -.. _GH168: https://github.com/pydata/pandas/issues/168 - - -pandas 0.4.0 -============ - -Release notes -------------- - -**Release date:** 9/12/2011 - -**New features / modules** - - - `pandas.core.sparse` module: "Sparse" (mostly-NA, or some other fill value) - versions of `Series`, `DataFrame`, and `Panel`. For low-density data, this - will result in significant performance boosts, and smaller memory - footprint. 
Added `to_sparse` methods to `Series`, `DataFrame`, and - `Panel`. See online documentation for more on these - - Fancy indexing operator on Series / DataFrame, e.g. via .ix operator. Both - getting and setting of values is supported; however, setting values will only - currently work on homogeneously-typed DataFrame objects. Things like: - - * series.ix[[d1, d2, d3]] - * frame.ix[5:10, ['C', 'B', 'A']], frame.ix[5:10, 'A':'C'] - * frame.ix[date1:date2] - - - Significantly enhanced `groupby` functionality - - * Can groupby multiple keys, e.g. df.groupby(['key1', 'key2']). Iteration with - multiple groupings products a flattened tuple - * "Nuisance" columns (non-aggregatable) will automatically be excluded from - DataFrame aggregation operations - * Added automatic "dispatching to Series / DataFrame methods to more easily - invoke methods on groups. e.g. s.groupby(crit).std() will work even though - `std` is not implemented on the `GroupBy` class - - - Hierarchical / multi-level indexing - - * New the `MultiIndex` class. Integrated `MultiIndex` into `Series` and - `DataFrame` fancy indexing, slicing, __getitem__ and __setitem, - reindexing, etc. Added `level` keyword argument to `groupby` to enable - grouping by a level of a `MultiIndex` - - - New data reshaping functions: `stack` and `unstack` on DataFrame and Series - - * Integrate with MultiIndex to enable sophisticated reshaping of data - - - `Index` objects (labels for axes) are now capable of holding tuples - - `Series.describe`, `DataFrame.describe`: produces an R-like table of summary - statistics about each data column - - `DataFrame.quantile`, `Series.quantile` for computing sample quantiles of data - across requested axis - - Added general `DataFrame.dropna` method to replace `dropIncompleteRows` and - `dropEmptyRows`, deprecated those. - - `Series` arithmetic methods with optional fill_value for missing data, - e.g. a.add(b, fill_value=0). 
If a location is missing for both it will still - be missing in the result though. - - fill_value option has been added to `DataFrame`.{add, mul, sub, div} methods - similar to `Series` - - Boolean indexing with `DataFrame` objects: data[data > 0.1] = 0.1 or - data[data> other] = 1. - - `pytz` / tzinfo support in `DateRange` - - * `tz_localize`, `tz_normalize`, and `tz_validate` methods added - - - Added `ExcelFile` class to `pandas.io.parsers` for parsing multiple sheets out - of a single Excel 2003 document - - `GroupBy` aggregations can now optionally *broadcast*, e.g. produce an object - of the same size with the aggregated value propagated - - Added `select` function in all data structures: reindex axis based on - arbitrary criterion (function returning boolean value), - e.g. frame.select(lambda x: 'foo' in x, axis=1) - - `DataFrame.consolidate` method, API function relating to redesigned internals - - `DataFrame.insert` method for inserting column at a specified location rather - than the default __setitem__ behavior (which puts it at the end) - - `HDFStore` class in `pandas.io.pytables` has been largely rewritten using - patches from Jeff Reback from others. It now supports mixed-type `DataFrame` - and `Series` data and can store `Panel` objects. It also has the option to - query `DataFrame` and `Panel` data. Loading data from legacy `HDFStore` - files is supported explicitly in the code - - Added `set_printoptions` method to modify appearance of DataFrame tabular - output - - `rolling_quantile` functions; a moving version of `Series.quantile` / - `DataFrame.quantile` - - Generic `rolling_apply` moving window function - - New `drop` method added to `Series`, `DataFrame`, etc. which can drop a set of - labels from an axis, producing a new object - - `reindex` methods now sport a `copy` option so that data is not forced to be - copied then the resulting object is indexed the same - - Added `sort_index` methods to Series and Panel. 
Renamed `DataFrame.sort` - to `sort_index`. Leaving `DataFrame.sort` for now. - - Added ``skipna`` option to statistical instance methods on all the data - structures - - `pandas.io.data` module providing a consistent interface for reading time - series data from several different sources - -**Improvements to existing features** - - * The 2-dimensional `DataFrame` and `DataMatrix` classes have been extensively - redesigned internally into a single class `DataFrame`, preserving where - possible their optimal performance characteristics. This should reduce - confusion from users about which class to use. - - * Note that under the hood there is a new essentially "lazy evaluation" - scheme within respect to adding columns to DataFrame. During some - operations, like-typed blocks will be "consolidated" but not before. - - * `DataFrame` accessing columns repeatedly is now significantly faster than - `DataMatrix` used to be in 0.3.0 due to an internal Series caching mechanism - (which are all views on the underlying data) - * Column ordering for mixed type data is now completely consistent in - `DataFrame`. In prior releases, there was inconsistent column ordering in - `DataMatrix` - * Improved console / string formatting of DataMatrix with negative numbers - * Improved tabular data parsing functions, `read_table` and `read_csv`: - - * Added `skiprows` and `na_values` arguments to `pandas.io.parsers` functions - for more flexible IO - * `parseCSV` / `read_csv` functions and others in `pandas.io.parsers` now can - take a list of custom NA values, and also a list of rows to skip - - * Can slice `DataFrame` and get a view of the data (when homogeneously typed), - e.g. 
frame.xs(idx, copy=False) or frame.ix[idx] - * Many speed optimizations throughout `Series` and `DataFrame` - * Eager evaluation of groups when calling ``groupby`` functions, so if there is - an exception with the grouping function it will raised immediately versus - sometime later on when the groups are needed - * `datetools.WeekOfMonth` offset can be parameterized with `n` different than 1 - or -1. - * Statistical methods on DataFrame like `mean`, `std`, `var`, `skew` will now - ignore non-numerical data. Before a not very useful error message was - generated. A flag `numeric_only` has been added to `DataFrame.sum` and - `DataFrame.count` to enable this behavior in those methods if so desired - (disabled by default) - * `DataFrame.pivot` generalized to enable pivoting multiple columns into a - `DataFrame` with hierarchical columns - * `DataFrame` constructor can accept structured / record arrays - * `Panel` constructor can accept a dict of DataFrame-like objects. Do not - need to use `from_dict` anymore (`from_dict` is there to stay, though). - -**API Changes** - - * The `DataMatrix` variable now refers to `DataFrame`, will be removed within - two releases - * `WidePanel` is now known as `Panel`. The `WidePanel` variable in the pandas - namespace now refers to the renamed `Panel` class - * `LongPanel` and `Panel` / `WidePanel` now no longer have a common - subclass. `LongPanel` is now a subclass of `DataFrame` having a number of - additional methods and a hierarchical index instead of the old - `LongPanelIndex` object, which has been removed. Legacy `LongPanel` pickles - may not load properly - * Cython is now required to build `pandas` from a development branch. This was - done to avoid continuing to check in cythonized C files into source - control. Builds from released source distributions will not require Cython - * Cython code has been moved up to a top level `pandas/src` directory. 
Cython - extension modules have been renamed and promoted from the `lib` subpackage to - the top level, i.e. - - * `pandas.lib.tseries` -> `pandas._tseries` - * `pandas.lib.sparse` -> `pandas._sparse` - - * `DataFrame` pickling format has changed. Backwards compatibility for legacy - pickles is provided, but it's recommended to consider PyTables-based - `HDFStore` for storing data with a longer expected shelf life - * A `copy` argument has been added to the `DataFrame` constructor to avoid - unnecessary copying of data. Data is no longer copied by default when passed - into the constructor - * Handling of boolean dtype in `DataFrame` has been improved to support storage - of boolean data with NA / NaN values. Before it was being converted to float64 - so this should not (in theory) cause API breakage - * To optimize performance, Index objects now only check that their labels are - unique when uniqueness matters (i.e. when someone goes to perform a - lookup). This is a potentially dangerous tradeoff, but will lead to much - better performance in many places (like groupby). - * Boolean indexing using Series must now have the same indices (labels) - * Backwards compatibility support for begin/end/nPeriods keyword arguments in - DateRange class has been removed - * More intuitive / shorter filling aliases `ffill` (for `pad`) and `bfill` (for - `backfill`) have been added to the functions that use them: `reindex`, - `asfreq`, `fillna`. - * `pandas.core.mixins` code moved to `pandas.core.generic` - * `buffer` keyword arguments (e.g. 
`DataFrame.toString`) renamed to `buf` to - avoid using Python built-in name - * `DataFrame.rows()` removed (use `DataFrame.index`) - * Added deprecation warning to `DataFrame.cols()`, to be removed in next release - * `DataFrame` deprecations and de-camelCasing: `merge`, `asMatrix`, - `toDataMatrix`, `_firstTimeWithValue`, `_lastTimeWithValue`, `toRecords`, - `fromRecords`, `tgroupby`, `toString` - * `pandas.io.parsers` method deprecations - - * `parseCSV` is now `read_csv` and keyword arguments have been de-camelCased - * `parseText` is now `read_table` - * `parseExcel` is replaced by the `ExcelFile` class and its `parse` method - - * `fillMethod` arguments (deprecated in prior release) removed, should be - replaced with `method` - * `Series.fill`, `DataFrame.fill`, and `Panel.fill` removed, use `fillna` - instead - * `groupby` functions now exclude NA / NaN values from the list of groups. This - matches R behavior with NAs in factors e.g. with the `tapply` function - * Removed `parseText`, `parseCSV` and `parseExcel` from pandas namespace - * `Series.combineFunc` renamed to `Series.combine` and made a bit more general - with a `fill_value` keyword argument defaulting to NaN - * Removed `pandas.core.pytools` module. Code has been moved to - `pandas.core.common` - * Tacked on `groupName` attribute for groups in GroupBy renamed to `name` - * Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant - * Slicing a `Series` returns a view now - * More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`, - `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`, - `combineFirst` to `combine_first`. Will print `FutureWarning`. - * `DataFrame.to_csv` does not write an "index" column label by default - anymore since the output file can be read back without it. However, there - is a new ``index_label`` argument. 
So you can do ``index_label='index'`` to - emulate the old behavior - * `datetools.Week` argument renamed from `dayOfWeek` to `weekday` - * `timeRule` argument in `shift` has been deprecated in favor of using the - `offset` argument for everything. So you can still pass a time rule string - to `offset` - * Added optional `encoding` argument to `read_csv`, `read_table`, `to_csv`, - `from_csv` to handle unicode in python 2.x - -**Bug fixes** - - * Column ordering in `pandas.io.parsers.parseCSV` will match CSV in the presence - of mixed-type data - * Fixed handling of Excel 2003 dates in `pandas.io.parsers` - * `DateRange` caching was happening with high resolution `DateOffset` objects, - e.g. `DateOffset(seconds=1)`. This has been fixed - * Fixed __truediv__ issue in `DataFrame` - * Fixed `DataFrame.toCSV` bug preventing IO round trips in some cases - * Fixed bug in `Series.plot` causing matplotlib to barf in exceptional cases - * Disabled `Index` objects from being hashable, like ndarrays - * Added `__ne__` implementation to `Index` so that operations like ts[ts != idx] - will work - * Added `__ne__` implementation to `DataFrame` - * Bug / unintuitive result when calling `fillna` on unordered labels - * Bug calling `sum` on boolean DataFrame - * Bug fix when creating a DataFrame from a dict with scalar values - * Series.{sum, mean, std, ...} now return NA/NaN when the whole Series is NA - * NumPy 1.4 through 1.6 compatibility fixes - * Fixed bug in bias correction in `rolling_cov`, was affecting `rolling_corr` - too - * R-square value was incorrect in the presence of fixed and time effects in - the `PanelOLS` classes - * `HDFStore` can handle duplicates in table format, will take - -Thanks ------- - - Joon Ro - - Michael Pennington - - Chris Uga - - Chris Withers - - Jeff Reback - - Ted Square - - Craig Austin - - William Ferreira - - Daniel Fortunov - - Tony Roberts - - Martin Felder - - John Marino - - Tim McNamara - - Justin Berka - - Dieter Vandenbussche - - 
Shane Conway - - Skipper Seabold - - Chris Jordan-Squire - -pandas 0.3.0 -============ - -Release notes -------------- - -**Release date:** February 20, 2011 - -**New features / modules** - - - `corrwith` function to compute column- or row-wise correlations between two - DataFrame objects - - Can boolean-index DataFrame objects, e.g. df[df > 2] = 2, px[px > last_px] = 0 - - Added comparison magic methods (__lt__, __gt__, etc.) - - Flexible explicit arithmetic methods (add, mul, sub, div, etc.) - - Added `reindex_like` method - - Added `reindex_like` method to WidePanel - - Convenience functions for accessing SQL-like databases in `pandas.io.sql` - module - - Added (still experimental) HDFStore class for storing pandas data - structures using HDF5 / PyTables in `pandas.io.pytables` module - - Added WeekOfMonth date offset - - `pandas.rpy` (experimental) module created, provide some interfacing / - conversion between rpy2 and pandas - -**Improvements** - - - Unit test coverage: 100% line coverage of core data structures - - Speed enhancement to rolling_{median, max, min} - - Column ordering between DataFrame and DataMatrix is now consistent: before - DataFrame would not respect column order - - Improved {Series, DataFrame}.plot methods to be more flexible (can pass - matplotlib Axis arguments, plot DataFrame columns in multiple subplots, - etc.) - -**API Changes** - - - Exponentially-weighted moment functions in `pandas.stats.moments` have a - more consistent API and accept a min_periods argument like their regular - moving counterparts. - - **fillMethod** argument in Series, DataFrame changed to **method**, - `FutureWarning` added. 
- - **fill** method in Series, DataFrame/DataMatrix, WidePanel renamed to - **fillna**, `FutureWarning` added to **fill** - - Renamed **DataFrame.getXS** to **xs**, `FutureWarning` added - - Removed **cap** and **floor** functions from DataFrame, renamed to - **clip_upper** and **clip_lower** for consistency with NumPy - -**Bug fixes** - - - Fixed bug in IndexableSkiplist Cython code that was breaking - rolling_max function - - Numerous numpy.int64-related indexing fixes - - Several NumPy 1.4.0 NaN-handling fixes - - Bug fixes to pandas.io.parsers.parseCSV - - Fixed `DateRange` caching issue with unusual date offsets - - Fixed bug in `DateRange.union` - - Fixed corner case in `IndexableSkiplist` implementation diff --git a/doc/source/conf.py b/doc/source/conf.py index 60218a1d2480b..99d1703b9ca34 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -243,8 +243,6 @@ # extlinks alias extlinks = {'issue': ('https://github.com/pydata/pandas/issues/%s', - 'issue '), - 'pull request': ('https://github.com/pydata/pandas/pulls/%s', - 'pull request '), - 'wiki': ('https://github.com/pydata/pandas/pulls/%s', + 'GH'), + 'wiki': ('https://github.com/pydata/pandas/wiki/%s', 'wiki ')} diff --git a/doc/source/index.rst b/doc/source/index.rst index 21a79ffdb85fd..3534cd2b577f4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -105,7 +105,6 @@ See the package overview for more detail about what's in the library. .. toctree:: - :hidden: :maxdepth: 3 whatsnew @@ -132,4 +131,4 @@ See the package overview for more detail about what's in the library. related comparison_with_r api - + release diff --git a/doc/source/release.rst b/doc/source/release.rst new file mode 100644 index 0000000000000..fdff03217c050 --- /dev/null +++ b/doc/source/release.rst @@ -0,0 +1,3012 @@ +.. _release: + +============= +Release Notes +============= + +This is the list of changes to pandas between each release. 
For full details, +see the commit logs at http://github.com/pydata/pandas + +What is it +---------- + +pandas is a Python package providing fast, flexible, and expressive data +structures designed to make working with “relational” or “labeled” data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, real world data analysis in Python. Additionally, it has the +broader goal of becoming the most powerful and flexible open source data +analysis / manipulation tool available in any language. + +Where to get it +--------------- + +* Source code: http://github.com/pydata/pandas +* Binary installers on PyPI: http://pypi.python.org/pypi/pandas +* Documentation: http://pandas.pydata.org + +pandas 0.11.1 +============= + +**Release date:** not-yet-released + +**New features** + + - ``pd.read_html()`` can now parse HTML strings, files or urls and returns a + list of ``DataFrame`` s courtesy of @cpcloud. (:issue:`3477`, + :issue:`3605`, :issue:`3606`) + - Support for reading Amazon S3 files. (:issue:`3504`) + - Added module for reading and writing Stata files: pandas.io.stata (:issue:`1512`) + includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader + - Added support for writing in ``to_csv`` and reading in ``read_csv``, + multi-index columns. The ``header`` option in ``read_csv`` now accepts a + list of the rows from which to read the index. Added the option, + ``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of + writing and reading multi-index columns via a list of tuples. The default in + 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a + multi-index column. + Note: The default value will change in 0.12 to make the default *to* write and + read multi-index columns in the new format. (:issue:`3571`, :issue:`1651`, :issue:`3141`) + - Add iterator to ``Series.str`` (:issue:`3638`) + - ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`). 
+ - Added keyword parameters for different types of scatter_matrix subplots + - A ``filter`` method on grouped Series or DataFrames returns a subset of + the original (:issue:`3680`, :issue:`919`) + - Access to historical Google Finance data in pandas.io.data (:issue:`3814`) + +**Improvements to existing features** + + - Fixed various issues with internal pprinting code, the repr() for various objects + including TimeStamp and Index now produces valid python code strings and + can be used to recreate the object, (:issue:`3038`, :issue:`3379`, :issue:`3251`, :issue:`3460`) + - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``) + - ``HDFStore`` + + - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`) + - will warn with a ``AttributeConflictWarning`` if you are attempting to append + an index with a different frequency than the existing, or attempting + to append an index with a different name than the existing + - support datelike columns with a timezone as data_columns (:issue:`2852`) + - table writing performance improvements. + - support python3 (via ``PyTables 3.0.0``) (:issue:`3750`) + - Add modulo operator to Series, DataFrame + - Add ``date`` method to DatetimeIndex + - Simplified the API and added a describe method to Categorical + - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name`` + to specify custom column names of the returned DataFrame (:issue:`3649`), + thanks @hoechenberger + - clipboard functions use pyperclip (no dependencies on Windows, alternative + dependencies offered for Linux) (:issue:`3837`). + - Plotting functions now raise a ``TypeError`` before trying to plot anything + if the associated objects have have a dtype of ``object`` (:issue:`1818`, + :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to + numeric arrays if possible so that you can still plot, for example, an + object array with floats. 
This happens before any drawing takes place which + eliminates any spurious plots from showing up. + - Added Faq section on repr display options, to help users customize their setup. + - ``where`` operations that result in block splitting are much faster (:issue:`3733`) + - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`) + - DatetimeIndexes no longer try to convert mixed-integer indexes during join + operations (:issue:`3877`) + - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of + integers or floats that are in an epoch unit of ``s, ms, us, ns`` + (e.g. unix timestamps or epoch ``s``, with fractional seconds allowed) (:issue:`3540`) + - DataFrame corr method (spearman) is now cythonized. + +**API Changes** + + - ``HDFStore`` + + - When removing an object, ``remove(key)`` raises + ``KeyError`` if the key is not a valid store object. + - raise a ``TypeError`` on passing ``where`` or ``columns`` + to select with a Storer; these are invalid parameters at this time + - can now specify an ``encoding`` option to ``append/put`` + to enable alternate encodings (:issue:`3750`) + - enable support for ``iterator/chunksize`` with ``read_hdf`` + - The repr() for (Multi)Index now obeys display.max_seq_items rather + than numpy threshold print options. (:issue:`3426`, :issue:`3466`) + - Added mangle_dupe_cols option to read_table/csv, allowing users + to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (:issue:`3468`) + Note: The default value will change in 0.12 to the "no mangle" behaviour. + If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True + in your calls. + - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and + ``timedelta64[ns]`` to ``object/int`` (:issue:`3425`) + - The behavior of ``datetime64`` dtypes has changed with respect to certain + so-called reduction operations (:issue:`3726`). 
The following operations now + raise a ``TypeError`` when performed on a ``Series`` and return an *empty* + ``Series`` when performed on a ``DataFrame`` similar to performing these + operations on, for example, a ``DataFrame`` of ``slice`` objects: + - sum, prod, mean, std, var, skew, kurt, corr, and cov + - Do not allow datetimelike/timedeltalike creation except with valid types + (e.g. cannot pass ``datetime64[ms]``) (:issue:`3423`) + - Add ``squeeze`` keyword to ``groupby`` to allow reduction from + DataFrame -> Series if groups are unique. Regression from 0.10.1, + partial revert on (:issue:`2893`) with (:issue:`3596`) + - Raise on ``iloc`` when boolean indexing with a label based indexer mask + e.g. a boolean Series, even with integer labels, will raise. Since ``iloc`` + is purely positional based, the labels on the Series are not alignable (:issue:`3631`) + - The ``raise_on_error`` option to plotting methods is obviated by :issue:`3572`, + so it is removed. Plots now always raise when data cannot be plotted or the + object being plotted has a dtype of ``object``. + - ``DataFrame.interpolate()`` is now deprecated. Please use + ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (:issue:`3582`, + :issue:`3675`, :issue:`3676`). + - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are + deprecated + - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now + performs conversion by default. (:issue:`3907`) + - Deprecated display.height, display.width is now only a formatting option + does not control triggering of summary, similar to < 0.11.0. 
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column + to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (:issue:`3679`) + - io API changes + + - added ``pandas.io.api`` for i/o imports + - removed ``Excel`` support to ``pandas.io.excel`` + - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods + - removed ``clipboard`` support to ``pandas.io.clipboard`` + - replace top-level and instance methods ``save`` and ``load`` with + top-level ``read_pickle`` and ``to_pickle`` instance method, ``save`` and + ``load`` will give deprecation warning. + - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are + deprecated + - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are + deprecated + - Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`) + - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned + as an int, maxing with ``int64``, to avoid precision issues (:issue:`3733`) + - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions + e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (:issue:`3611`) + - ``read_html`` now defaults to ``None`` when reading, and falls back on + ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try + until success is also valid + - more consistency in the to_datetime return types (give string/array of string inputs) (:issue:`3888`) + +**Bug Fixes** + + - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel + support. Should provide python3 support (for reading) which has been + lacking. 
(:issue:`3164`) + - Allow unioning of date ranges sharing a timezone (:issue:`3491`) + - Fix to_csv issue when having a large number of rows and ``NaT`` in some + columns (:issue:`3437`) + - ``.loc`` was not raising when passed an integer list (:issue:`3449`) + - Unordered time series selection was misbehaving when using label slicing (:issue:`3448`) + - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (:issue:`3461`) + - DataFrames fetched via FRED now handle '.' as a NaN. (:issue:`3469`) + - Fix regression in a DataFrame apply with axis=1, objects were not being converted back + to base dtypes correctly (:issue:`3480`) + - Fix issue when storing uint dtypes in an HDFStore. (:issue:`3493`) + - Non-unique index support clarified (:issue:`3468`) + + - Addressed handling of dupe columns in df.to_csv new and old (:issue:`3454`, :issue:`3457`) + - Fix assigning a new index to a duplicate index in a DataFrame would fail (:issue:`3468`) + - Fix construction of a DataFrame with a duplicate index + - ref_locs support to allow duplicative indices across dtypes, + allows iget support to always find the index (even across dtypes) (:issue:`2194`) + - applymap on a DataFrame with a non-unique index now works + (removed warning) (:issue:`2786`), and fix (:issue:`3230`) + - Fix to_csv to handle non-unique columns (:issue:`3495`) + - Duplicate indexes with getitem will return items in the correct order (:issue:`3455`, :issue:`3457`) + and handle missing elements like unique indices (:issue:`3561`) + - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (:issue:`3562`) + - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (:issue:`3602`) + - Non-unique indexing with a slice via ``loc`` and friends fixed (:issue:`3659`) + - Allow insert/delete to non-unique columns (:issue:`3679`) + - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`) + - 
``DataFrame.itertuples()`` now works with frames with duplicate column + names (:issue:`3873`) + + - Fixed bug in groupby with empty series referencing a variable before assignment. (:issue:`3510`) + - Fixed bug in mixed-frame assignment with aligned series (:issue:`3492`) + - Fixed bug in selecting month/quarter/year from a series would not select the time element + on the last day (:issue:`3546`) + - Fixed a couple of MultiIndex rendering bugs in df.to_html() (:issue:`3547`, :issue:`3553`) + - Properly convert np.datetime64 objects in a Series (:issue:`3416`) + - Raise a ``TypeError`` on invalid datetime/timedelta operations + e.g. add datetimes, multiply timedelta x datetime + - Fix ``.diff`` on datelike and timedelta operations (:issue:`3100`) + - ``combine_first`` not returning the same dtype in cases where it can (:issue:`3552`) + - Fixed bug with ``Panel.transpose`` argument aliases (:issue:`3556`) + - Fixed platform bug in ``PeriodIndex.take`` (:issue:`3579`) + - Fixed bug in incorrect conversion of datetime64[ns] in ``combine_first`` (:issue:`3593`) + - Fixed bug in reset_index with ``NaN`` in a multi-index (:issue:`3586`) + - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter + is a ``list`` or ``tuple``. + - Fixed bug where a time-series was being selected in preference to an actual column name + in a frame (:issue:`3594`) + - Fix modulo and integer division on Series,DataFrames to act similarly to ``float`` dtypes to return + ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`) + - Fix incorrect dtype on groupby with ``as_index=False`` (:issue:`3610`) + - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]`` + was failing (:issue:`3611`) + - Disable HTML output in qtconsole again. (:issue:`3657`) + - Reworked the new repr display logic, which users found confusing. 
(:issue:`3663`) + - Fix indexing issue in ndim >= 3 with ``iloc`` (:issue:`3617`) + - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv`` + when ``parse_dates`` is specified (:issue:`3062`) + - Fix not consolidating before to_csv (:issue:`3624`) + - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (:issue:`3626`) or + a mixed DataFrame and a Series (:issue:`3668`) + - Fix plotting of unordered DatetimeIndex (:issue:`3601`) + - ``sql.write_frame`` failing when writing a single column to sqlite (:issue:`3628`), + thanks to @stonebig + - Fix pivoting with ``nan`` in the index (:issue:`3558`) + - Fix running of bs4 tests when it is not installed (:issue:`3605`) + - Fix parsing of html table (:issue:`3606`) + - ``read_html()`` now only allows a single backend: ``html5lib`` (:issue:`3616`) + - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings + into today's date + - ``DataFrame.from_records`` did not accept empty recarrays (:issue:`3682`) + - ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith + - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for + their first argument (:issue:`3702`) + - Fix file tokenization error with \r delimiter and quoted fields (:issue:`3453`) + - Groupby transform with item-by-item not upcasting correctly (:issue:`3740`) + - Incorrectly read a HDFStore multi-index Frame witha column specification (:issue:`3748`) + - ``read_html`` now correctly skips tests (:issue:`3741`) + - PandasObjects raise TypeError when trying to hash (:issue:`3882`) + - Fix incorrect arguments passed to concat that are not list-like (e.g. 
concat(df1,df2)) (:issue:`3481`) + - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) + in ``read_csv`` (:issue:`3795`) + - Fix index name not propagating when using ``loc/ix`` (:issue:`3880`) + - Fix groupby when applying a custom function resulting in a returned DataFrame was + not converting dtypes (:issue:`3911`) + - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression + in the ``to_replace`` argument wasn't working (:issue:`3907`) + - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing + two integer arrays with at least 10000 cells total (:issue:`3764`) + - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) + +.. _Gh3616: https://github.com/pydata/pandas/issues/3616 + +pandas 0.11.0 +============= + +**Release date:** 2013-04-22 + +**New features** + + - New documentation section, ``10 Minutes to Pandas`` + - New documentation section, ``Cookbook`` + - Allow mixed dtypes (e.g. ``float32/float64/int32/int16/int8``) to coexist in + DataFrames and propagate in operations + - Add function to pandas.io.data for retrieving stock index components from + Yahoo! 
finance (:issue:`2795`) + - Support slicing with time objects (:issue:`2681`) + - Added ``.iloc`` attribute, to support strict integer based indexing, + analogous to ``.ix`` (:issue:`2922`) + - Added ``.loc`` attribute, to support strict label based indexing, analogous + to ``.ix`` (:issue:`3053`) + - Added ``.iat`` attribute, to support fast scalar access via integers + (replaces ``iget_value/iset_value``) + - Added ``.at`` attribute, to support fast scalar access via labels (replaces + ``get_value/set_value``) + - Moved functionality from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer + (via ``_ixs`` methods in each object) + - Added support for expression evaluation using the ``numexpr`` library + - Added ``convert=boolean`` to ``take`` routines to translate negative + indices to positive, defaults to True + - Added to_series() method to indices, to facilitate the creation of indexers + (:issue:`3275`) + +**Improvements to existing features** + + - Improved performance of df.to_csv() by up to 10x in some cases. (:issue:`3059`) + - added ``blocks`` attribute to DataFrames, to return a dict of dtypes to + homogeneously dtyped DataFrames + - added keyword ``convert_numeric`` to ``convert_objects()`` to try to + convert object dtypes to numeric types (default is False) + - ``convert_dates`` in ``convert_objects`` can now be ``coerce`` which will + return a datetime64[ns] dtype with non-convertibles set as ``NaT``; will + preserve an all-nan object (e.g. strings), default is True (to perform + soft-conversion) + - Series print output now includes the dtype by default + - Optimize internal reindexing routines (:issue:`2819`, :issue:`2867`) + - ``describe_option()`` now reports the default and current value of options. 
+ - Add ``format`` option to ``pandas.to_datetime`` with faster conversion of + strings that can be parsed with datetime.strptime + - Add ``axes`` property to ``Series`` for compatibility + - Add ``xs`` function to ``Series`` for compatibility + - Allow setitem in a frame where only mixed numerics are present (e.g. int + and float), (:issue:`3037`) + - ``HDFStore`` + + - Provide dotted attribute access to ``get`` from stores + (e.g. store.df == store['df']) + - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` + are provided to support iteration on ``select`` and + ``select_as_multiple`` (:issue:`3076`) + - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` (:issue:`3222`) + + - Add ``squeeze`` method to possibly remove length 1 dimensions from an + object. + + .. ipython:: python + + p = Panel(randn(3,4,4),items=['ItemA','ItemB','ItemC'], + major_axis=date_range('20010102',periods=4), + minor_axis=['A','B','C','D']) + p + p.reindex(items=['ItemA']).squeeze() + p.reindex(items=['ItemA'],minor=['B']).squeeze() + + - Improvement to Yahoo API access in ``pd.io.data.Options`` (:issue:`2758`) + - added option `display.max_seq_items` to control the number of + elements printed per sequence when pretty-printing it. (:issue:`2979`) + - added option `display.chop_threshold` to control display of small numerical + values. (:issue:`2739`) + - added option `display.max_info_rows` to prevent verbose_info from being + calculated for frames above 1M rows (configurable). (:issue:`2807`, :issue:`2918`) + - value_counts() now accepts a "normalize" argument, for normalized + histograms. (:issue:`2710`). + - DataFrame.from_records now accepts not only dicts but any instance of + the collections.Mapping ABC. + - Allow selection semantics via a string with a datelike index to work in both + Series and DataFrames (:issue:`3070`) + + .. 
ipython:: python + + idx = date_range("2001-10-1", periods=5, freq='M') + ts = Series(np.random.rand(len(idx)),index=idx) + ts['2001'] + + df = DataFrame(dict(A = ts)) + df['2001'] + + - added option `display.mpl_style` providing a sleeker visual style + for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`). + + + - Improved performance across several core functions by taking memory + ordering of arrays into account. Courtesy of @stephenwlin (:issue:`3130`) + - Improved performance of groupby transform method (:issue:`2121`) + - Handle "ragged" CSV files missing trailing delimiters in rows with missing + fields when also providing explicit list of column names (so the parser + knows how many columns to expect in the result) (:issue:`2981`) + - On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame + on rhs (:issue:`3216`) + - Treat boolean values as integers (values 1 and 0) for numeric + operations. (:issue:`2641`) + - Add ``time`` method to DatetimeIndex (:issue:`3180`) + - Return NA when using Series.str[...] for values that are not long enough + (:issue:`3223`) + - Display cursor coordinate information in time-series plots (:issue:`1670`) + - to_html() now accepts an optional "escape" argument to control reserved + HTML character escaping (enabled by default) and escapes ``&``, in addition + to ``<`` and ``>``. (:issue:`2919`) + +**API Changes** + + - Do not automatically upcast numeric specified dtypes to ``int64`` or + ``float64`` (:issue:`622` and :issue:`797`) + - DataFrame construction of lists and scalars, with no dtype present, will + result in casting to ``int64`` or ``float64``, regardless of platform. + This is not an apparent change in the API, but noting it. 
+ - Guarantee that ``convert_objects()`` for Series/DataFrame always returns a + copy + - groupby operations will respect dtypes for numeric float operations + (float32/float64); other types will be operated on, and will try to cast + back to the input dtype (e.g. if an int is passed, as long as the output + doesn't have nans, then an int will be returned) + - backfill/pad/take/diff/ohlc will now support ``float32/int16/int8`` + operations + - Block types will upcast as needed in where/masking operations (:issue:`2793`) + - Series now automatically will try to set the correct dtype based on passed + datetimelike objects (datetime/Timestamp) + + - timedelta64 are returned in appropriate cases (e.g. Series - Series, + when both are datetime64) + - mixed datetimes and objects (:issue:`2751`) in a constructor will be cast + correctly + - astype on datetimes to object are now handled (as well as NaT + conversions to np.nan) + - all timedelta like objects will be correctly assigned to ``timedelta64`` + with mixed ``NaN`` and/or ``NaT`` allowed + + - arguments to DataFrame.clip were inconsistent to numpy and Series clipping + (:issue:`2747`) + - util.testing.assert_frame_equal now checks the column and index names (:issue:`2964`) + - Constructors will now return a more informative ValueError on failures + when invalid shapes are passed + - Don't suppress TypeError in GroupBy.agg (:issue:`3238`) + - Methods return None when inplace=True (:issue:`1893`) + - ``HDFStore`` + + - added the method ``select_column`` to select a single column from a table as a Series. + - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - ``min_itemsize`` parameter will now automatically create data_columns for passed keys + + - Downcast on pivot if possible (:issue:`3283`), adds argument ``downcast`` to ``fillna`` + - Introduced options `display.height/width` for explicitly specifying terminal + height/width in characters. 
Deprecated display.line_width, now replaced by display.width. + These defaults are in effect for scripts as well, so unless disabled, previously + very wide output will now be output as "expand_repr" style wrapped output. + - Various defaults for options (including display.max_rows) have been revised, + after a brief survey concluded they were wrong for everyone. Now at w=80,h=60. + - HTML repr output in IPython qtconsole is once again controlled by the option + `display.notebook_repr_html`, and on by default. + +**Bug Fixes** + + - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill`` + (:issue:`2778`) + - Single element ndarrays of datetimelike objects are handled + (e.g. np.array(datetime(2001,1,1,0,0))), w/o dtype being passed + - 0-dim ndarrays with a passed dtype are handled correctly + (e.g. np.array(0.,dtype='float32')) + - Fix some boolean indexing inconsistencies in Series.__getitem__/__setitem__ + (:issue:`2776`) + - Fix issues with DataFrame and Series constructor with integers that + overflow ``int64`` and some mixed typed type lists (:issue:`2845`) + + - ``HDFStore`` + + - Fix weird PyTables error when using too many selectors in a where + also correctly filter on any number of values in a Term expression + (so not using numexpr filtering, but isin filtering) + - Internally, change all variables to be private-like (now have leading + underscore) + - Fixes for query parsing to correctly interpret boolean and != (:issue:`2849`, :issue:`2973`) + - Fixes for pathological case on SparseSeries with 0-len array and + compression (:issue:`2931`) + - Fixes bug with writing rows if part of a block was all-nan (:issue:`3012`) + - Exceptions are now ValueError or TypeError as needed + - A table will now raise if min_itemsize contains fields which are not queryables + + - Bug showing up in applymap where some object type columns are converted (:issue:`2909`) + had an incorrect default in convert_objects + + - TimeDeltas + + - Series ops with a 
Timestamp on the rhs was throwing an exception (:issue:`2898`) + added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike + Series on both lhs and rhs + - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (:issue:`3094`) + - Fixed some formatting issues on timedelta when negative + - Support null checking on timedelta64, representing (and formatting) with NaT + - Support setitem with np.nan value, converts to NaT + - Support min/max ops in a Dataframe (abs not working, nor do we error on non-supported ops) + - Support idxmin/idxmax/abs/max/min in a Series (:issue:`2989`, :issue:`2982`) + + - Bug on in-place putmasking on an ``integer`` series that needs to be converted to + ``float`` (:issue:`2746`) + - Bug in argsort of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2967`) + - Bug in value_counts of ``datetime64[ns]`` Series (:issue:`3002`) + - Fixed printing of ``NaT`` in an index + - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2982`) + - Bug in ``icol, take`` with negative indices was producing incorrect return + values (see :issue:`2922`, :issue:`2892`), also check for out-of-bounds indices (:issue:`3029`) + - Bug in DataFrame column insertion when the column creation fails, existing frame is left in + an irrecoverable state (:issue:`3010`) + - Bug in DataFrame update, combine_first where non-specified values could cause + dtype changes (:issue:`3016`, :issue:`3041`) + - Bug in groupby with first/last where dtypes could change (:issue:`3041`, :issue:`2763`) + - Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from + other values), (:issue:`2850`) + - Unstack of a frame with no nans would always cause dtype upcasting (:issue:`2929`) + - Fix scalar datetime.datetime parsing bug in read_csv (:issue:`3071`) + - Fixed slow printing of large Dataframes, due to inefficient dtype + reporting (:issue:`2807`) + - Fixed a segfault when using a function as grouper in groupby 
(:issue:`3035`) + - Fix pretty-printing of infinite data structures (closes :issue:`2978`) + - Fixed exception when plotting timeseries bearing a timezone (closes :issue:`2877`) + - str.contains ignored na argument (:issue:`2806`) + - Substitute warning for segfault when grouping with categorical grouper + of mismatched length (:issue:`3011`) + - Fix exception in SparseSeries.density (:issue:`2083`) + - Fix upsampling bug with closed='left' and daily to daily data (:issue:`3020`) + - Fixed missing tick bars on scatter_matrix plot (:issue:`3063`) + - Fixed bug in Timestamp(d,tz=foo) when d is date() rather than datetime() (:issue:`2993`) + - series.plot(kind='bar') now respects pylab color scheme (:issue:`3115`) + - Fixed bug in reshape if not passed correct input, now raises TypeError (:issue:`2719`) + - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (:issue:`3282`) + - Fix NameError issue on RESO_US (:issue:`2787`) + - Allow selection in an *unordered* timeseries to work similarly + to an *ordered* timeseries (:issue:`2437`). 
+ - Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (:issue:`2903`) + - Timestamp now supports the class method fromordinal similar to datetimes (:issue:`3042`) + - Fix issue with indexing a series with a boolean key and specifiying a 1-len list on the rhs (:issue:`2745`) + or a list on the rhs (:issue:`3235`) + - Fixed bug in groupby apply when kernel generate list of arrays having unequal len (:issue:`1738`) + - fixed handling of rolling_corr with center=True which could produce corr>1 (:issue:`3155`) + - Fixed issues where indices can be passed as 'index/column' in addition to 0/1 for the axis parameter + - PeriodIndex.tolist now boxes to Period (:issue:`3178`) + - PeriodIndex.get_loc KeyError now reports Period instead of ordinal (:issue:`3179`) + - df.to_records bug when handling MultiIndex (GH3189) + - Fix Series.__getitem__ segfault when index less than -length (:issue:`3168`) + - Fix bug when using Timestamp as a date parser (:issue:`2932`) + - Fix bug creating date range from Timestamp with time zone and passing same + time zone (:issue:`2926`) + - Add comparison operators to Period object (:issue:`2781`) + - Fix bug when concatenating two Series into a DataFrame when they have the + same name (:issue:`2797`) + - Fix automatic color cycling when plotting consecutive timeseries + without color arguments (:issue:`2816`) + - fixed bug in the pickling of PeriodIndex (:issue:`2891`) + - Upcast/split blocks when needed in a mixed DataFrame when setitem + with an indexer (:issue:`3216`) + - Invoking df.applymap on a dataframe with dupe cols now raises a ValueError (:issue:`2786`) + - Apply with invalid returned indices raise correct Exception (:issue:`2808`) + - Fixed a bug in plotting log-scale bar plots (:issue:`3247`) + - df.plot() grid on/off now obeys the mpl default style, just like + series.plot(). 
(:issue:`3233`) + - Fixed a bug in the legend of plotting.andrews_curves() (:issue:`3278`) + - Produce a series on apply if we only generate a singular series and have + a simple index (:issue:`2893`) + - Fix Python ascii file parsing when integer falls outside of floating point + spacing (:issue:`3258`) + - fixed pretty printing of sets (:issue:`3294`) + - Panel() and Panel.from_dict() now respects ordering when given an OrderedDict (:issue:`3303`) + - DataFrame where with a datetimelike incorrectly selecting (:issue:`3311`) + - Ensure index casts work even in Int64Index + - Fix set_index segfault when passing MultiIndex (:issue:`3308`) + - Ensure pickles created in py2 can be read in py3 + - Insert ellipsis in MultiIndex summary repr (:issue:`3348`) + - Groupby will handle mutation among an input groups columns (and fallback + to non-fast apply) (:issue:`3380`) + - Eliminated unicode errors on FreeBSD when using MPL GTK backend (:issue:`3360`) + - Period.strftime should return unicode strings always (:issue:`3363`) + - Respect passed read_* chunksize in get_chunk function (:issue:`3406`) + + +pandas 0.10.1 +============= + +**Release date:** 2013-01-22 + +**New features** + + - Add data interface to World Bank WDI pandas.io.wb (:issue:`2592`) + +**API Changes** + + - Restored inplace=True behavior returning self (same object) with + deprecation warning until 0.11 (:issue:`1893`) + - ``HDFStore`` + + - refactored HDFStore to deal with non-table stores as objects, will allow future enhancements + - removed keyword ``compression`` from ``put`` (replaced by keyword + ``complib`` to be consistent across library) + - warn `PerformanceWarning` if you are attempting to store types that will be pickled by PyTables + +**Improvements to existing features** + + - ``HDFStore`` + + - enables storing of multi-index dataframes (closes :issue:`1277`) + - support data column indexing and selection, via ``data_columns`` keyword + in append + - support write chunking to reduce memory 
footprint, via ``chunksize`` + keyword to append + - support automagic indexing via ``index`` keyword to append + - support ``expectedrows`` keyword in append to inform ``PyTables`` about + the expected tablesize + - support ``start`` and ``stop`` keywords in select to limit the row + selection space + - added ``get_store`` context manager to automatically import with pandas + - added column filtering via ``columns`` keyword in select + - added methods append_to_multiple/select_as_multiple/select_as_coordinates + to do multiple-table append/selection + - added support for datetime64 in columns + - added method ``unique`` to select the unique values in an indexable or + data column + - added method ``copy`` to copy an existing store (and possibly upgrade) + - show the shape of the data on disk for non-table stores when printing the + store + - added ability to read PyTables flavor tables (allows compatiblity to + other HDF5 systems) + + - Add ``logx`` option to DataFrame/Series.plot (:issue:`2327`, :issue:`2565`) + - Support reading gzipped data from file-like object + - ``pivot_table`` aggfunc can be anything used in GroupBy.aggregate (:issue:`2643`) + - Implement DataFrame merges in case where set cardinalities might overflow + 64-bit integer (:issue:`2690`) + - Raise exception in C file parser if integer dtype specified and have NA + values. 
(:issue:`2631`) + - Attempt to parse ISO8601 format dates when parse_dates=True in read_csv for + major performance boost in such cases (:issue:`2698`) + - Add methods ``neg`` and ``inv`` to Series + - Implement ``kind`` option in ``ExcelFile`` to indicate whether it's an XLS + or XLSX file (:issue:`2613`) + +**Bug fixes** + + - Fix read_csv/read_table multithreading issues (:issue:`2608`) + - ``HDFStore`` + + - correctly handle ``nan`` elements in string columns; serialize via the + ``nan_rep`` keyword to append + - raise correctly on non-implemented column types (unicode/date) + - handle correctly ``Term`` passed types (e.g. ``index<1000``, when index + is ``Int64``), (closes :issue:`512`) + - handle Timestamp correctly in data_columns (closes :issue:`2637`) + - contains correctly matches on non-natural names + - correctly store ``float32`` dtypes in tables (if not other float types in + the same table) + + - Fix DataFrame.info bug with UTF8-encoded columns. (:issue:`2576`) + - Fix DatetimeIndex handling of FixedOffset tz (:issue:`2604`) + - More robust detection of being in IPython session for wide DataFrame + console formatting (:issue:`2585`) + - Fix platform issues with ``file:///`` in unit test (:issue:`2564`) + - Fix bug and possible segfault when grouping by hierarchical level that + contains NA values (:issue:`2616`) + - Ensure that MultiIndex tuples can be constructed with NAs (:issue:`2616`) + - Fix int64 overflow issue when unstacking MultiIndex with many levels + (:issue:`2616`) + - Exclude non-numeric data from DataFrame.quantile by default (:issue:`2625`) + - Fix a Cython C int64 boxing issue causing read_csv to return incorrect + results (:issue:`2599`) + - Fix groupby summing performance issue on boolean data (:issue:`2692`) + - Don't bork Series containing datetime64 values with to_datetime (:issue:`2699`) + - Fix DataFrame.from_records corner case when passed columns, index column, + but empty record list (:issue:`2633`) + - Fix C 
parser-tokenizer bug with trailing fields. (:issue:`2668`) + - Don't exclude non-numeric data from GroupBy.max/min (:issue:`2700`) + - Don't lose time zone when calling DatetimeIndex.drop (:issue:`2621`) + - Fix setitem on a Series with a boolean key and a non-scalar as value + (:issue:`2686`) + - Box datetime64 values in Series.apply/map (:issue:`2627`, :issue:`2689`) + - Upconvert datetime + datetime64 values when concatenating frames (:issue:`2624`) + - Raise a more helpful error message in merge operations when one DataFrame + has duplicate columns (:issue:`2649`) + - Fix partial date parsing issue occuring only when code is run at EOM + (:issue:`2618`) + - Prevent MemoryError when using counting sort in sortlevel with + high-cardinality MultiIndex objects (:issue:`2684`) + - Fix Period resampling bug when all values fall into a single bin (:issue:`2070`) + - Fix buggy interaction with usecols argument in read_csv when there is an + implicit first index column (:issue:`2654`) + + +pandas 0.10.0 +============= + +**Release date:** 2012-12-17 + +**New features** + + - Brand new high-performance delimited file parsing engine written in C and + Cython. 50% or better performance in many standard use cases with a + fraction as much memory usage. (:issue:`407`, :issue:`821`) + - Many new file parser (read_csv, read_table) features: + + - Support for on-the-fly gzip or bz2 decompression (`compression` option) + - Ability to get back numpy.recarray instead of DataFrame + (`as_recarray=True`) + - `dtype` option: explicit column dtypes + - `usecols` option: specify list of columns to be read from a file. 
Good + for reading very wide files with many irrelevant columns (:issue:`1216` :issue:`926`, :issue:`2465`) + - Enhanced unicode decoding support via `encoding` option + - `skipinitialspace` dialect option + - Can specify strings to be recognized as True (`true_values`) or False + (`false_values`) + - High-performance `delim_whitespace` option for whitespace-delimited + files; a preferred alternative to the '\s+' regular expression delimiter + - Option to skip "bad" lines (wrong number of fields) that would otherwise + have caused an error in the past (`error_bad_lines` and `warn_bad_lines` + options) + - Substantially improved performance in the parsing of integers with + thousands markers and lines with comments + - Easy of European (and other) decimal formats (`decimal` option) (:issue:`584`, :issue:`2466`) + - Custom line terminators (e.g. lineterminator='~') (:issue:`2457`) + - Handling of no trailing commas in CSV files (:issue:`2333`) + - Ability to handle fractional seconds in date_converters (:issue:`2209`) + - read_csv allow scalar arg to na_values (:issue:`1944`) + - Explicit column dtype specification in read_* functions (:issue:`1858`) + - Easier CSV dialect specification (:issue:`1743`) + - Improve parser performance when handling special characters (:issue:`1204`) + + - Google Analytics API integration with easy oauth2 workflow (:issue:`2283`) + - Add error handling to Series.str.encode/decode (:issue:`2276`) + - Add ``where`` and ``mask`` to Series (:issue:`2337`) + - Grouped histogram via `by` keyword in Series/DataFrame.hist (:issue:`2186`) + - Support optional ``min_periods`` keyword in ``corr`` and ``cov`` + for both Series and DataFrame (:issue:`2002`) + - Add ``duplicated`` and ``drop_duplicates`` functions to Series (:issue:`1923`) + - Add docs for ``HDFStore table`` format + - 'density' property in `SparseSeries` (:issue:`2384`) + - Add ``ffill`` and ``bfill`` convenience functions for forward- and + backfilling time series data 
(:issue:`2284`) + - New option configuration system and functions `set_option`, `get_option`, + `describe_option`, and `reset_option`. Deprecate `set_printoptions` and + `reset_printoptions` (:issue:`2393`). + You can also access options as attributes via ``pandas.options.X`` + - Wide DataFrames can be viewed more easily in the console with new + `expand_frame_repr` and `line_width` configuration options. This is on by + default now (:issue:`2436`) + - Scikits.timeseries-like moving window functions via ``rolling_window`` (:issue:`1270`) + +**Experimental Features** + + - Add support for Panel4D, a named 4 Dimensional structure + - Add support for ndpanel factory functions, to create custom, + domain-specific N-Dimensional containers + +**API Changes** + + - The default binning/labeling behavior for ``resample`` has been changed to + `closed='left', label='left'` for daily and lower frequencies. This had + been a large source of confusion for users. See "what's new" page for more + on this. (:issue:`2410`) + - Methods with ``inplace`` option now return None instead of the calling + (modified) object (:issue:`1893`) + - The special case DataFrame - TimeSeries doing column-by-column broadcasting + has been deprecated. Users should explicitly do e.g. df.sub(ts, axis=0) + instead. This is a legacy hack and can lead to subtle bugs. + - inf/-inf are no longer considered as NA by isnull/notnull. To be clear, this + is legacy cruft from early pandas. This behavior can be globally re-enabled + using the new option ``mode.use_inf_as_null`` (:issue:`2050`, :issue:`1919`) + - ``pandas.merge`` will now default to ``sort=False``. For many use cases + sorting the join keys is not necessary, and doing it by default is wasteful + - Specify ``header=0`` explicitly to replace existing column names in file in + read_* functions. + - Default column names for header-less parsed files (yielded by read_csv, + etc.) are now the integers 0, 1, .... 
A new argument `prefix` has been + added; to get the v0.9.x behavior specify ``prefix='X'`` (:issue:`2034`). This API + change was made to make the default column names more consistent with the + DataFrame constructor's default column names when none are specified. + - DataFrame selection using a boolean frame now preserves input shape + - If function passed to Series.apply yields a Series, result will be a + DataFrame (:issue:`2316`) + - Values like YES/NO/yes/no will not be considered as boolean by default any + longer in the file parsers. This can be customized using the new + ``true_values`` and ``false_values`` options (:issue:`2360`) + - `obj.fillna()` is no longer valid; make `method='pad'` no longer the + default option, to be more explicit about what kind of filling to + perform. Add `ffill/bfill` convenience functions per above (:issue:`2284`) + - `HDFStore.keys()` now returns an absolute path-name for each key + - `to_string()` now always returns a unicode string. (:issue:`2224`) + - File parsers will not handle NA sentinel values arising from passed + converter functions + +**Improvements to existing features** + + - Add ``nrows`` option to DataFrame.from_records for iterators (:issue:`1794`) + - Unstack/reshape algorithm rewrite to avoid high memory use in cases where + the number of observed key-tuples is much smaller than the total possible + number that could occur (:issue:`2278`). Also improves performance in most cases. 
+ - Support duplicate columns in DataFrame.from_records (:issue:`2179`) + - Add ``normalize`` option to Series/DataFrame.asfreq (:issue:`2137`) + - SparseSeries and SparseDataFrame construction from empty and scalar + values now no longer create dense ndarrays unnecessarily (:issue:`2322`) + - ``HDFStore`` now supports hierarchial keys (:issue:`2397`) + - Support multiple query selection formats for ``HDFStore tables`` (:issue:`1996`) + - Support ``del store['df']`` syntax to delete HDFStores + - Add multi-dtype support for ``HDFStore tables`` + - ``min_itemsize`` parameter can be specified in ``HDFStore table`` creation + - Indexing support in ``HDFStore tables`` (:issue:`698`) + - Add `line_terminator` option to DataFrame.to_csv (:issue:`2383`) + - added implementation of str(x)/unicode(x)/bytes(x) to major pandas data + structures, which should do the right thing on both py2.x and py3.x. (:issue:`2224`) + - Reduce groupby.apply overhead substantially by low-level manipulation of + internal NumPy arrays in DataFrames (:issue:`535`) + - Implement ``value_vars`` in ``melt`` and add ``melt`` to pandas namespace + (:issue:`2412`) + - Added boolean comparison operators to Panel + - Enable ``Series.str.strip/lstrip/rstrip`` methods to take an argument (:issue:`2411`) + - The DataFrame ctor now respects column ordering when given + an OrderedDict (:issue:`2455`) + - Assigning DatetimeIndex to Series changes the class to TimeSeries (:issue:`2139`) + - Improve performance of .value_counts method on non-integer data (:issue:`2480`) + - ``get_level_values`` method for MultiIndex return Index instead of ndarray (:issue:`2449`) + - ``convert_to_r_dataframe`` conversion for datetime values (:issue:`2351`) + - Allow ``DataFrame.to_csv`` to represent inf and nan differently (:issue:`2026`) + - Add ``min_i`` argument to ``nancorr`` to specify minimum required observations (:issue:`2002`) + - Add ``inplace`` option to ``sortlevel`` / ``sort`` functions on DataFrame (:issue:`1873`) 
+ - Enable DataFrame to accept scalar constructor values like Series (:issue:`1856`) + - DataFrame.from_records now takes optional ``size`` parameter (:issue:`1794`) + - include iris dataset (:issue:`1709`) + - No datetime64 DataFrame column conversion of datetime.datetime with tzinfo (:issue:`1581`) + - Micro-optimizations in DataFrame for tracking state of internal consolidation (:issue:`217`) + - Format parameter in DataFrame.to_csv (:issue:`1525`) + - Partial string slicing for ``DatetimeIndex`` for daily and higher frequencies (:issue:`2306`) + - Implement ``col_space`` parameter in ``to_html`` and ``to_string`` in DataFrame (:issue:`1000`) + - Override ``Series.tolist`` and box datetime64 types (:issue:`2447`) + - Optimize ``unstack`` memory usage by compressing indices (:issue:`2278`) + - Fix HTML repr in IPython qtconsole if opening window is small (:issue:`2275`) + - Escape more special characters in console output (:issue:`2492`) + - df.select now invokes bool on the result of crit(x) (:issue:`2487`) + +**Bug fixes** + + - Fix major performance regression in DataFrame.iteritems (:issue:`2273`) + - Fixes bug when negative period passed to Series/DataFrame.diff (:issue:`2266`) + - Escape tabs in console output to avoid alignment issues (:issue:`2038`) + - Properly box datetime64 values when retrieving cross-section from + mixed-dtype DataFrame (:issue:`2272`) + - Fix concatenation bug leading to :issue:`2057`, :issue:`2257` + - Fix regression in Index console formatting (:issue:`2319`) + - Box Period data when assigning PeriodIndex to frame column (:issue:`2243`, :issue:`2281`) + - Raise exception on calling reset_index on Series with inplace=True (:issue:`2277`) + - Enable setting multiple columns in DataFrame with hierarchical columns + (:issue:`2295`) + - Respect dtype=object in DataFrame constructor (:issue:`2291`) + - Fix DatetimeIndex.join bug with tz-aware indexes and how='outer' (:issue:`2317`) + - pop(...) 
and del works with DataFrame with duplicate columns (:issue:`2349`) + - Treat empty strings as NA in date parsing (rather than let dateutil do + something weird) (:issue:`2263`) + - Prevent uint64 -> int64 overflows (:issue:`2355`) + - Enable joins between MultiIndex and regular Index (:issue:`2024`) + - Fix time zone metadata issue when unioning non-overlapping DatetimeIndex + objects (:issue:`2367`) + - Raise/handle int64 overflows in parsers (:issue:`2247`) + - Deleting of consecutive rows in ``HDFStore tables`` is much faster than before + - Appending on a HDFStore would fail if the table was not first created via ``put`` + - Use `col_space` argument as minimum column width in DataFrame.to_html (:issue:`2328`) + - Fix tz-aware DatetimeIndex.to_period (:issue:`2232`) + - Fix DataFrame row indexing case with MultiIndex (:issue:`2314`) + - Fix to_excel exporting issues with Timestamp objects in index (:issue:`2294`) + - Fixes assigning scalars and array to hierarchical column chunk (:issue:`1803`) + - Fixed a UnicodeDecodeError with series tidy_repr (:issue:`2225`) + - Fixed issues with duplicate keys in an index (:issue:`2347`, :issue:`2380`) + - Fixed issues re: Hash randomization, default on starting w/ py3.3 (:issue:`2331`) + - Fixed issue with missing attributes after loading a pickled dataframe (:issue:`2431`) + - Fix Timestamp formatting with tzoffset time zone in dateutil 2.1 (:issue:`2443`) + - Fix GroupBy.apply issue when using BinGrouper to do ts binning (:issue:`2300`) + - Fix issues resulting from datetime.datetime columns being converted to + datetime64 when calling DataFrame.apply. 
(:issue:`2374`) + - Raise exception when calling to_panel on non uniquely-indexed frame (:issue:`2441`) + - Improved detection of console encoding on IPython zmq frontends (:issue:`2458`) + - Preserve time zone when .append-ing two time series (:issue:`2260`) + - Box timestamps when calling reset_index on time-zone-aware index rather + than creating a tz-less datetime64 column (:issue:`2262`) + - Enable searching non-string columns in DataFrame.filter(like=...) (:issue:`2467`) + - Fixed issue with losing nanosecond precision upon conversion to DatetimeIndex (:issue:`2252`) + - Handle timezones in Datetime.normalize (:issue:`2338`) + - Fix test case where dtype specification with endianness causes + failures on big endian machines (:issue:`2318`) + - Fix plotting bug where upsampling causes data to appear shifted in time (:issue:`2448`) + - Fix ``read_csv`` failure for UTF-16 with BOM and skiprows (:issue:`2298`) + - read_csv with names arg not implicitly setting header=None (:issue:`2459`) + - Unrecognized compression mode causes segfault in read_csv (:issue:`2474`) + - In read_csv, header=0 and passed names should discard first row (:issue:`2269`) + - Correctly route to stdout/stderr in read_table (:issue:`2071`) + - Fix exception when Timestamp.to_datetime is called on a Timestamp with tzoffset (:issue:`2471`) + - Fixed unintentional conversion of datetime64 to long in groupby.first() (:issue:`2133`) + - Union of empty DataFrames now returns empty with concatenated index (:issue:`2307`) + - DataFrame.sort_index raises more helpful exception if sorting by column + with duplicates (:issue:`2488`) + - DataFrame.to_string formatters can be list, too (:issue:`2520`) + - DataFrame.combine_first will always result in the union of the index and + columns, even if one DataFrame is length-zero (:issue:`2525`) + - Fix several DataFrame.icol/irow with duplicate indices issues (:issue:`2228`, :issue:`2259`) + - Use Series names for column names when using concat with axis=1 
(:issue:`2489`) + - Raise Exception if start, end, periods all passed to date_range (:issue:`2538`) + - Fix Panel resampling issue (:issue:`2537`) + + + +pandas 0.9.1 +============ + +**Release date:** 2012-11-14 + +**New features** + + - Can specify multiple sort orders in DataFrame/Series.sort/sort_index (:issue:`928`) + - New `top` and `bottom` options for handling NAs in rank (:issue:`1508`, :issue:`2159`) + - Add `where` and `mask` functions to DataFrame (:issue:`2109`, :issue:`2151`) + - Add `at_time` and `between_time` functions to DataFrame (:issue:`2149`) + - Add flexible `pow` and `rpow` methods to DataFrame (:issue:`2190`) + +**API Changes** + + - Upsampling period index "spans" intervals. Example: annual periods + upsampled to monthly will span all months in each year + - Period.end_time will yield timestamp at last nanosecond in the interval + (:issue:`2124`, :issue:`2125`, :issue:`1764`) + - File parsers no longer coerce to float or bool for columns that have custom + converters specified (:issue:`2184`) + +**Improvements to existing features** + + - Time rule inference for week-of-month (e.g. WOM-2FRI) rules (:issue:`2140`) + - Improve performance of datetime + business day offset with large number of + offset periods + - Improve HTML display of DataFrame objects with hierarchical columns + - Enable referencing of Excel columns by their column names (:issue:`1936`) + - DataFrame.dot can accept ndarrays (:issue:`2042`) + - Support negative periods in Panel.shift (:issue:`2164`) + - Make .drop(...) 
work with non-unique indexes (:issue:`2101`) + - Improve performance of Series/DataFrame.diff (re: :issue:`2087`) + - Support unary ~ (__invert__) in DataFrame (:issue:`2110`) + - Turn off pandas-style tick locators and formatters (:issue:`2205`) + - DataFrame[DataFrame] uses DataFrame.where to compute masked frame (:issue:`2230`) + +**Bug fixes** + + - Fix some duplicate-column DataFrame constructor issues (:issue:`2079`) + - Fix bar plot color cycle issues (:issue:`2082`) + - Fix off-center grid for stacked bar plots (:issue:`2157`) + - Fix plotting bug if inferred frequency is offset with N > 1 (:issue:`2126`) + - Implement comparisons on date offsets with fixed delta (:issue:`2078`) + - Handle inf/-inf correctly in read_* parser functions (:issue:`2041`) + - Fix matplotlib unicode interaction bug + - Make WLS r-squared match statsmodels 0.5.0 fixed value + - Fix zero-trimming DataFrame formatting bug + - Correctly compute/box datetime64 min/max values from Series.min/max (:issue:`2083`) + - Fix unstacking edge case with unrepresented groups (:issue:`2100`) + - Fix Series.str failures when using pipe pattern '|' (:issue:`2119`) + - Fix pretty-printing of dict entries in Series, DataFrame (:issue:`2144`) + - Cast other datetime64 values to nanoseconds in DataFrame ctor (:issue:`2095`) + - Alias Timestamp.astimezone to tz_convert, so will yield Timestamp (:issue:`2060`) + - Fix timedelta64 formatting from Series (:issue:`2165`, :issue:`2146`) + - Handle None values gracefully in dict passed to Panel constructor (:issue:`2075`) + - Box datetime64 values as Timestamp objects in Series/DataFrame.iget (:issue:`2148`) + - Fix Timestamp indexing bug in DatetimeIndex.insert (:issue:`2155`) + - Use index name(s) (if any) in DataFrame.to_records (:issue:`2161`) + - Don't lose index names in Panel.to_frame/DataFrame.to_panel (:issue:`2163`) + - Work around length-0 boolean indexing NumPy bug (:issue:`2096`) + - Fix partial integer indexing bug in DataFrame.xs 
(:issue:`2107`) + - Fix variety of cut/qcut string-bin formatting bugs (:issue:`1978`, :issue:`1979`) + - Raise Exception when xs view not possible of MultiIndex'd DataFrame (:issue:`2117`) + - Fix groupby(...).first() issue with datetime64 (:issue:`2133`) + - Better floating point error robustness in some rolling_* functions + (:issue:`2114`, :issue:`2527`) + - Fix ewma NA handling in the middle of Series (:issue:`2128`) + - Fix numerical precision issues in diff with integer data (:issue:`2087`) + - Fix bug in MultiIndex.__getitem__ with NA values (:issue:`2008`) + - Fix DataFrame.from_records dict-arg bug when passing columns (:issue:`2179`) + - Fix Series and DataFrame.diff for integer dtypes (:issue:`2087`, :issue:`2174`) + - Fix bug when taking intersection of DatetimeIndex with empty index (:issue:`2129`) + - Pass through timezone information when calling DataFrame.align (:issue:`2127`) + - Properly sort when joining on datetime64 values (:issue:`2196`) + - Fix indexing bug in which False/True were being coerced to 0/1 (:issue:`2199`) + - Many unicode formatting fixes (:issue:`2201`) + - Fix improper MultiIndex conversion issue when assigning + e.g. 
DataFrame.index (:issue:`2200`) + - Fix conversion of mixed-type DataFrame to ndarray with dup columns (:issue:`2236`) + - Fix duplicate columns issue (:issue:`2218`, :issue:`2219`) + - Fix SparseSeries.__pow__ issue with NA input (:issue:`2220`) + - Fix icol with integer sequence failure (:issue:`2228`) + - Fixed resampling tz-aware time series issue (:issue:`2245`) + - SparseDataFrame.icol was not returning SparseSeries (:issue:`2227`, :issue:`2229`) + - Enable ExcelWriter to handle PeriodIndex (:issue:`2240`) + - Fix issue constructing DataFrame from empty Series with name (:issue:`2234`) + - Use console-width detection in interactive sessions only (:issue:`1610`) + - Fix parallel_coordinates legend bug with mpl 1.2.0 (:issue:`2237`) + - Make tz_localize work in corner case of empty Series (:issue:`2248`) + + + +pandas 0.9.0 +============ + +**Release date:** 10/7/2012 + +**New features** + + - Add ``str.encode`` and ``str.decode`` to Series (:issue:`1706`) + - Add `to_latex` method to DataFrame (:issue:`1735`) + - Add convenient expanding window equivalents of all rolling_* ops (:issue:`1785`) + - Add Options class to pandas.io.data for fetching options data from Yahoo! 
+ Finance (:issue:`1748`, :issue:`1739`) + - Recognize and convert more boolean values in file parsing (Yes, No, TRUE, + FALSE, variants thereof) (:issue:`1691`, :issue:`1295`) + - Add Panel.update method, analogous to DataFrame.update (:issue:`1999`, :issue:`1988`) + +**Improvements to existing features** + + - Proper handling of NA values in merge operations (:issue:`1990`) + - Add ``flags`` option for ``re.compile`` in some Series.str methods (:issue:`1659`) + - Parsing of UTC date strings in read_* functions (:issue:`1693`) + - Handle generator input to Series (:issue:`1679`) + - Add `na_action='ignore'` to Series.map to quietly propagate NAs (:issue:`1661`) + - Add args/kwds options to Series.apply (:issue:`1829`) + - Add inplace option to Series/DataFrame.reset_index (:issue:`1797`) + - Add ``level`` parameter to ``Series.reset_index`` + - Add quoting option for DataFrame.to_csv (:issue:`1902`) + - Indicate long column value truncation in DataFrame output with ... (:issue:`1854`) + - DataFrame.dot will not do data alignment, and also work with Series (:issue:`1915`) + - Add ``na`` option for missing data handling in some vectorized string + methods (:issue:`1689`) + - If index_label=False in DataFrame.to_csv, do not print fields/commas in the + text output. Results in easier importing into R (:issue:`1583`) + - Can pass tuple/list of axes to DataFrame.dropna to simplify repeated calls + (dropping both columns and rows) (:issue:`924`) + - Improve DataFrame.to_html output for hierarchically-indexed rows (do not + repeat levels) (:issue:`1929`) + - TimeSeries.between_time can now select times across midnight (:issue:`1871`) + - Enable `skip_footer` parameter in `ExcelFile.parse` (:issue:`1843`) + +**API Changes** + + - Change default header names in read_* functions to more Pythonic X0, X1, + etc. instead of X.1, X.2. 
(:issue:`2000`) + - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear`` + (:issue:`1723`) + - Don't modify NumPy suppress printoption at import time + - The internal HDF5 data arrangement for DataFrames has been + transposed. Legacy files will still be readable by HDFStore (:issue:`1834`, :issue:`1824`) + - Legacy cruft removed: pandas.stats.misc.quantileTS + - Use ISO8601 format for Period repr: monthly, daily, and on down (:issue:`1776`) + - Empty DataFrame columns are now created as object dtype. This will prevent + a class of TypeErrors that was occurring in code where the dtype of a + column would depend on the presence of data or not (e.g. a SQL query having + results) (:issue:`1783`) + - Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame + (:issue:`1630`) + - `first` and `last` methods in `GroupBy` no longer drop non-numeric columns + (:issue:`1809`) + - Resolved inconsistencies in specifying custom NA values in text parser. + `na_values` of type dict no longer override default NAs unless + `keep_default_na` is set to false explicitly (:issue:`1657`) + - Enable `skipfooter` parameter in text parsers as an alias for `skip_footer` + +**Bug fixes** + + - Perform arithmetic column-by-column in mixed-type DataFrame to avoid type + upcasting issues. Caused downstream DataFrame.diff bug (:issue:`1896`) + - Fix matplotlib auto-color assignment when no custom spectrum passed. 
Also + respect passed color keyword argument (:issue:`1711`) + - Fix resampling logical error with closed='left' (:issue:`1726`) + - Fix critical DatetimeIndex.union bugs (:issue:`1730`, :issue:`1719`, :issue:`1745`, :issue:`1702`, :issue:`1753`) + - Fix critical DatetimeIndex.intersection bug with unanchored offsets (:issue:`1708`) + - Fix MM-YYYY time series indexing case (:issue:`1672`) + - Fix case where Categorical group key was not being passed into index in + GroupBy result (:issue:`1701`) + - Handle Ellipsis in Series.__getitem__/__setitem__ (:issue:`1721`) + - Fix some bugs with handling datetime64 scalars of other units in NumPy 1.6 + and 1.7 (:issue:`1717`) + - Fix performance issue in MultiIndex.format (:issue:`1746`) + - Fixed GroupBy bugs interacting with DatetimeIndex asof / map methods (:issue:`1677`) + - Handle factors with NAs in pandas.rpy (:issue:`1615`) + - Fix statsmodels import in pandas.stats.var (:issue:`1734`) + - Fix DataFrame repr/info summary with non-unique columns (:issue:`1700`) + - Fix Series.iget_value for non-unique indexes (:issue:`1694`) + - Don't lose tzinfo when passing DatetimeIndex as DataFrame column (:issue:`1682`) + - Fix tz conversion with time zones that haven't had any DST transitions since + first date in the array (:issue:`1673`) + - Fix field access with UTC->local conversion on unsorted arrays (:issue:`1756`) + - Fix isnull handling of array-like (list) inputs (:issue:`1755`) + - Fix regression in handling of Series in Series constructor (:issue:`1671`) + - Fix comparison of Int64Index with DatetimeIndex (:issue:`1681`) + - Fix min_periods handling in new rolling_max/min at array start (:issue:`1695`) + - Fix errors with how='median' and generic NumPy resampling in some cases + caused by SeriesBinGrouper (:issue:`1648`, :issue:`1688`) + - When grouping by level, exclude unobserved levels (:issue:`1697`) + - Don't lose tzinfo in DatetimeIndex when shifting by different offset (:issue:`1683`) + - Hack to support 
storing data with a zero-length axis in HDFStore (:issue:`1707`) + - Fix DatetimeIndex tz-aware range generation issue (:issue:`1674`) + - Fix method='time' interpolation with intraday data (:issue:`1698`) + - Don't plot all-NA DataFrame columns as zeros (:issue:`1696`) + - Fix bug in scatter_plot with by option (:issue:`1716`) + - Fix performance problem in infer_freq with lots of non-unique stamps (:issue:`1686`) + - Fix handling of PeriodIndex as argument to create MultiIndex (:issue:`1705`) + - Fix re: unicode MultiIndex level names in Series/DataFrame repr (:issue:`1736`) + - Handle PeriodIndex in to_datetime instance method (:issue:`1703`) + - Support StaticTzInfo in DatetimeIndex infrastructure (:issue:`1692`) + - Allow MultiIndex setops with length-0 other type indexes (:issue:`1727`) + - Fix handling of DatetimeIndex in DataFrame.to_records (:issue:`1720`) + - Fix handling of general objects in isnull on which bool(...) fails (:issue:`1749`) + - Fix .ix indexing with MultiIndex ambiguity (:issue:`1678`) + - Fix .ix setting logic error with non-unique MultiIndex (:issue:`1750`) + - Basic indexing now works on MultiIndex with > 1000000 elements, regression + from earlier version of pandas (:issue:`1757`) + - Handle non-float64 dtypes in fast DataFrame.corr/cov code paths (:issue:`1761`) + - Fix DatetimeIndex.isin to function properly (:issue:`1763`) + - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with + right time zone (:issue:`1777`) + - Fix DST issues with generating anchored date ranges (:issue:`1778`) + - Fix issue calling sort on result of Series.unique (:issue:`1807`) + - Fix numerical issue leading to square root of negative number in + rolling_std (:issue:`1840`) + - Let Series.str.split accept no arguments (like str.split) (:issue:`1859`) + - Allow user to have dateutil 2.1 installed on a Python 2 system (:issue:`1851`) + - Catch ImportError less aggressively in pandas/__init__.py (:issue:`1845`) + - Fix pip source 
installation bug when installing from GitHub (:issue:`1805`) + - Fix error when window size > array size in rolling_apply (:issue:`1850`) + - Fix pip source installation issues via SSH from GitHub + - Fix OLS.summary when column is a tuple (:issue:`1837`) + - Fix bug in __doc__ patching when -OO passed to interpreter + (:issue:`1792` :issue:`1741` :issue:`1774`) + - Fix unicode console encoding issue in IPython notebook (:issue:`1782`, :issue:`1768`) + - Fix unicode formatting issue with Series.name (:issue:`1782`) + - Fix bug in DataFrame.duplicated with datetime64 columns (:issue:`1833`) + - Fix bug in Panel internals resulting in error when doing fillna after + truncate not changing size of panel (:issue:`1823`) + - Prevent segfault due to MultiIndex not being supported in HDFStore table + format (:issue:`1848`) + - Fix UnboundLocalError in Panel.__setitem__ and add better error (:issue:`1826`) + - Fix to_csv issues with list of string entries. Isnull works on list of + strings now too (:issue:`1791`) + - Fix Timestamp comparisons with datetime values outside the nanosecond range + (1677-2262) + - Revert to prior behavior of normalize_date with datetime.date objects + (return datetime) + - Fix broken interaction between np.nansum and Series.any/all + - Fix bug with multiple column date parsers (:issue:`1866`) + - DatetimeIndex.union(Int64Index) was broken + - Make plot x vs y interface consistent with integer indexing (:issue:`1842`) + - set_index inplace modified data even if unique check fails (:issue:`1831`) + - Only use Q-OCT/NOV/DEC in quarterly frequency inference (:issue:`1789`) + - Upcast to dtype=object when unstacking boolean DataFrame (:issue:`1820`) + - Fix float64/float32 merging bug (:issue:`1849`) + - Fixes to Period.start_time for non-daily frequencies (:issue:`1857`) + - Fix failure when converter used on index_col in read_csv (:issue:`1835`) + - Implement PeriodIndex.append so that pandas.concat works correctly (:issue:`1815`) + - Avoid Cython 
out-of-bounds access causing segfault sometimes in pad_2d, + backfill_2d + - Fix resampling error with intraday times and anchored target time (like + AS-DEC) (:issue:`1772`) + - Fix .ix indexing bugs with mixed-integer indexes (:issue:`1799`) + - Respect passed color keyword argument in Series.plot (:issue:`1890`) + - Fix rolling_min/max when the window is larger than the size of the input + array. Check other malformed inputs (:issue:`1899`, :issue:`1897`) + - Rolling variance / standard deviation with only a single observation in + window (:issue:`1884`) + - Fix unicode sheet name failure in to_excel (:issue:`1828`) + - Override DatetimeIndex.min/max to return Timestamp objects (:issue:`1895`) + - Fix column name formatting issue in length-truncated column (:issue:`1906`) + - Fix broken handling of copying Index metadata to new instances created by + view(...) calls inside the NumPy infrastructure + - Support datetime.date again in DateOffset.rollback/rollforward + - Raise Exception if set passed to Series constructor (:issue:`1913`) + - Add TypeError when appending HDFStore table w/ wrong index type (:issue:`1881`) + - Don't raise exception on empty inputs in EW functions (e.g. 
ewma) (:issue:`1900`) + - Make asof work correctly with PeriodIndex (:issue:`1883`) + - Fix extlinks in doc build + - Fill boolean DataFrame with NaN when calling shift (:issue:`1814`) + - Fix setuptools bug causing pip not to Cythonize .pyx files sometimes + - Fix negative integer indexing regression in .ix from 0.7.x (:issue:`1888`) + - Fix error while retrieving timezone and utc offset from subclasses of + datetime.tzinfo without .zone and ._utcoffset attributes (:issue:`1922`) + - Fix DataFrame formatting of small, non-zero FP numbers (:issue:`1911`) + - Various fixes by upcasting of date -> datetime (:issue:`1395`) + - Raise better exception when passing multiple functions with the same name, + such as lambdas, to GroupBy.aggregate + - Fix DataFrame.apply with axis=1 on a non-unique index (:issue:`1878`) + - Proper handling of Index subclasses in pandas.unique (:issue:`1759`) + - Set index names in DataFrame.from_records (:issue:`1744`) + - Fix time series indexing error with duplicates, under and over hash table + size cutoff (:issue:`1821`) + - Handle list keys in addition to tuples in DataFrame.xs when + partial-indexing a hierarchically-indexed DataFrame (:issue:`1796`) + - Support multiple column selection in DataFrame.__getitem__ with duplicate + columns (:issue:`1943`) + - Fix time zone localization bug causing improper fields (e.g. 
hours) in time + zones that have not had a UTC transition in a long time (:issue:`1946`) + - Fix errors when parsing and working with fixed offset timezones + (:issue:`1922`, :issue:`1928`) + - Fix text parser bug when handling UTC datetime objects generated by + dateutil (:issue:`1693`) + - Fix plotting bug when 'B' is the inferred frequency but index actually + contains weekends (:issue:`1668`, :issue:`1669`) + - Fix plot styling bugs (:issue:`1666`, :issue:`1665`, :issue:`1658`) + - Fix plotting bug with index/columns with unicode (:issue:`1685`) + - Fix DataFrame constructor bug when passed Series with datetime64 dtype + in a dict (:issue:`1680`) + - Fixed regression in generating DatetimeIndex using timezone aware + datetime.datetime (:issue:`1676`) + - Fix DataFrame bug when printing concatenated DataFrames with duplicated + columns (:issue:`1675`) + - Fixed bug when plotting time series with multiple intraday frequencies + (:issue:`1732`) + - Fix bug in DataFrame.duplicated to enable iterables other than list-types + as input argument (:issue:`1773`) + - Fix resample bug when passed list of lambdas as `how` argument (:issue:`1808`) + - Repr fix for MultiIndex level with all NAs (:issue:`1971`) + - Fix PeriodIndex slicing bug when slice start/end are out-of-bounds (:issue:`1977`) + - Fix read_table bug when parsing unicode (:issue:`1975`) + - Fix BlockManager.iget bug when dealing with non-unique MultiIndex as columns + (:issue:`1970`) + - Fix reset_index bug if both drop and level are specified (:issue:`1957`) + - Work around unsafe NumPy object->int casting with Cython function (:issue:`1987`) + - Fix datetime64 formatting bug in DataFrame.to_csv (:issue:`1993`) + - Default start date in pandas.io.data to 1/1/2000 as the docs say (:issue:`2011`) + + + + +pandas 0.8.1 +============ + +**Release date:** July 22, 2012 + +**New features** + + - Add vectorized, NA-friendly string methods to Series (:issue:`1621`, :issue:`620`) + - Can pass dict of 
per-column line styles to DataFrame.plot (:issue:`1559`) + - Selective plotting to secondary y-axis on same subplot (:issue:`1640`) + - Add new ``bootstrap_plot`` plot function + - Add new ``parallel_coordinates`` plot function (:issue:`1488`) + - Add ``radviz`` plot function (:issue:`1566`) + - Add ``multi_sparse`` option to ``set_printoptions`` to modify display of + hierarchical indexes (:issue:`1538`) + - Add ``dropna`` method to Panel (:issue:`171`) + +**Improvements to existing features** + + - Use moving min/max algorithms from Bottleneck in rolling_min/rolling_max + for > 100x speedup. (:issue:`1504`, :issue:`50`) + - Add Cython group median method for >15x speedup (:issue:`1358`) + - Drastically improve ``to_datetime`` performance on ISO8601 datetime strings + (with no time zones) (:issue:`1571`) + - Improve single-key groupby performance on large data sets, accelerate use of + groupby with a Categorical variable + - Add ability to append hierarchical index levels with ``set_index`` and to + drop single levels with ``reset_index`` (:issue:`1569`, :issue:`1577`) + - Always apply passed functions in ``resample``, even if upsampling (:issue:`1596`) + - Avoid unnecessary copies in DataFrame constructor with explicit dtype (:issue:`1572`) + - Cleaner DatetimeIndex string representation with 1 or 2 elements (:issue:`1611`) + - Improve performance of array-of-Period to PeriodIndex, convert such arrays + to PeriodIndex inside Index (:issue:`1215`) + - More informative string representation for weekly Period objects (:issue:`1503`) + - Accelerate 3-axis multi data selection from homogeneous Panel (:issue:`979`) + - Add ``adjust`` option to ewma to disable adjustment factor (:issue:`1584`) + - Add new matplotlib converters for high frequency time series plotting (:issue:`1599`) + - Handling of tz-aware datetime.datetime objects in to_datetime; raise + Exception unless utc=True given (:issue:`1581`) + +**Bug fixes** + + - Fix NA handling in DataFrame.to_panel 
(:issue:`1582`) + - Handle TypeError issues inside PyObject_RichCompareBool calls in khash + (:issue:`1318`) + - Fix resampling bug to lower case daily frequency (:issue:`1588`) + - Fix kendall/spearman DataFrame.corr bug with no overlap (:issue:`1595`) + - Fix bug in DataFrame.set_index (:issue:`1592`) + - Don't ignore axes in boxplot if by specified (:issue:`1565`) + - Fix Panel .ix indexing with integers bug (:issue:`1603`) + - Fix Partial indexing bugs (years, months, ...) with PeriodIndex (:issue:`1601`) + - Fix MultiIndex console formatting issue (:issue:`1606`) + - Unordered index with duplicates doesn't yield scalar location for single + entry (:issue:`1586`) + - Fix resampling of tz-aware time series with "anchored" freq (:issue:`1591`) + - Fix DataFrame.rank error on integer data (:issue:`1589`) + - Selection of multiple SparseDataFrame columns by list in __getitem__ (:issue:`1585`) + - Override Index.tolist for compatibility with MultiIndex (:issue:`1576`) + - Fix hierarchical summing bug with MultiIndex of length 1 (:issue:`1568`) + - Work around numpy.concatenate use/bug in Series.set_value (:issue:`1561`) + - Ensure Series/DataFrame are sorted before resampling (:issue:`1580`) + - Fix unhandled IndexError when indexing very large time series (:issue:`1562`) + - Fix DatetimeIndex intersection logic error with irregular indexes (:issue:`1551`) + - Fix unit test errors on Python 3 (:issue:`1550`) + - Fix .ix indexing bugs in duplicate DataFrame index (:issue:`1201`) + - Better handle errors with non-existing objects in HDFStore (:issue:`1254`) + - Don't copy int64 array data in DatetimeIndex when copy=False (:issue:`1624`) + - Fix resampling of conforming periods quarterly to annual (:issue:`1622`) + - Don't lose index name on resampling (:issue:`1631`) + - Support python-dateutil version 2.1 (:issue:`1637`) + - Fix broken scatter_matrix axis labeling, esp. 
with time series (:issue:`1625`) + - Fix cases where extra keywords weren't being passed on to matplotlib from + Series.plot (:issue:`1636`) + - Fix BusinessMonthBegin logic for dates before 1st bday of month (:issue:`1645`) + - Ensure string alias converted (valid in DatetimeIndex.get_loc) in + DataFrame.xs / __getitem__ (:issue:`1644`) + - Fix use of string alias timestamps with tz-aware time series (:issue:`1647`) + - Fix Series.max/min and Series.describe on len-0 series (:issue:`1650`) + - Handle None values in dict passed to concat (:issue:`1649`) + - Fix Series.interpolate with method='values' and DatetimeIndex (:issue:`1646`) + - Fix IndexError in left merges on a DataFrame with 0-length (:issue:`1628`) + - Fix DataFrame column width display with UTF-8 encoded characters (:issue:`1620`) + - Handle case in pandas.io.data.get_data_yahoo where Yahoo! returns duplicate + dates for most recent business day + - Avoid downsampling when plotting mixed frequencies on the same subplot (:issue:`1619`) + - Fix read_csv bug when reading a single line (:issue:`1553`) + - Fix bug in C code causing monthly periods prior to December 1969 to be off (:issue:`1570`) + + + +pandas 0.8.0 +============ + +**Release date:** 6/29/2012 + +**New features** + + - New unified DatetimeIndex class for nanosecond-level timestamp data + - New Timestamp datetime.datetime subclass with easy time zone conversions, + and support for nanoseconds + - New PeriodIndex class for timespans, calendar logic, and Period scalar object + - High performance resampling of timestamp and period data. 
New `resample` + method of all pandas data structures + - New frequency names plus shortcut string aliases like '15h', '1h30min' + - Time series string indexing shorthand (:issue:`222`) + - Add week, dayofyear array and other timestamp array-valued field accessor + functions to DatetimeIndex + - Add GroupBy.prod optimized aggregation function and 'prod' fast time series + conversion method (:issue:`1018`) + - Implement robust frequency inference function and `inferred_freq` attribute + on DatetimeIndex (:issue:`391`) + - New ``tz_convert`` and ``tz_localize`` methods in Series / DataFrame + - Convert DatetimeIndexes to UTC if time zones are different in join/setops + (:issue:`864`) + - Add limit argument for forward/backward filling to reindex, fillna, + etc. (:issue:`825` and others) + - Add support for indexes (dates or otherwise) with duplicates and common + sense indexing/selection functionality + - Series/DataFrame.update methods, in-place variant of combine_first (:issue:`961`) + - Add ``match`` function to API (:issue:`502`) + - Add Cython-optimized first, last, min, max, prod functions to GroupBy (:issue:`994`, + :issue:`1043`) + - Dates can be split across multiple columns (:issue:`1227`, :issue:`1186`) + - Add experimental support for converting pandas DataFrame to R data.frame + via rpy2 (:issue:`350`, :issue:`1212`) + - Can pass list of (name, function) to GroupBy.aggregate to get aggregates in + a particular order (:issue:`610`) + - Can pass dicts with lists of functions or dicts to GroupBy aggregate to do + much more flexible multiple function aggregation (:issue:`642`, :issue:`610`) + - New ordered_merge functions for merging DataFrames with ordered + data. 
Also supports group-wise merging for panel data (:issue:`813`) + - Add keys() method to DataFrame + - Add flexible replace method for replacing values in Series and + DataFrame (:issue:`929`, :issue:`1241`) + - Add 'kde' plot kind for Series/DataFrame.plot (:issue:`1059`) + - More flexible multiple function aggregation with GroupBy + - Add pct_change function to Series/DataFrame + - Add option to interpolate by Index values in Series.interpolate (:issue:`1206`) + - Add ``max_colwidth`` option for DataFrame, defaulting to 50 + - Conversion of DataFrame through rpy2 to R data.frame (:issue:`1282`) + - Add keys() method on DataFrame (:issue:`1240`) + - Add new ``match`` function to API (similar to R) (:issue:`502`) + - Add dayfirst option to parsers (:issue:`854`) + - Add ``method`` argument to ``align`` method for forward/backward filling + (:issue:`216`) + - Add Panel.transpose method for rearranging axes (:issue:`695`) + - Add new ``cut`` function (patterned after R) for discretizing data into + equal range-length bins or arbitrary breaks of your choosing (:issue:`415`) + - Add new ``qcut`` for cutting with quantiles (:issue:`1378`) + - Add ``value_counts`` top level array method (:issue:`1392`) + - Added Andrews curves plot type (:issue:`1325`) + - Add lag plot (:issue:`1440`) + - Add autocorrelation_plot (:issue:`1425`) + - Add support for tox and Travis CI (:issue:`1382`) + - Add support for Categorical use in GroupBy (:issue:`292`) + - Add ``any`` and ``all`` methods to DataFrame (:issue:`1416`) + - Add ``secondary_y`` option to Series.plot + - Add experimental ``lreshape`` function for reshaping wide to long + +**Improvements to existing features** + + - Switch to klib/khash-based hash tables in Index classes for better + performance in many cases and lower memory footprint + - Shipping some functions from scipy.stats to reduce dependency, + e.g. 
Series.describe and DataFrame.describe (:issue:`1092`) + - Can create MultiIndex by passing list of lists or list of arrays to Series, + DataFrame constructor, etc. (:issue:`831`) + - Can pass arrays in addition to column names to DataFrame.set_index (:issue:`402`) + - Improve the speed of "square" reindexing of homogeneous DataFrame objects + by significant margin (:issue:`836`) + - Handle more dtypes when passed MaskedArrays in DataFrame constructor (:issue:`406`) + - Improved performance of join operations on integer keys (:issue:`682`) + - Can pass multiple columns to GroupBy object, e.g. grouped[[col1, col2]] to + only aggregate a subset of the value columns (:issue:`383`) + - Add histogram / kde plot options for scatter_matrix diagonals (:issue:`1237`) + - Add inplace option to Series/DataFrame.rename and sort_index, + DataFrame.drop_duplicates (:issue:`805`, :issue:`207`) + - More helpful error message when nothing passed to Series.reindex (:issue:`1267`) + - Can mix array and scalars as dict-value inputs to DataFrame ctor (:issue:`1329`) + - Use DataFrame columns' name for legend title in plots + - Preserve frequency in DatetimeIndex when possible in boolean indexing + operations + - Promote datetime.date values in data alignment operations (:issue:`867`) + - Add ``order`` method to Index classes (:issue:`1028`) + - Avoid hash table creation in large monotonic hash table indexes (:issue:`1160`) + - Store time zones in HDFStore (:issue:`1232`) + - Enable storage of sparse data structures in HDFStore (:issue:`85`) + - Enable Series.asof to work with arrays of timestamp inputs + - Cython implementation of DataFrame.corr speeds up by > 100x (:issue:`1349`, :issue:`1354`) + - Exclude "nuisance" columns automatically in GroupBy.transform (:issue:`1364`) + - Support functions-as-strings in GroupBy.transform (:issue:`1362`) + - Use index name as xlabel/ylabel in plots (:issue:`1415`) + - Add ``convert_dtype`` option to Series.apply to be able to leave data as + 
dtype=object (:issue:`1414`) + - Can specify all index level names in concat (:issue:`1419`) + - Add ``dialect`` keyword to parsers for quoting conventions (:issue:`1363`) + - Enable DataFrame[bool_DataFrame] += value (:issue:`1366`) + - Add ``retries`` argument to ``get_data_yahoo`` to try to prevent Yahoo! API + 404s (:issue:`826`) + - Improve performance of reshaping by using O(N) categorical sorting + - Series names will be used for index of DataFrame if no index passed (:issue:`1494`) + - Header argument in DataFrame.to_csv can accept a list of column names to + use instead of the object's columns (:issue:`921`) + - Add ``raise_conflict`` argument to DataFrame.update (:issue:`1526`) + - Support file-like objects in ExcelFile (:issue:`1529`) + +**API Changes** + + - Rename `pandas._tseries` to `pandas.lib` + - Rename Factor to Categorical and add improvements. Numerous Categorical bug + fixes + - Frequency name overhaul, WEEKDAY/EOM and rules with @ + deprecated. get_legacy_offset_name backwards compatibility function added + - Raise ValueError in DataFrame.__nonzero__, so "if df" no longer works + (:issue:`1073`) + - Change BDay (business day) to not normalize dates by default (:issue:`506`) + - Remove deprecated DataMatrix name + - Default merge suffixes for overlap now have underscores instead of periods + to facilitate tab completion, etc. 
(:issue:`1239`) + - Deprecation of offset, time_rule timeRule parameters throughout codebase + - Series.append and DataFrame.append no longer check for duplicate indexes + by default, add verify_integrity parameter (:issue:`1394`) + - Refactor Factor class, old constructor moved to Factor.from_array + - Modified internals of MultiIndex to use less memory (no longer represented + as array of tuples) internally, speed up construction time and many methods + which construct intermediate hierarchical indexes (:issue:`1467`) + +**Bug fixes** + + - Fix OverflowError from storing pre-1970 dates in HDFStore by switching to + datetime64 (:issue:`179`) + - Fix logical error with February leap year end in YearEnd offset + - Series([False, nan]) was getting casted to float64 (:issue:`1074`) + - Fix binary operations between boolean Series and object Series with + booleans and NAs (:issue:`1074`, :issue:`1079`) + - Couldn't assign whole array to column in mixed-type DataFrame via .ix + (:issue:`1142`) + - Fix label slicing issues with float index values (:issue:`1167`) + - Fix segfault caused by empty groups passed to groupby (:issue:`1048`) + - Fix occasionally misbehaved reindexing in the presence of NaN labels (:issue:`522`) + - Fix imprecise logic causing weird Series results from .apply (:issue:`1183`) + - Unstack multiple levels in one shot, avoiding empty columns in some + cases. 
Fix pivot table bug (:issue:`1181`) + - Fix formatting of MultiIndex on Series/DataFrame when index name coincides + with label (:issue:`1217`) + - Handle Excel 2003 #N/A as NaN from xlrd (:issue:`1213`, :issue:`1225`) + - Fix timestamp locale-related deserialization issues with HDFStore by moving + to datetime64 representation (:issue:`1081`, :issue:`809`) + - Fix DataFrame.duplicated/drop_duplicates NA value handling (:issue:`557`) + - Actually raise exceptions in fast reducer (:issue:`1243`) + - Fix various timezone-handling bugs from 0.7.3 (:issue:`969`) + - GroupBy on level=0 discarded index name (:issue:`1313`) + - Better error message with unmergeable DataFrames (:issue:`1307`) + - Series.__repr__ alignment fix with unicode index values (:issue:`1279`) + - Better error message if nothing passed to reindex (:issue:`1267`) + - More robust NA handling in DataFrame.drop_duplicates (:issue:`557`) + - Resolve locale-based and pre-epoch HDF5 timestamp deserialization issues + (:issue:`973`, :issue:`1081`, :issue:`179`) + - Implement Series.repeat (:issue:`1229`) + - Fix indexing with namedtuple and other tuple subclasses (:issue:`1026`) + - Fix float64 slicing bug (:issue:`1167`) + - Parsing integers with commas (:issue:`796`) + - Fix groupby improper data type when group consists of one value (:issue:`1065`) + - Fix negative variance possibility in nanvar resulting from floating point + error (:issue:`1090`) + - Consistently set name on groupby pieces (:issue:`184`) + - Treat dict return values as Series in GroupBy.apply (:issue:`823`) + - Respect column selection for DataFrame in in GroupBy.transform (:issue:`1365`) + - Fix MultiIndex partial indexing bug (:issue:`1352`) + - Enable assignment of rows in mixed-type DataFrame via .ix (:issue:`1432`) + - Reset index mapping when grouping Series in Cython (:issue:`1423`) + - Fix outer/inner DataFrame.join with non-unique indexes (:issue:`1421`) + - Fix MultiIndex groupby bugs with empty lower levels (:issue:`1401`) + 
- Calling fillna with a Series will have same behavior as with dict (:issue:`1486`) + - SparseSeries reduction bug (:issue:`1375`) + - Fix unicode serialization issue in HDFStore (:issue:`1361`) + - Pass keywords to pyplot.boxplot in DataFrame.boxplot (:issue:`1493`) + - Bug fixes in MonthBegin (:issue:`1483`) + - Preserve MultiIndex names in drop (:issue:`1513`) + - Fix Panel DataFrame slice-assignment bug (:issue:`1533`) + - Don't use locals() in read_* functions (:issue:`1547`) + + + +pandas 0.7.3 +============ + +**Release date:** April 12, 2012 + +**New features / modules** + + - Support for non-unique indexes: indexing and selection, many-to-one and + many-to-many joins (:issue:`1306`) + - Added fixed-width file reader, read_fwf (:issue:`952`) + - Add group_keys argument to groupby to not add group names to MultiIndex in + result of apply (:issue:`938`) + - DataFrame can now accept non-integer label slicing (:issue:`946`). Previously + only DataFrame.ix was able to do so. + - DataFrame.apply now retains name attributes on Series objects (:issue:`983`) + - Numeric DataFrame comparisons with non-numeric values now raises proper + TypeError (:issue:`943`). Previously raise "PandasError: DataFrame constructor + not properly called!" 
+ - Add ``kurt`` methods to Series and DataFrame (:issue:`964`) + - Can pass dict of column -> list/set NA values for text parsers (:issue:`754`) + - Allows users specified NA values in text parsers (:issue:`754`) + - Parsers checks for openpyxl dependency and raises ImportError if not found + (:issue:`1007`) + - New factory function to create HDFStore objects that can be used in a with + statement so users do not have to explicitly call HDFStore.close (:issue:`1005`) + - pivot_table is now more flexible with same parameters as groupby (:issue:`941`) + - Added stacked bar plots (:issue:`987`) + - scatter_matrix method in pandas/tools/plotting.py (:issue:`935`) + - DataFrame.boxplot returns plot results for ex-post styling (:issue:`985`) + - Short version number accessible as pandas.version.short_version (:issue:`930`) + - Additional documentation in panel.to_frame (:issue:`942`) + - More informative Series.apply docstring regarding element-wise apply + (:issue:`977`) + - Notes on rpy2 installation (:issue:`1006`) + - Add rotation and font size options to hist method (:issue:`1012`) + - Use exogenous / X variable index in result of OLS.y_predict. Add + OLS.predict method (:issue:`1027`, :issue:`1008`) + +**API Changes** + + - Calling apply on grouped Series, e.g. describe(), will no longer yield + DataFrame by default. Will have to call unstack() to get prior behavior + - NA handling in non-numeric comparisons has been tightened up (:issue:`933`, :issue:`953`) + - No longer assign dummy names key_0, key_1, etc. to groupby index (:issue:`1291`) + +**Bug fixes** + + - Fix logic error when selecting part of a row in a DataFrame with a + MultiIndex index (:issue:`1013`) + - Series comparison with Series of differing length causes crash (:issue:`1016`). + - Fix bug in indexing when selecting section of hierarchically-indexed row + (:issue:`1013`) + - DataFrame.plot(logy=True) has no effect (:issue:`1011`). 
 + - Broken arithmetic operations between SparsePanel-Panel (:issue:`1015`) + - Unicode repr issues in MultiIndex with non-ascii characters (:issue:`1010`) + - DataFrame.lookup() returns inconsistent results if exact match not present + (:issue:`1001`) + - DataFrame arithmetic operations not treating None as NA (:issue:`992`) + - DataFrameGroupBy.apply returns incorrect result (:issue:`991`) + - Series.reshape returns incorrect result for multiple dimensions (:issue:`989`) + - Series.std and Series.var ignore ddof parameter (:issue:`934`) + - DataFrame.append loses index names (:issue:`980`) + - DataFrame.plot(kind='bar') ignores color argument (:issue:`958`) + - Inconsistent Index comparison results (:issue:`948`) + - Improper int dtype DataFrame construction from data with NaN (:issue:`846`) + - Removes default 'result' name in groupby results (:issue:`995`) + - DataFrame.from_records no longer mutates input columns (:issue:`975`) + - Use Index name when grouping by it (:issue:`1313`) + + + +pandas 0.7.2 +============ + +**Release date:** March 16, 2012 + +**New features / modules** + + - Add additional tie-breaking methods in DataFrame.rank (:issue:`874`) + - Add ascending parameter to rank in Series, DataFrame (:issue:`875`) + - Add sort_columns parameter to allow unsorted plots (:issue:`918`) + - IPython tab completion on GroupBy objects + +**API Changes** + + - Series.sum returns 0 instead of NA when called on an empty + series. 
Analogously for a DataFrame whose rows or columns are length 0 + (:issue:`844`) + +**Improvements to existing features** + + - Don't use groups dict in Grouper.size (:issue:`860`) + - Use khash for Series.value_counts, add raw function to algorithms.py (:issue:`861`) + - Enable column access via attributes on GroupBy (:issue:`882`) + - Enable setting existing columns (only) via attributes on DataFrame, Panel + (:issue:`883`) + - Intercept __builtin__.sum in groupby (:issue:`885`) + - Can pass dict to DataFrame.fillna to use different values per column (:issue:`661`) + - Can select multiple hierarchical groups by passing list of values in .ix + (:issue:`134`) + - Add level keyword to ``drop`` for dropping values from a level (:issue:`159`) + - Add ``coerce_float`` option on DataFrame.from_records (:issue:`893`) + - Raise exception if passed date_parser fails in ``read_csv`` + - Add ``axis`` option to DataFrame.fillna (:issue:`174`) + - Fixes to Panel to make it easier to subclass (:issue:`888`) + +**Bug fixes** + + - Fix overflow-related bugs in groupby (:issue:`850`, :issue:`851`) + - Fix unhelpful error message in parsers (:issue:`856`) + - Better err msg for failed boolean slicing of dataframe (:issue:`859`) + - Series.count cannot accept a string (level name) in the level argument (:issue:`869`) + - Group index platform int check (:issue:`870`) + - concat on axis=1 and ignore_index=True raises TypeError (:issue:`871`) + - Further unicode handling issues resolved (:issue:`795`) + - Fix failure in multiindex-based access in Panel (:issue:`880`) + - Fix DataFrame boolean slice assignment failure (:issue:`881`) + - Fix combineAdd NotImplementedError for SparseDataFrame (:issue:`887`) + - Fix DataFrame.to_html encoding and columns (:issue:`890`, :issue:`891`, :issue:`909`) + - Fix na-filling handling in mixed-type DataFrame (:issue:`910`) + - Fix to DataFrame.set_value with non-existant row/col (:issue:`911`) + - Fix malformed block in groupby when excluding nuisance 
 columns (:issue:`916`) + - Fix inconsistent NA handling in dtype=object arrays (:issue:`925`) + - Fix missing center-of-mass computation in ewmcov (:issue:`862`) + - Don't raise exception when opening read-only HDF5 file (:issue:`847`) + - Fix possible out-of-bounds memory access in 0-length Series (:issue:`917`) + + + +pandas 0.7.1 +============ + +**Release date:** February 29, 2012 + +**New features / modules** + + - Add ``to_clipboard`` function to pandas namespace for writing objects to + the system clipboard (:issue:`774`) + - Add ``itertuples`` method to DataFrame for iterating through the rows of a + dataframe as tuples (:issue:`818`) + - Add ability to pass fill_value and method to DataFrame and Series align + method (:issue:`806`, :issue:`807`) + - Add fill_value option to reindex, align methods (:issue:`784`) + - Enable concat to produce DataFrame from Series (:issue:`787`) + - Add ``between`` method to Series (:issue:`802`) + - Add HTML representation hook to DataFrame for the IPython HTML notebook + (:issue:`773`) + - Support for reading Excel 2007 XML documents using openpyxl + +**Improvements to existing features** + + - Improve performance and memory usage of fillna on DataFrame + - Can concatenate a list of Series along axis=1 to obtain a DataFrame (:issue:`787`) + +**Bug fixes** + + - Fix memory leak when inserting large number of columns into a single + DataFrame (:issue:`790`) + - Appending length-0 DataFrame with new columns would not result in those new + columns being part of the resulting concatenated DataFrame (:issue:`782`) + - Fixed groupby corner case when passing dictionary grouper and as_index is + False (:issue:`819`) + - Fixed bug whereby bool array sometimes had object dtype (:issue:`820`) + - Fix exception thrown on np.diff (:issue:`816`) + - Fix to_records where columns are non-strings (:issue:`822`) + - Fix Index.intersection where indices have incomparable types (:issue:`811`) + - Fix ExcelFile throwing an exception for two-line 
file (:issue:`837`) + - Add clearer error message in csv parser (:issue:`835`) + - Fix loss of fractional seconds in HDFStore (:issue:`513`) + - Fix DataFrame join where columns have datetimes (:issue:`787`) + - Work around numpy performance issue in take (:issue:`817`) + - Improve comparison operations for NA-friendliness (:issue:`801`) + - Fix indexing operation for floating point values (:issue:`780`, :issue:`798`) + - Fix groupby case resulting in malformed dataframe (:issue:`814`) + - Fix behavior of reindex of Series dropping name (:issue:`812`) + - Improve on redudant groupby computation (:issue:`775`) + - Catch possible NA assignment to int/bool series with exception (:issue:`839`) + + + +pandas 0.7.0 +============ + +**Release date:** 2/9/2012 + +**New features / modules** + + - New ``merge`` function for efficiently performing full gamut of database / + relational-algebra operations. Refactored existing join methods to use the + new infrastructure, resulting in substantial performance gains (:issue:`220`, + :issue:`249`, :issue:`267`) + - New ``concat`` function for concatenating DataFrame or Panel objects along + an axis. Can form union or intersection of the other axes. 
Improves + performance of ``DataFrame.append`` (:issue:`468`, :issue:`479`, :issue:`273`) + - Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`) + - Can pass list of dicts (e.g., a list of shallow JSON objects) to DataFrame + constructor (:issue:`526`) + - Add ``reorder_levels`` method to Series and DataFrame (:issue:`534`) + - Add dict-like ``get`` function to DataFrame and Panel (:issue:`521`) + - ``DataFrame.iterrows`` method for efficiently iterating through the rows of + a DataFrame + - Added ``DataFrame.to_panel`` with code adapted from ``LongPanel.to_long`` + - ``reindex_axis`` method added to DataFrame + - Add ``level`` option to binary arithmetic functions on ``DataFrame`` and + ``Series`` + - Add ``level`` option to the ``reindex`` and ``align`` methods on Series and + DataFrame for broadcasting values across a level (:issue:`542`, :issue:`552`, others) + - Add attribute-based item access to ``Panel`` and add IPython completion (PR + :issue:`554`) + - Add ``logy`` option to ``Series.plot`` for log-scaling on the Y axis + - Add ``index``, ``header``, and ``justify`` options to + ``DataFrame.to_string``. 
Add option to (:issue:`570`, :issue:`571`) + - Can pass multiple DataFrames to ``DataFrame.join`` to join on index (:issue:`115`) + - Can pass multiple Panels to ``Panel.join`` (:issue:`115`) + - Can pass multiple DataFrames to `DataFrame.append` to concatenate (stack) + and multiple Series to ``Series.append`` too + - Added ``justify`` argument to ``DataFrame.to_string`` to allow different + alignment of column headers + - Add ``sort`` option to GroupBy to allow disabling sorting of the group keys + for potential speedups (:issue:`595`) + - Can pass MaskedArray to Series constructor (:issue:`563`) + - Add Panel item access via attributes and IPython completion (:issue:`554`) + - Implement ``DataFrame.lookup``, fancy-indexing analogue for retrieving + values given a sequence of row and column labels (:issue:`338`) + - Add ``verbose`` option to ``read_csv`` and ``read_table`` to show number of + NA values inserted in non-numeric columns (:issue:`614`) + - Can pass a list of dicts or Series to ``DataFrame.append`` to concatenate + multiple rows (:issue:`464`) + - Add ``level`` argument to ``DataFrame.xs`` for selecting data from other + MultiIndex levels. 
Can take one or more levels with potentially a tuple of + keys for flexible retrieval of data (:issue:`371`, :issue:`629`) + - New ``crosstab`` function for easily computing frequency tables (:issue:`170`) + - Can pass a list of functions to aggregate with groupby on a DataFrame, + yielding an aggregated result with hierarchical columns (:issue:`166`) + - Add integer-indexing functions ``iget`` in Series and ``irow`` / ``iget`` + in DataFrame (:issue:`628`) + - Add new ``Series.unique`` function, significantly faster than + ``numpy.unique`` (:issue:`658`) + - Add new ``cummin`` and ``cummax`` instance methods to ``Series`` and + ``DataFrame`` (:issue:`647`) + - Add new ``value_range`` function to return min/max of a dataframe (:issue:`288`) + - Add ``drop`` parameter to ``reset_index`` method of ``DataFrame`` and added + method to ``Series`` as well (:issue:`699`) + - Add ``isin`` method to Index objects, works just like ``Series.isin`` (GH + :issue:`657`) + - Implement array interface on Panel so that ufuncs work (re: :issue:`740`) + - Add ``sort`` option to ``DataFrame.join`` (:issue:`731`) + - Improved handling of NAs (propagation) in binary operations with + dtype=object arrays (:issue:`737`) + - Add ``abs`` method to Pandas objects + - Added ``algorithms`` module to start collecting central algos + +**API Changes** + + - Label-indexing with integer indexes now raises KeyError if a label is not + found instead of falling back on location-based indexing (:issue:`700`) + - Label-based slicing via ``ix`` or ``[]`` on Series will now only work if + exact matches for the labels are found or if the index is monotonic (for + range selections) + - Label-based slicing and sequences of labels can be passed to ``[]`` on a + Series for both getting and setting (:issue:`86`) + - `[]` operator (``__getitem__`` and ``__setitem__``) will raise KeyError + with integer indexes when an index is not contained in the index. 
The prior + behavior would fall back on position-based indexing if a key was not found + in the index which would lead to subtle bugs. This is now consistent with + the behavior of ``.ix`` on DataFrame and friends (:issue:`328`) + - Rename ``DataFrame.delevel`` to ``DataFrame.reset_index`` and add + deprecation warning + - `Series.sort` (an in-place operation) called on a Series which is a view on + a larger array (e.g. a column in a DataFrame) will generate an Exception to + prevent accidentally modifying the data source (:issue:`316`) + - Refactor to remove deprecated ``LongPanel`` class (:issue:`552`) + - Deprecated ``Panel.to_long``, renamed to ``to_frame`` + - Deprecated ``colSpace`` argument in ``DataFrame.to_string``, renamed to + ``col_space`` + - Rename ``precision`` to ``accuracy`` in engineering float formatter (GH + :issue:`395`) + - The default delimiter for ``read_csv`` is comma rather than letting + ``csv.Sniffer`` infer it + - Rename ``col_or_columns`` argument in ``DataFrame.drop_duplicates`` (GH + :issue:`734`) + +**Improvements to existing features** + + - Better error message in DataFrame constructor when passed column labels + don't match data (:issue:`497`) + - Substantially improve performance of multi-GroupBy aggregation when a + Python function is passed, reuse ndarray object in Cython (:issue:`496`) + - Can store objects indexed by tuples and floats in HDFStore (:issue:`492`) + - Don't print length by default in Series.to_string, add `length` option (GH + :issue:`489`) + - Improve Cython code for multi-groupby to aggregate without having to sort + the data (:issue:`93`) + - Improve MultiIndex reindexing speed by storing tuples in the MultiIndex, + test for backwards unpickling compatibility + - Improve column reindexing performance by using specialized Cython take + function + - Further performance tweaking of Series.__getitem__ for standard use cases + - Avoid Index dict creation in some cases (i.e. 
when getting slices, etc.), + regression from prior versions + - Friendlier error message in setup.py if NumPy not installed + - Use common set of NA-handling operations (sum, mean, etc.) in Panel class + also (:issue:`536`) + - Default name assignment when calling ``reset_index`` on DataFrame with a + regular (non-hierarchical) index (:issue:`476`) + - Use Cythonized groupers when possible in Series/DataFrame stat ops with + ``level`` parameter passed (:issue:`545`) + - Ported skiplist data structure to C to speed up ``rolling_median`` by about + 5-10x in most typical use cases (:issue:`374`) + - Some performance enhancements in constructing a Panel from a dict of + DataFrame objects + - Made ``Index._get_duplicates`` a public method by removing the underscore + - Prettier printing of floats, and column spacing fix (:issue:`395`, :issue:`571`) + - Add ``bold_rows`` option to DataFrame.to_html (:issue:`586`) + - Improve the performance of ``DataFrame.sort_index`` by up to 5x or more + when sorting by multiple columns + - Substantially improve performance of DataFrame and Series constructors when + passed a nested dict or dict, respectively (:issue:`540`, :issue:`621`) + - Modified setup.py so that pip / setuptools will install dependencies (GH + :issue:`507`, various pull requests) + - Unstack called on DataFrame with non-MultiIndex will return Series (GH + :issue:`477`) + - Improve DataFrame.to_string and console formatting to be more consistent in + the number of displayed digits (:issue:`395`) + - Use bottleneck if available for performing NaN-friendly statistical + operations that it implemented (:issue:`91`) + - Monkey-patch context to traceback in ``DataFrame.apply`` to indicate which + row/column the function application failed on (:issue:`614`) + - Improved ability of read_table and read_clipboard to parse + console-formatted DataFrames (can read the row of index names, etc.) 
+ - Can pass list of group labels (without having to convert to an ndarray + yourself) to ``groupby`` in some cases (:issue:`659`) + - Use ``kind`` argument to Series.order for selecting different sort kinds + (:issue:`668`) + - Add option to Series.to_csv to omit the index (:issue:`684`) + - Add ``delimiter`` as an alternative to ``sep`` in ``read_csv`` and other + parsing functions + - Substantially improved performance of groupby on DataFrames with many + columns by aggregating blocks of columns all at once (:issue:`745`) + - Can pass a file handle or StringIO to Series/DataFrame.to_csv (:issue:`765`) + - Can pass sequence of integers to DataFrame.irow(icol) and Series.iget, (GH + :issue:`654`) + - Prototypes for some vectorized string functions + - Add float64 hash table to solve the Series.unique problem with NAs (:issue:`714`) + - Memoize objects when reading from file to reduce memory footprint + - Can get and set a column of a DataFrame with hierarchical columns + containing "empty" ('') lower levels without passing the empty levels (PR + :issue:`768`) + +**Bug fixes** + + - Raise exception in out-of-bounds indexing of Series instead of + seg-faulting, regression from earlier releases (:issue:`495`) + - Fix error when joining DataFrames of different dtypes within the same + typeclass (e.g. 
float32 and float64) (:issue:`486`) + - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH + :issue:`487`) + - Preserve index names in Index.union (:issue:`501`) + - Fix bug in Index joining causing subclass information (like DateRange type) + to be lost in some cases (:issue:`500`) + - Accept empty list as input to DataFrame constructor, regression from 0.6.0 + (:issue:`491`) + - Can output DataFrame and Series with ndarray objects in a dtype=object + array (:issue:`490`) + - Return empty string from Series.to_string when called on empty Series (GH + :issue:`488`) + - Fix exception passing empty list to DataFrame.from_records + - Fix Index.format bug (excluding name field) with datetimes with time info + - Fix scalar value access in Series to always return NumPy scalars, + regression from prior versions (:issue:`510`) + - Handle rows skipped at beginning of file in read_* functions (:issue:`505`) + - Handle improper dtype casting in ``set_value`` methods + - Unary '-' / __neg__ operator on DataFrame was returning integer values + - Unbox 0-dim ndarrays from certain operators like all, any in Series + - Fix handling of missing columns (was combine_first-specific) in + DataFrame.combine for general case (:issue:`529`) + - Fix type inference logic with boolean lists and arrays in DataFrame indexing + - Use centered sum of squares in R-square computation if entity_effects=True + in panel regression + - Handle all NA case in Series.{corr, cov}, was raising exception (:issue:`548`) + - Aggregating by multiple levels with ``level`` argument to DataFrame, Series + stat method, was broken (:issue:`545`) + - Fix Cython buf when converter passed to read_csv produced a numeric array + (buffer dtype mismatch when passed to Cython type inference function) (GH + :issue:`546`) + - Fix exception when setting scalar value using .ix on a DataFrame with a + MultiIndex (:issue:`551`) + - Fix outer join between two DateRanges with different offsets that returned + 
 an invalid DateRange + - Cleanup DataFrame.from_records failure where index argument is an integer + - Fix Data.from_records failure when passed a dictionary + - Fix NA handling in {Series, DataFrame}.rank with non-floating point dtypes + - Fix bug related to integer type-checking in .ix-based indexing + - Handle non-string index name passed to DataFrame.from_records + - DataFrame.insert caused the columns name(s) field to be discarded (:issue:`527`) + - Fix erroneous results in monotonic many-to-one left joins + - Fix DataFrame.to_string to remove extra column white space (:issue:`571`) + - Format floats to default to same number of digits (:issue:`395`) + - Added decorator to copy docstring from one function to another (:issue:`449`) + - Fix error in monotonic many-to-one left joins + - Fix __eq__ comparison between DateOffsets with different relativedelta + keywords passed + - Fix exception caused by parser converter returning strings (:issue:`583`) + - Fix MultiIndex formatting bug with integer names (:issue:`601`) + - Fix bug in handling of non-numeric aggregates in Series.groupby (:issue:`612`) + - Fix TypeError with tuple subclasses (e.g. 
namedtuple) in + DataFrame.from_records (:issue:`611`) + - Catch misreported console size when running IPython within Emacs + - Fix minor bug in pivot table margins, loss of index names and length-1 + 'All' tuple in row labels + - Add support for legacy WidePanel objects to be read from HDFStore + - Fix out-of-bounds segfault in pad_object and backfill_object methods when + either source or target array are empty + - Could not create a new column in a DataFrame from a list of tuples + - Fix bugs preventing SparseDataFrame and SparseSeries working with groupby + (:issue:`666`) + - Use sort kind in Series.sort / argsort (:issue:`668`) + - Fix DataFrame operations on non-scalar, non-pandas objects (:issue:`672`) + - Don't convert DataFrame column to integer type when passing integer to + __setitem__ (:issue:`669`) + - Fix downstream bug in pivot_table caused by integer level names in + MultiIndex (:issue:`678`) + - Fix SparseSeries.combine_first when passed a dense Series (:issue:`687`) + - Fix performance regression in HDFStore loading when DataFrame or Panel + stored in table format with datetimes + - Raise Exception in DateRange when offset with n=0 is passed (:issue:`683`) + - Fix get/set inconsistency with .ix property and integer location but + non-integer index (:issue:`707`) + - Use right dropna function for SparseSeries. 
Return dense Series for NA fill + value (:issue:`730`) + - Fix Index.format bug causing incorrectly string-formatted Series with + datetime indexes (:issue:`726`, :issue:`758`) + - Fix errors caused by object dtype arrays passed to ols (:issue:`759`) + - Fix error where column names lost when passing list of labels to + DataFrame.__getitem__, (:issue:`662`) + - Fix error whereby top-level week iterator overwrote week instance + - Fix circular reference causing memory leak in sparse array / series / + frame, (:issue:`663`) + - Fix integer-slicing from integers-as-floats (:issue:`670`) + - Fix zero division errors in nanops from object dtype arrays in all NA case + (:issue:`676`) + - Fix csv encoding when using unicode (:issue:`705`, :issue:`717`, :issue:`738`) + - Fix assumption that each object contains every unique block type in concat, + (:issue:`708`) + - Fix sortedness check of multiindex in to_panel (:issue:`719`, 720) + - Fix that None was not treated as NA in PyObjectHashtable + - Fix hashing dtype because of endianness confusion (:issue:`747`, :issue:`748`) + - Fix SparseSeries.dropna to return dense Series in case of NA fill value (GH + :issue:`730`) + - Use map_infer instead of np.vectorize. 
handle NA sentinels if converter + yields numeric array, (:issue:`753`) + - Fixes and improvements to DataFrame.rank (:issue:`742`) + - Fix catching AttributeError instead of NameError for bottleneck + - Try to cast non-MultiIndex to better dtype when calling reset_index (:issue:`726` + :issue:`440`) + - Fix #1.QNAN0' float bug on 2.6/win64 + - Allow subclasses of dicts in DataFrame constructor, with tests + - Fix problem whereby set_index destroys column multiindex (:issue:`764`) + - Hack around bug in generating DateRange from naive DateOffset (:issue:`770`) + - Fix bug in DateRange.intersection causing incorrect results with some + overlapping ranges (:issue:`771`) + +Thanks +------ +- Craig Austin +- Chris Billington +- Marius Cobzarenco +- Mario Gamboa-Cavazos +- Hans-Martin Gaudecker +- Arthur Gerigk +- Yaroslav Halchenko +- Jeff Hammerbacher +- Matt Harrison +- Andreas Hilboll +- Luc Kesters +- Adam Klein +- Gregg Lind +- Solomon Negusse +- Wouter Overmeire +- Christian Prinoth +- Jeff Reback +- Sam Reckoner +- Craig Reeson +- Jan Schulz +- Skipper Seabold +- Ted Square +- Graham Taylor +- Aman Thakral +- Chris Uga +- Dieter Vandenbussche +- Texas P. +- Pinxing Ye +- ... and everyone I forgot + + + +pandas 0.6.1 +============ + +**Release date:** 12/13/2011 + +**API Changes** + + - Rename `names` argument in DataFrame.from_records to `columns`. Add + deprecation warning + - Boolean get/set operations on Series with boolean Series will reindex + instead of requiring that the indexes be exactly equal (:issue:`429`) + +**New features / modules** + + - Can pass Series to DataFrame.append with ignore_index=True for appending a + single row (:issue:`430`) + - Add Spearman and Kendall correlation options to Series.corr and + DataFrame.corr (:issue:`428`) + - Add new `get_value` and `set_value` methods to Series, DataFrame, and Panel + to very low-overhead access to scalar elements. 
df.get_value(row, column) + is about 3x faster than df[column][row] by handling fewer cases (:issue:`437`, + :issue:`438`). Add similar methods to sparse data structures for compatibility + - Add Qt table widget to sandbox (:issue:`435`) + - DataFrame.align can accept Series arguments, add axis keyword (:issue:`461`) + - Implement new SparseList and SparseArray data structures. SparseSeries now + derives from SparseArray (:issue:`463`) + - max_columns / max_rows options in set_printoptions (:issue:`453`) + - Implement Series.rank and DataFrame.rank, fast versions of + scipy.stats.rankdata (:issue:`428`) + - Implement DataFrame.from_items alternate constructor (:issue:`444`) + - DataFrame.convert_objects method for inferring better dtypes for object + columns (:issue:`302`) + - Add rolling_corr_pairwise function for computing Panel of correlation + matrices (:issue:`189`) + - Add `margins` option to `pivot_table` for computing subgroup aggregates (GH + :issue:`114`) + - Add `Series.from_csv` function (:issue:`482`) + +**Improvements to existing features** + + - Improve memory usage of `DataFrame.describe` (do not copy data + unnecessarily) (:issue:`425`) + - Use same formatting function for outputting floating point Series to console + as in DataFrame (:issue:`420`) + - DataFrame.delevel will try to infer better dtype for new columns (:issue:`440`) + - Exclude non-numeric types in DataFrame.{corr, cov} + - Override Index.astype to enable dtype casting (:issue:`412`) + - Use same float formatting function for Series.__repr__ (:issue:`420`) + - Use available console width to output DataFrame columns (:issue:`453`) + - Accept ndarrays when setting items in Panel (:issue:`452`) + - Infer console width when printing __repr__ of DataFrame to console (PR + :issue:`453`) + - Optimize scalar value lookups in the general case by 25% or more in Series + and DataFrame + - Can pass DataFrame/DataFrame and DataFrame/Series to + rolling_corr/rolling_cov (:issue:`462`) + - Fix 
performance regression in cross-sectional count in DataFrame, affecting + DataFrame.dropna speed + - Column deletion in DataFrame copies no data (computes views on blocks) (GH + :issue:`158`) + - MultiIndex.get_level_values can take the level name + - More helpful error message when DataFrame.plot fails on one of the columns + (:issue:`478`) + - Improve performance of DataFrame.{index, columns} attribute lookup + +**Bug fixes** + + - Fix O(K^2) memory leak caused by inserting many columns without + consolidating, had been present since 0.4.0 (:issue:`467`) + - `DataFrame.count` should return Series with zero instead of NA with length-0 + axis (:issue:`423`) + - Fix Yahoo! Finance API usage in pandas.io.data (:issue:`419`, :issue:`427`) + - Fix upstream bug causing failure in Series.align with empty Series (:issue:`434`) + - Function passed to DataFrame.apply can return a list, as long as it's the + right length. Regression from 0.4 (:issue:`432`) + - Don't "accidentally" upcast scalar values when indexing using .ix (:issue:`431`) + - Fix groupby exception raised with as_index=False and single column selected + (:issue:`421`) + - Implement DateOffset.__ne__ causing downstream bug (:issue:`456`) + - Fix __doc__-related issue when converting py -> pyo with py2exe + - Bug fix in left join Cython code with duplicate monotonic labels + - Fix bug when unstacking multiple levels described in :issue:`451` + - Exclude NA values in dtype=object arrays, regression from 0.5.0 (:issue:`469`) + - Use Cython map_infer function in DataFrame.applymap to properly infer + output type, handle tuple return values and other things that were breaking + (:issue:`465`) + - Handle floating point index values in HDFStore (:issue:`454`) + - Fixed stale column reference bug (cached Series object) caused by type + change / item deletion in DataFrame (:issue:`473`) + - Index.get_loc should always raise Exception when there are duplicates + - Handle differently-indexed Series input to DataFrame 
constructor (:issue:`475`) + - Omit nuisance columns in multi-groupby with Python function + - Buglet in handling of single grouping in general apply + - Handle type inference properly when passing list of lists or tuples to + DataFrame constructor (:issue:`484`) + - Preserve Index / MultiIndex names in GroupBy.apply concatenation step (GH + :issue:`481`) + +Thanks +------ +- Ralph Bean +- Luca Beltrame +- Marius Cobzarenco +- Andreas Hilboll +- Jev Kuznetsov +- Adam Lichtenstein +- Wouter Overmeire +- Fernando Perez +- Nathan Pinger +- Christian Prinoth +- Alex Reyfman +- Joon Ro +- Chang She +- Ted Square +- Chris Uga +- Dieter Vandenbussche + + + +pandas 0.6.0 +============ + +**Release date:** 11/25/2011 + +**API Changes** + + - Arithmetic methods like `sum` will attempt to sum dtype=object values by + default instead of excluding them (:issue:`382`) + +**New features / modules** + + - Add `melt` function to `pandas.core.reshape` + - Add `level` parameter to group by level in Series and DataFrame + descriptive statistics (:issue:`313`) + - Add `head` and `tail` methods to Series, analogous to DataFrame (PR + :issue:`296`) + - Add `Series.isin` function which checks if each value is contained in a + passed sequence (:issue:`289`) + - Add `float_format` option to `Series.to_string` + - Add `skip_footer` (:issue:`291`) and `converters` (:issue:`343`) options to + `read_csv` and `read_table` + - Add proper, tested weighted least squares to standard and panel OLS (GH + :issue:`303`) + - Add `drop_duplicates` and `duplicated` functions for removing duplicate + DataFrame rows and checking for duplicate rows, respectively (:issue:`319`) + - Implement logical (boolean) operators ``&``, ``|``, ``^`` on DataFrame + (:issue:`347`) + - Add `Series.mad`, mean absolute deviation, matching DataFrame + - Add `QuarterEnd` DateOffset (:issue:`321`) + - Add matrix multiplication function `dot` to DataFrame (:issue:`65`) + - Add `orient` option to `Panel.from_dict` to ease 
creation of mixed-type + Panels (:issue:`359`, :issue:`301`) + - Add `DataFrame.from_dict` with similar `orient` option + - Can now pass list of tuples or list of lists to `DataFrame.from_records` + for fast conversion to DataFrame (:issue:`357`) + - Can pass multiple levels to groupby, e.g. `df.groupby(level=[0, 1])` (GH + :issue:`103`) + - Can sort by multiple columns in `DataFrame.sort_index` (:issue:`92`, :issue:`362`) + - Add fast `get_value` and `put_value` methods to DataFrame and + micro-performance tweaks (:issue:`360`) + - Add `cov` instance methods to Series and DataFrame (:issue:`194`, :issue:`362`) + - Add bar plot option to `DataFrame.plot` (:issue:`348`) + - Add `idxmin` and `idxmax` functions to Series and DataFrame for computing + index labels achieving maximum and minimum values (:issue:`286`) + - Add `read_clipboard` function for parsing DataFrame from OS clipboard, + should work across platforms (:issue:`300`) + - Add `nunique` function to Series for counting unique elements (:issue:`297`) + - DataFrame constructor will use Series name if no columns passed (:issue:`373`) + - Support regular expressions and longer delimiters in read_table/read_csv, + but does not handle quoted strings yet (:issue:`364`) + - Add `DataFrame.to_html` for formatting DataFrame to HTML (:issue:`387`) + - MaskedArray can be passed to DataFrame constructor and masked values will be + converted to NaN (:issue:`396`) + - Add `DataFrame.boxplot` function (:issue:`368`, others) + - Can pass extra args, kwds to DataFrame.apply (:issue:`376`) + +**Improvements to existing features** + + - Raise more helpful exception if date parsing fails in DateRange (:issue:`298`) + - Vastly improved performance of GroupBy on axes with a MultiIndex (:issue:`299`) + - Print level names in hierarchical index in Series repr (:issue:`305`) + - Return DataFrame when performing GroupBy on selected column and + as_index=False (:issue:`308`) + - Can pass vector to `on` argument in `DataFrame.join` 
(:issue:`312`) + - Don't show Series name if it's None in the repr, also omit length for short + Series (:issue:`317`) + - Show legend by default in `DataFrame.plot`, add `legend` boolean flag (GH + :issue:`324`) + - Significantly improved performance of `Series.order`, which also makes + np.unique called on a Series faster (:issue:`327`) + - Faster cythonized count by level in Series and DataFrame (:issue:`341`) + - Raise exception if dateutil 2.0 installed on Python 2.x runtime (:issue:`346`) + - Significant GroupBy performance enhancement with multiple keys with many + "empty" combinations + - New Cython vectorized function `map_infer` speeds up `Series.apply` and + `Series.map` significantly when passed elementwise Python function, + motivated by :issue:`355` + - Cythonized `cache_readonly`, resulting in substantial micro-performance + enhancements throughout the codebase (:issue:`361`) + - Special Cython matrix iterator for applying arbitrary reduction operations + with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) + - Add `raw` option to `DataFrame.apply` for getting better performance when + the passed function only requires an ndarray (:issue:`309`) + - Improve performance of `MultiIndex.from_tuples` + - Can pass multiple levels to `stack` and `unstack` (:issue:`370`) + - Can pass multiple values columns to `pivot_table` (:issue:`381`) + - Can call `DataFrame.delevel` with standard Index with name set (:issue:`393`) + - Use Series name in GroupBy for result index (:issue:`363`) + - Refactor Series/DataFrame stat methods to use common set of NaN-friendly + function + - Handle NumPy scalar integers at C level in Cython conversion routines + +**Bug fixes** + + - Fix bug in `DataFrame.to_csv` when writing a DataFrame with an index + name (:issue:`290`) + - DataFrame should clear its Series caches on consolidation, was causing + "stale" Series to be returned in some corner cases (:issue:`304`) + - DataFrame constructor failed if a column had 
a list of tuples (:issue:`293`) + - Ensure that `Series.apply` always returns a Series and implement + `Series.round` (:issue:`314`) + - Support boolean columns in Cythonized groupby functions (:issue:`315`) + - `DataFrame.describe` should not fail if there are no numeric columns, + instead return categorical describe (:issue:`323`) + - Fixed bug which could cause columns to be printed in wrong order in + `DataFrame.to_string` if specific list of columns passed (:issue:`325`) + - Fix legend plotting failure if DataFrame columns are integers (:issue:`326`) + - Shift start date back by one month for Yahoo! Finance API in pandas.io.data + (:issue:`329`) + - Fix `DataFrame.join` failure on unconsolidated inputs (:issue:`331`) + - DataFrame.min/max will no longer fail on mixed-type DataFrame (:issue:`337`) + - Fix `read_csv` / `read_table` failure when passing list to index_col that is + not in ascending order (:issue:`349`) + - Fix failure passing Int64Index to Index.union when both are monotonic + - Fix error when passing SparseSeries to (dense) DataFrame constructor + - Added missing bang at top of setup.py (:issue:`352`) + - Change `is_monotonic` on MultiIndex so it properly compares the tuples + - Fix MultiIndex outer join logic (:issue:`351`) + - Set index name attribute with single-key groupby (:issue:`358`) + - Bug fix in reflexive binary addition in Series and DataFrame for + non-commutative operations (like string concatenation) (:issue:`353`) + - setupegg.py will invoke Cython (:issue:`192`) + - Fix block consolidation bug after inserting column into MultiIndex (:issue:`366`) + - Fix bug in join operations between Index and Int64Index (:issue:`367`) + - Handle min_periods=0 case in moving window functions (:issue:`365`) + - Fixed corner cases in DataFrame.apply/pivot with empty DataFrame (:issue:`378`) + - Fixed repr exception when Series name is a tuple + - Always return DateRange from `asfreq` (:issue:`390`) + - Pass level names to `swaplevel` 
(:issue:`379`) + - Don't lose index names in `MultiIndex.droplevel` (:issue:`394`) + - Infer more proper return type in `DataFrame.apply` when no columns or rows + depending on whether the passed function is a reduction (:issue:`389`) + - Always return NA/NaN from Series.min/max and DataFrame.min/max when all of a + row/column/values are NA (:issue:`384`) + - Enable partial setting with .ix / advanced indexing (:issue:`397`) + - Handle mixed-type DataFrames correctly in unstack, do not lose type + information (:issue:`403`) + - Fix integer name formatting bug in Index.format and in Series.__repr__ + - Handle label types other than string passed to groupby (:issue:`405`) + - Fix bug in .ix-based indexing with partial retrieval when a label is not + contained in a level + - Index name was not being pickled (:issue:`408`) + - Level name should be passed to result index in GroupBy.apply (:issue:`416`) + +Thanks +------ + +- Craig Austin +- Marius Cobzarenco +- Joel Cross +- Jeff Hammerbacher +- Adam Klein +- Thomas Kluyver +- Jev Kuznetsov +- Kieran O'Mahony +- Wouter Overmeire +- Nathan Pinger +- Christian Prinoth +- Skipper Seabold +- Chang She +- Ted Square +- Aman Thakral +- Chris Uga +- Dieter Vandenbussche +- carljv +- rsamson + + + +pandas 0.5.0 +============ + +**Release date:** 10/24/2011 + +This release of pandas includes a number of API changes (see below) and cleanup +of deprecated APIs from pre-0.4.0 releases. There are also bug fixes, new +features, numerous significant performance enhancements, and includes a new +IPython completer hook to enable tab completion of DataFrame columns accesses +as attributes (a new feature). + +In addition to the changes listed here from 0.4.3 to 0.5.0, the minor releases +0.4.1, 0.4.2, and 0.4.3 brought some significant new functionality and +performance improvements that are worth taking a look at. + +Thanks to all for bug reports, contributed patches and generally providing +feedback on the library. 
+ +**API Changes** + + - `read_table`, `read_csv`, and `ExcelFile.parse` default arguments for + `index_col` is now None. To use one or more of the columns as the resulting + DataFrame's index, these must be explicitly specified now + - Parsing functions like `read_csv` no longer parse dates by default (GH + :issue:`225`) + - Removed `weights` option in panel regression which was not doing anything + principled (:issue:`155`) + - Changed `buffer` argument name in `Series.to_string` to `buf` + - `Series.to_string` and `DataFrame.to_string` now return strings by default + instead of printing to sys.stdout + - Deprecated `nanRep` argument in various `to_string` and `to_csv` functions + in favor of `na_rep`. Will be removed in 0.6 (:issue:`275`) + - Renamed `delimiter` to `sep` in `DataFrame.from_csv` for consistency + - Changed order of `Series.clip` arguments to match those of `numpy.clip` and + added (unimplemented) `out` argument so `numpy.clip` can be called on a + Series (:issue:`272`) + - Series functions renamed (and thus deprecated) in 0.4 series have been + removed: + + * `asOf`, use `asof` + * `toDict`, use `to_dict` + * `toString`, use `to_string` + * `toCSV`, use `to_csv` + * `merge`, use `map` + * `applymap`, use `apply` + * `combineFirst`, use `combine_first` + * `_firstTimeWithValue` use `first_valid_index` + * `_lastTimeWithValue` use `last_valid_index` + + - DataFrame functions renamed / deprecated in 0.4 series have been removed: + + * `asMatrix` method, use `as_matrix` or `values` attribute + * `combineFirst`, use `combine_first` + * `getXS`, use `xs` + * `merge`, use `join` + * `fromRecords`, use `from_records` + * `fromcsv`, use `from_csv` + * `toRecords`, use `to_records` + * `toDict`, use `to_dict` + * `toString`, use `to_string` + * `toCSV`, use `to_csv` + * `_firstTimeWithValue` use `first_valid_index` + * `_lastTimeWithValue` use `last_valid_index` + * `toDataMatrix` is no longer needed + * `rows()` method, use `index` attribute + * `cols()` 
method, use `columns` attribute + * `dropEmptyRows()`, use `dropna(how='all')` + * `dropIncompleteRows()`, use `dropna()` + * `tapply(f)`, use `apply(f, axis=1)` + * `tgroupby(keyfunc, aggfunc)`, use `groupby` with `axis=1` + + - Other outstanding deprecations have been removed: + + * `indexField` argument in `DataFrame.from_records` + * `missingAtEnd` argument in `Series.order`. Use `na_last` instead + * `Series.fromValue` classmethod, use regular `Series` constructor instead + * Functions `parseCSV`, `parseText`, and `parseExcel` methods in + `pandas.io.parsers` have been removed + * `Index.asOfDate` function + * `Panel.getMinorXS` (use `minor_xs`) and `Panel.getMajorXS` (use + `major_xs`) + * `Panel.toWide`, use `Panel.to_wide` instead + +**New features / modules** + + - Added `DataFrame.align` method with standard join options + - Added `parse_dates` option to `read_csv` and `read_table` methods to + optionally try to parse dates in the index columns + - Add `nrows`, `chunksize`, and `iterator` arguments to `read_csv` and + `read_table`. The last two return a new `TextParser` class capable of + lazily iterating through chunks of a flat file (:issue:`242`) + - Added ability to join on multiple columns in `DataFrame.join` (:issue:`214`) + - Added private `_get_duplicates` function to `Index` for identifying + duplicate values more easily + - Added column attribute access to DataFrame, e.g. df.A equivalent to df['A'] + if 'A' is a column in the DataFrame (:issue:`213`) + - Added IPython tab completion hook for DataFrame columns. 
(:issue:`233`, :issue:`230`) + - Implement `Series.describe` for Series containing objects (:issue:`241`) + - Add inner join option to `DataFrame.join` when joining on key(s) (:issue:`248`) + - Can select set of DataFrame columns by passing a list to `__getitem__` (GH + :issue:`253`) + - Can use & and | to intersection / union Index objects, respectively (GH + :issue:`261`) + - Added `pivot_table` convenience function to pandas namespace (:issue:`234`) + - Implemented `Panel.rename_axis` function (:issue:`243`) + - DataFrame will show index level names in console output + - Implemented `Panel.take` + - Add `set_eng_float_format` function for setting alternate DataFrame + floating point string formatting + - Add convenience `set_index` function for creating a DataFrame index from + its existing columns + +**Improvements to existing features** + + - Major performance improvements in file parsing functions `read_csv` and + `read_table` + - Added Cython function for converting tuples to ndarray very fast. Speeds up + many MultiIndex-related operations + - File parsing functions like `read_csv` and `read_table` will explicitly + check if a parsed index has duplicates and raise a more helpful exception + rather than deferring the check until later + - Refactored merging / joining code into a tidy class and disabled unnecessary + computations in the float/object case, thus getting about 10% better + performance (:issue:`211`) + - Improved speed of `DataFrame.xs` on mixed-type DataFrame objects by about + 5x, regression from 0.3.0 (:issue:`215`) + - With new `DataFrame.align` method, speeding up binary operations between + differently-indexed DataFrame objects by 10-25%. 
+ - Significantly sped up conversion of nested dict into DataFrame (:issue:`212`) + - Can pass hierarchical index level name to `groupby` instead of the level + number if desired (:issue:`223`) + - Add support for different delimiters in `DataFrame.to_csv` (:issue:`244`) + - Add more helpful error message when importing pandas post-installation from + the source directory (:issue:`250`) + - Significantly speed up DataFrame `__repr__` and `count` on large mixed-type + DataFrame objects + - Better handling of pyx file dependencies in Cython module build (:issue:`271`) + +**Bug fixes** + + - `read_csv` / `read_table` fixes + + - Be less aggressive about converting float->int in cases of floating point + representations of integers like 1.0, 2.0, etc. + - "True"/"False" will not get correctly converted to boolean + - Index name attribute will get set when specifying an index column + - Passing column names should force `header=None` (:issue:`257`) + - Don't modify passed column names when `index_col` is not None + (:issue:`258`) + - Can sniff CSV separator in zip file (since seek is not supported, was + failing before) + + - Worked around matplotlib "bug" in which series[:, np.newaxis] fails. Should + be reported upstream to matplotlib (:issue:`224`) + - DataFrame.iteritems was not returning Series with the name attribute + set. 
Also neither was DataFrame._series + - Can store datetime.date objects in HDFStore (:issue:`231`) + - Index and Series names are now stored in HDFStore + - Fixed problem in which data would get upcasted to object dtype in + GroupBy.apply operations (:issue:`237`) + - Fixed outer join bug with empty DataFrame (:issue:`238`) + - Can create empty Panel (:issue:`239`) + - Fix join on single key when passing list with 1 entry (:issue:`246`) + - Don't raise Exception on plotting DataFrame with an all-NA column (:issue:`251`, + :issue:`254`) + - Bug min/max errors when called on integer DataFrames (:issue:`241`) + - `DataFrame.iteritems` and `DataFrame._series` not assigning name attribute + - Panel.__repr__ raised exception on length-0 major/minor axes + - `DataFrame.join` on key with empty DataFrame produced incorrect columns + - Implemented `MultiIndex.diff` (:issue:`260`) + - `Int64Index.take` and `MultiIndex.take` lost name field, fix downstream + issue :issue:`262` + - Can pass list of tuples to `Series` (:issue:`270`) + - Can pass level name to `DataFrame.stack` + - Support set operations between MultiIndex and Index + - Fix many corner cases in MultiIndex set operations + - Fix MultiIndex-handling bug with GroupBy.apply when returned groups are not + indexed the same + - Fix corner case bugs in DataFrame.apply + - Setting DataFrame index did not cause Series cache to get cleared + - Various int32 -> int64 platform-specific issues + - Don't be too aggressive converting to integer when parsing file with + MultiIndex (:issue:`285`) + - Fix bug when slicing Series with negative indices before beginning + +Thanks +------ + +- Thomas Kluyver +- Daniel Fortunov +- Aman Thakral +- Luca Beltrame +- Wouter Overmeire + + + +pandas 0.4.3 +============ + +Release notes +------------- + +**Release date:** 10/9/2011 + +This is largely a bugfix release from 0.4.2 but also includes a handful of new +and enhanced features. 
Also, pandas can now be installed and used on Python 3 +(thanks Thomas Kluyver!). + +**New features / modules** + + - Python 3 support using 2to3 (:issue:`200`, Thomas Kluyver) + - Add `name` attribute to `Series` and added relevant logic and tests. Name + now prints as part of `Series.__repr__` + - Add `name` attribute to standard Index so that stacking / unstacking does + not discard names and so that indexed DataFrame objects can be reliably + round-tripped to flat files, pickle, HDF5, etc. + - Add `isnull` and `notnull` as instance methods on Series (:issue:`209`, :issue:`203`) + +**Improvements to existing features** + + - Skip xlrd-related unit tests if not installed + - `Index.append` and `MultiIndex.append` can accept a list of Index objects to + concatenate together + - Altered binary operations on differently-indexed SparseSeries objects to use + the integer-based (dense) alignment logic which is faster with a larger + number of blocks (:issue:`205`) + - Refactored `Series.__repr__` to be a bit more clean and consistent + +**API Changes** + + - `Series.describe` and `DataFrame.describe` now bring the 25% and 75% + quartiles instead of the 10% and 90% deciles. The other outputs have not + changed + - `Series.toString` will print deprecation warning, has been de-camelCased to + `to_string` + +**Bug fixes** + + - Fix broken interaction between `Index` and `Int64Index` when calling + intersection. 
Implement `Int64Index.intersection` + - `MultiIndex.sortlevel` discarded the level names (:issue:`202`) + - Fix bugs in groupby, join, and append due to improper concatenation of + `MultiIndex` objects (:issue:`201`) + - Fix regression from 0.4.1, `isnull` and `notnull` ceased to work on other + kinds of Python scalar objects like `datetime.datetime` + - Raise more helpful exception when attempting to write empty DataFrame or + LongPanel to `HDFStore` (:issue:`204`) + - Use stdlib csv module to properly escape strings with commas in + `DataFrame.to_csv` (:issue:`206`, Thomas Kluyver) + - Fix Python ndarray access in Cython code for sparse blocked index integrity + check + - Fix bug writing Series to CSV in Python 3 (:issue:`209`) + - Miscellaneous Python 3 bugfixes + +Thanks +------ + + - Thomas Kluyver + - rsamson + + + +pandas 0.4.2 +============ + +Release notes +------------- + +**Release date:** 10/3/2011 + +This is a performance optimization release with several bug fixes. The new +Int64Index and new merging / joining Cython code and related Python +infrastructure are the main new additions + +**New features / modules** + + - Added fast `Int64Index` type with specialized join, union, + intersection. Will result in significant performance enhancements for + int64-based time series (e.g. using NumPy's datetime64 one day) and also + faster operations on DataFrame objects storing record array-like data. + - Refactored `Index` classes to have a `join` method and associated data + alignment routines throughout the codebase to be able to leverage optimized + joining / merging routines. 
+ - Added `Series.align` method for aligning two series with choice of join + method + - Wrote faster Cython data alignment / merging routines resulting in + substantial speed increases + - Added `is_monotonic` property to `Index` classes with associated Cython + code to evaluate the monotonicity of the `Index` values + - Add method `get_level_values` to `MultiIndex` + - Implemented shallow copy of `BlockManager` object in `DataFrame` internals + +**Improvements to existing features** + + - Improved performance of `isnull` and `notnull`, a regression from v0.3.0 + (:issue:`187`) + - Wrote templating / code generation script to auto-generate Cython code for + various functions which need to be available for the 4 major data types + used in pandas (float64, bool, object, int64) + - Refactored code related to `DataFrame.join` so that intermediate aligned + copies of the data in each `DataFrame` argument do not need to be + created. Substantial performance increases result (:issue:`176`) + - Substantially improved performance of generic `Index.intersection` and + `Index.union` + - Improved performance of `DateRange.union` with overlapping ranges and + non-cacheable offsets (like Minute). Implemented analogous fast + `DateRange.intersection` for overlapping ranges. 
+ - Implemented `BlockManager.take` resulting in significantly faster `take` + performance on mixed-type `DataFrame` objects (:issue:`104`) + - Improved performance of `Series.sort_index` + - Significant groupby performance enhancement: removed unnecessary integrity + checks in DataFrame internals that were slowing down slicing operations to + retrieve groups + - Added informative Exception when passing dict to DataFrame groupby + aggregation with axis != 0 + +**API Changes** + +None + +**Bug fixes** + + - Fixed minor unhandled exception in Cython code implementing fast groupby + aggregation operations + - Fixed bug in unstacking code manifesting with more than 3 hierarchical + levels + - Throw exception when step specified in label-based slice (:issue:`185`) + - Fix isnull to correctly work with np.float32. Fix upstream bug described in + :issue:`182` + - Finish implementation of as_index=False in groupby for DataFrame + aggregation (:issue:`181`) + - Raise SkipTest for pre-epoch HDFStore failure. Real fix will be sorted out + via datetime64 dtype + +Thanks +------ + +- Uri Laserson +- Scott Sinclair + + + +pandas 0.4.1 +============ + +Release notes +------------- + +**Release date:** 9/25/2011 + +This is primarily a bug fix release but includes some new features and +improvements + +**New features / modules** + + - Added new `DataFrame` methods `get_dtype_counts` and property `dtypes` + - Setting of values using ``.ix`` indexing attribute in mixed-type DataFrame + objects has been implemented (fixes :issue:`135`) + - `read_csv` can read multiple columns into a `MultiIndex`. DataFrame's + `to_csv` method will properly write out a `MultiIndex` which can be read + back (:issue:`151`, thanks to Skipper Seabold) + - Wrote fast time series merging / joining methods in Cython. 
Will be + integrated later into DataFrame.join and related functions + - Added `ignore_index` option to `DataFrame.append` for combining unindexed + records stored in a DataFrame + +**Improvements to existing features** + + - Some speed enhancements with internal Index type-checking function + - `DataFrame.rename` has a new `copy` parameter which can rename a DataFrame + in place + - Enable unstacking by level name (:issue:`142`) + - Enable sortlevel to work by level name (:issue:`141`) + - `read_csv` can automatically "sniff" other kinds of delimiters using + `csv.Sniffer` (:issue:`146`) + - Improved speed of unit test suite by about 40% + - Exception will not be raised calling `HDFStore.remove` on non-existent node + with where clause + - Optimized `_ensure_index` function resulting in performance savings in + type-checking Index objects + +**API Changes** + +None + +**Bug fixes** + + - Fixed DataFrame constructor bug causing downstream problems (e.g. .copy() + failing) when passing a Series as the values along with a column name and + index + - Fixed single-key groupby on DataFrame with as_index=False (:issue:`160`) + - `Series.shift` was failing on integer Series (:issue:`154`) + - `unstack` methods were producing incorrect output in the case of duplicate + hierarchical labels. 
An exception will now be raised (:issue:`147`) + - Calling `count` with level argument caused reduceat failure or segfault in + earlier NumPy (:issue:`169`) + - Fixed `DataFrame.corrwith` to automatically exclude non-numeric data (GH + :issue:`144`) + - Unicode handling bug fixes in `DataFrame.to_string` (:issue:`138`) + - Excluding OLS degenerate unit test case that was causing platform specific + failure (:issue:`149`) + - Skip blosc-dependent unit tests for PyTables < 2.2 (:issue:`137`) + - Calling `copy` on `DateRange` did not copy over attributes to the new object + (:issue:`168`) + - Fix bug in `HDFStore` in which Panel data could be appended to a Table with + different item order, thus resulting in an incorrect result read back + +Thanks +------ +- Yaroslav Halchenko +- Jeff Reback +- Skipper Seabold +- Dan Lovell +- Nick Pentreath + + + +pandas 0.4.0 +============ + +Release notes +------------- + +**Release date:** 9/12/2011 + +**New features / modules** + + - `pandas.core.sparse` module: "Sparse" (mostly-NA, or some other fill value) + versions of `Series`, `DataFrame`, and `Panel`. For low-density data, this + will result in significant performance boosts, and smaller memory + footprint. Added `to_sparse` methods to `Series`, `DataFrame`, and + `Panel`. See online documentation for more on these + - Fancy indexing operator on Series / DataFrame, e.g. via .ix operator. Both + getting and setting of values is supported; however, setting values will only + currently work on homogeneously-typed DataFrame objects. Things like: + + * series.ix[[d1, d2, d3]] + * frame.ix[5:10, ['C', 'B', 'A']], frame.ix[5:10, 'A':'C'] + * frame.ix[date1:date2] + + - Significantly enhanced `groupby` functionality + + * Can groupby multiple keys, e.g. df.groupby(['key1', 'key2']). 
Iteration with + multiple groupings produces a flattened tuple + * "Nuisance" columns (non-aggregatable) will automatically be excluded from + DataFrame aggregation operations + * Added automatic dispatching to Series / DataFrame methods to more easily + invoke methods on groups. e.g. s.groupby(crit).std() will work even though + `std` is not implemented on the `GroupBy` class + + - Hierarchical / multi-level indexing + + * New `MultiIndex` class. Integrated `MultiIndex` into `Series` and + `DataFrame` fancy indexing, slicing, __getitem__ and __setitem__, + reindexing, etc. Added `level` keyword argument to `groupby` to enable + grouping by a level of a `MultiIndex` + + - New data reshaping functions: `stack` and `unstack` on DataFrame and Series + + * Integrate with MultiIndex to enable sophisticated reshaping of data + + - `Index` objects (labels for axes) are now capable of holding tuples + - `Series.describe`, `DataFrame.describe`: produces an R-like table of summary + statistics about each data column + - `DataFrame.quantile`, `Series.quantile` for computing sample quantiles of data + across requested axis + - Added general `DataFrame.dropna` method to replace `dropIncompleteRows` and + `dropEmptyRows`, deprecated those. + - `Series` arithmetic methods with optional fill_value for missing data, + e.g. a.add(b, fill_value=0). If a location is missing for both it will still + be missing in the result though. + - fill_value option has been added to `DataFrame`.{add, mul, sub, div} methods + similar to `Series` + - Boolean indexing with `DataFrame` objects: data[data > 0.1] = 0.1 or + data[data> other] = 1. + - `pytz` / tzinfo support in `DateRange` + + * `tz_localize`, `tz_normalize`, and `tz_validate` methods added + + - Added `ExcelFile` class to `pandas.io.parsers` for parsing multiple sheets out + of a single Excel 2003 document + - `GroupBy` aggregations can now optionally *broadcast*, e.g. 
produce an object + of the same size with the aggregated value propagated + - Added `select` function in all data structures: reindex axis based on + arbitrary criterion (function returning boolean value), + e.g. frame.select(lambda x: 'foo' in x, axis=1) + - `DataFrame.consolidate` method, API function relating to redesigned internals + - `DataFrame.insert` method for inserting column at a specified location rather + than the default __setitem__ behavior (which puts it at the end) + - `HDFStore` class in `pandas.io.pytables` has been largely rewritten using + patches from Jeff Reback and others. It now supports mixed-type `DataFrame` + and `Series` data and can store `Panel` objects. It also has the option to + query `DataFrame` and `Panel` data. Loading data from legacy `HDFStore` + files is supported explicitly in the code + - Added `set_printoptions` method to modify appearance of DataFrame tabular + output + - `rolling_quantile` functions; a moving version of `Series.quantile` / + `DataFrame.quantile` + - Generic `rolling_apply` moving window function + - New `drop` method added to `Series`, `DataFrame`, etc. which can drop a set of + labels from an axis, producing a new object + - `reindex` methods now sport a `copy` option so that data is not forced to be + copied when the resulting object is indexed the same + - Added `sort_index` methods to Series and Panel. Renamed `DataFrame.sort` + to `sort_index`. Leaving `DataFrame.sort` for now. + - Added ``skipna`` option to statistical instance methods on all the data + structures + - `pandas.io.data` module providing a consistent interface for reading time + series data from several different sources 
This should reduce + confusion from users about which class to use. + + * Note that under the hood there is a new essentially "lazy evaluation" + scheme with respect to adding columns to DataFrame. During some + operations, like-typed blocks will be "consolidated" but not before. + + * `DataFrame` accessing columns repeatedly is now significantly faster than + `DataMatrix` used to be in 0.3.0 due to an internal Series caching mechanism + (which are all views on the underlying data) + * Column ordering for mixed type data is now completely consistent in + `DataFrame`. In prior releases, there was inconsistent column ordering in + `DataMatrix` + * Improved console / string formatting of DataMatrix with negative numbers + * Improved tabular data parsing functions, `read_table` and `read_csv`: + + * Added `skiprows` and `na_values` arguments to `pandas.io.parsers` functions + for more flexible IO + * `parseCSV` / `read_csv` functions and others in `pandas.io.parsers` now can + take a list of custom NA values, and also a list of rows to skip + + * Can slice `DataFrame` and get a view of the data (when homogeneously typed), + e.g. frame.xs(idx, copy=False) or frame.ix[idx] + * Many speed optimizations throughout `Series` and `DataFrame` + * Eager evaluation of groups when calling ``groupby`` functions, so if there is + an exception with the grouping function it will be raised immediately versus + sometime later on when the groups are needed + * `datetools.WeekOfMonth` offset can be parameterized with `n` different than 1 + or -1. + * Statistical methods on DataFrame like `mean`, `std`, `var`, `skew` will now + ignore non-numerical data. Before a not very useful error message was + generated. 
A flag `numeric_only` has been added to `DataFrame.sum` and + `DataFrame.count` to enable this behavior in those methods if so desired + (disabled by default) + * `DataFrame.pivot` generalized to enable pivoting multiple columns into a + `DataFrame` with hierarchical columns + * `DataFrame` constructor can accept structured / record arrays + * `Panel` constructor can accept a dict of DataFrame-like objects. Do not + need to use `from_dict` anymore (`from_dict` is there to stay, though). + +**API Changes** + + * The `DataMatrix` variable now refers to `DataFrame`, will be removed within + two releases + * `WidePanel` is now known as `Panel`. The `WidePanel` variable in the pandas + namespace now refers to the renamed `Panel` class + * `LongPanel` and `Panel` / `WidePanel` now no longer have a common + subclass. `LongPanel` is now a subclass of `DataFrame` having a number of + additional methods and a hierarchical index instead of the old + `LongPanelIndex` object, which has been removed. Legacy `LongPanel` pickles + may not load properly + * Cython is now required to build `pandas` from a development branch. This was + done to avoid continuing to check in cythonized C files into source + control. Builds from released source distributions will not require Cython + * Cython code has been moved up to a top level `pandas/src` directory. Cython + extension modules have been renamed and promoted from the `lib` subpackage to + the top level, i.e. + + * `pandas.lib.tseries` -> `pandas._tseries` + * `pandas.lib.sparse` -> `pandas._sparse` + + * `DataFrame` pickling format has changed. Backwards compatibility for legacy + pickles is provided, but it's recommended to consider PyTables-based + `HDFStore` for storing data with a longer expected shelf life + * A `copy` argument has been added to the `DataFrame` constructor to avoid + unnecessary copying of data. 
Data is no longer copied by default when passed + into the constructor + * Handling of boolean dtype in `DataFrame` has been improved to support storage + of boolean data with NA / NaN values. Before it was being converted to float64 + so this should not (in theory) cause API breakage + * To optimize performance, Index objects now only check that their labels are + unique when uniqueness matters (i.e. when someone goes to perform a + lookup). This is a potentially dangerous tradeoff, but will lead to much + better performance in many places (like groupby). + * Boolean indexing using Series must now have the same indices (labels) + * Backwards compatibility support for begin/end/nPeriods keyword arguments in + DateRange class has been removed + * More intuitive / shorter filling aliases `ffill` (for `pad`) and `bfill` (for + `backfill`) have been added to the functions that use them: `reindex`, + `asfreq`, `fillna`. + * `pandas.core.mixins` code moved to `pandas.core.generic` + * `buffer` keyword arguments (e.g. `DataFrame.toString`) renamed to `buf` to + avoid using Python built-in name + * `DataFrame.rows()` removed (use `DataFrame.index`) + * Added deprecation warning to `DataFrame.cols()`, to be removed in next release + * `DataFrame` deprecations and de-camelCasing: `merge`, `asMatrix`, + `toDataMatrix`, `_firstTimeWithValue`, `_lastTimeWithValue`, `toRecords`, + `fromRecords`, `tgroupby`, `toString` + * `pandas.io.parsers` method deprecations + + * `parseCSV` is now `read_csv` and keyword arguments have been de-camelCased + * `parseText` is now `read_table` + * `parseExcel` is replaced by the `ExcelFile` class and its `parse` method + + * `fillMethod` arguments (deprecated in prior release) removed, should be + replaced with `method` + * `Series.fill`, `DataFrame.fill`, and `Panel.fill` removed, use `fillna` + instead + * `groupby` functions now exclude NA / NaN values from the list of groups. This + matches R behavior with NAs in factors e.g. 
with the `tapply` function + * Removed `parseText`, `parseCSV` and `parseExcel` from pandas namespace + * `Series.combineFunc` renamed to `Series.combine` and made a bit more general + with a `fill_value` keyword argument defaulting to NaN + * Removed `pandas.core.pytools` module. Code has been moved to + `pandas.core.common` + * Tacked on `groupName` attribute for groups in GroupBy renamed to `name` + * Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant + * Slicing a `Series` returns a view now + * More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`, + `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`, + `combineFirst` to `combine_first`. Will print `FutureWarning`. + * `DataFrame.to_csv` does not write an "index" column label by default + anymore since the output file can be read back without it. However, there + is a new ``index_label`` argument. So you can do ``index_label='index'`` to + emulate the old behavior + * `datetools.Week` argument renamed from `dayOfWeek` to `weekday` + * `timeRule` argument in `shift` has been deprecated in favor of using the + `offset` argument for everything. So you can still pass a time rule string + to `offset` + * Added optional `encoding` argument to `read_csv`, `read_table`, `to_csv`, + `from_csv` to handle unicode in python 2.x + +**Bug fixes** + + * Column ordering in `pandas.io.parsers.parseCSV` will match CSV in the presence + of mixed-type data + * Fixed handling of Excel 2003 dates in `pandas.io.parsers` + * `DateRange` caching was happening with high resolution `DateOffset` objects, + e.g. `DateOffset(seconds=1)`. 
This has been fixed + * Fixed __truediv__ issue in `DataFrame` + * Fixed `DataFrame.toCSV` bug preventing IO round trips in some cases + * Fixed bug in `Series.plot` causing matplotlib to barf in exceptional cases + * Disabled `Index` objects from being hashable, like ndarrays + * Added `__ne__` implementation to `Index` so that operations like ts[ts != idx] + will work + * Added `__ne__` implementation to `DataFrame` + * Bug / unintuitive result when calling `fillna` on unordered labels + * Bug calling `sum` on boolean DataFrame + * Bug fix when creating a DataFrame from a dict with scalar values + * Series.{sum, mean, std, ...} now return NA/NaN when the whole Series is NA + * NumPy 1.4 through 1.6 compatibility fixes + * Fixed bug in bias correction in `rolling_cov`, was affecting `rolling_corr` + too + * R-square value was incorrect in the presence of fixed and time effects in + the `PanelOLS` classes + * `HDFStore` can handle duplicates in table format, will take + +Thanks +------ + - Joon Ro + - Michael Pennington + - Chris Uga + - Chris Withers + - Jeff Reback + - Ted Square + - Craig Austin + - William Ferreira + - Daniel Fortunov + - Tony Roberts + - Martin Felder + - John Marino + - Tim McNamara + - Justin Berka + - Dieter Vandenbussche + - Shane Conway + - Skipper Seabold + - Chris Jordan-Squire + +pandas 0.3.0 +============ + +Release notes +------------- + +**Release date:** February 20, 2011 + +**New features / modules** + + - `corrwith` function to compute column- or row-wise correlations between two + DataFrame objects + - Can boolean-index DataFrame objects, e.g. df[df > 2] = 2, px[px > last_px] = 0 + - Added comparison magic methods (__lt__, __gt__, etc.) + - Flexible explicit arithmetic methods (add, mul, sub, div, etc.) 
+ - Added `reindex_like` method + - Added `reindex_like` method to WidePanel + - Convenience functions for accessing SQL-like databases in `pandas.io.sql` + module + - Added (still experimental) HDFStore class for storing pandas data + structures using HDF5 / PyTables in `pandas.io.pytables` module + - Added WeekOfMonth date offset + - `pandas.rpy` (experimental) module created, provide some interfacing / + conversion between rpy2 and pandas + +**Improvements** + + - Unit test coverage: 100% line coverage of core data structures + - Speed enhancement to rolling_{median, max, min} + - Column ordering between DataFrame and DataMatrix is now consistent: before + DataFrame would not respect column order + - Improved {Series, DataFrame}.plot methods to be more flexible (can pass + matplotlib Axis arguments, plot DataFrame columns in multiple subplots, + etc.) + +**API Changes** + + - Exponentially-weighted moment functions in `pandas.stats.moments` have a + more consistent API and accept a min_periods argument like their regular + moving counterparts. + - **fillMethod** argument in Series, DataFrame changed to **method**, + `FutureWarning` added. 
+ - **fill** method in Series, DataFrame/DataMatrix, WidePanel renamed to + **fillna**, `FutureWarning` added to **fill** + - Renamed **DataFrame.getXS** to **xs**, `FutureWarning` added + - Removed **cap** and **floor** functions from DataFrame, renamed to + **clip_upper** and **clip_lower** for consistency with NumPy + +**Bug fixes** + + - Fixed bug in IndexableSkiplist Cython code that was breaking rolling_max + function + - Numerous numpy.int64-related indexing fixes + - Several NumPy 1.4.0 NaN-handling fixes + - Bug fixes to pandas.io.parsers.parseCSV + - Fixed `DateRange` caching issue with unusual date offsets + - Fixed bug in `DateRange.union` + - Fixed corner case in `IndexableSkiplist` implementation diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt index 51075a61bec4d..d0c0ecc148239 100644 --- a/doc/source/v0.10.0.txt +++ b/doc/source/v0.10.0.txt @@ -159,7 +159,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added: s s.apply(f) -- New API functions for working with pandas options (GH2097_): +- New API functions for working with pandas options (:issue:`2097`): - ``get_option`` / ``set_option`` - get/set the value of an option. Partial names are accepted. - ``reset_option`` - reset one or more options to @@ -174,7 +174,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added: get_option("display.max_rows") -- to_string() methods now always return unicode strings (GH2224_). +- to_string() methods now always return unicode strings (:issue:`2224`). 
New features ~~~~~~~~~~~~ @@ -297,22 +297,22 @@ Updated PyTables Support - performance improvments on table writing - support for arbitrarily indexed dimensions -- ``SparseSeries`` now has a ``density`` property (GH2384_) +- ``SparseSeries`` now has a ``density`` property (:issue:`2384`) - enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument - to strip arbitrary characters (GH2411_) + to strip arbitrary characters (:issue:`2411`) - implement ``value_vars`` in ``melt`` to limit values to certain columns - and add ``melt`` to pandas namespace (GH2412_) + and add ``melt`` to pandas namespace (:issue:`2412`) **Bug Fixes** -- added ``Term`` method of specifying where conditions (GH1996_). +- added ``Term`` method of specifying where conditions (:issue:`1996`). - ``del store['df']`` now call ``store.remove('df')`` for store deletion - deleting of consecutive rows is much faster than before - ``min_itemsize`` parameter can be specified in table creation to force a minimum size for indexing columns (the previous implementation would set the column size based on the first append) - indexing support via ``create_table_index`` (requires PyTables >= 2.3) - (GH698_). + (:issue:`698`). - appending on a store would fail if the table was not first created via ``put`` - fixed issue with missing attributes after loading a pickled dataframe (GH2431) - minor change to select and remove: require a table ONLY if where is also @@ -350,16 +350,7 @@ Adding experimental support for Panel4D and factory functions to create n-dimens -See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. -.. _GH698: https://github.com/pydata/pandas/issues/698 -.. _GH1996: https://github.com/pydata/pandas/issues/1996 -.. _GH2316: https://github.com/pydata/pandas/issues/2316 -.. _GH2097: https://github.com/pydata/pandas/issues/2097 -.. 
_GH2224: https://github.com/pydata/pandas/issues/2224 -.. _GH2431: https://github.com/pydata/pandas/issues/2431 -.. _GH2412: https://github.com/pydata/pandas/issues/2412 -.. _GH2411: https://github.com/pydata/pandas/issues/2411 -.. _GH2384: https://github.com/pydata/pandas/issues/2384 diff --git a/doc/source/v0.10.1.txt b/doc/source/v0.10.1.txt index dafa4300af0e3..0d92e359c2a4a 100644 --- a/doc/source/v0.10.1.txt +++ b/doc/source/v0.10.1.txt @@ -15,14 +15,14 @@ API changes - Functions taking an ``inplace`` option return the calling object as before. A deprecation message has been added -- Groupby aggregations Max/Min no longer exclude non-numeric data (GH2700_) +- Groupby aggregations Max/Min no longer exclude non-numeric data (:issue:`2700`) - Resampling an empty DataFrame now returns an empty DataFrame instead of - raising an exception (GH2640_) + raising an exception (:issue:`2640`) - The file reader will now raise an exception when NA values are found in an explicitly specified integer column instead of converting the column to float - (GH2631_) + (:issue:`2631`) - DatetimeIndex.unique now returns a DatetimeIndex with the same name and -- timezone instead of an array (GH2563_) +- timezone instead of an array (:issue:`2563`) New features ~~~~~~~~~~~~ @@ -164,76 +164,49 @@ combined result, by using ``where`` on a selector table. - ``Select`` now supports passing ``start`` and ``stop`` to provide selection space limiting in selection. 
-- Greatly improved ISO8601 (e.g., yyyy-mm-dd) date parsing for file parsers (GH2698_) +- Greatly improved ISO8601 (e.g., yyyy-mm-dd) date parsing for file parsers (:issue:`2698`) - Allow ``DataFrame.merge`` to handle combinatorial sizes too large for 64-bit - integer (GH2690_) -- Series now has unary negation (-series) and inversion (~series) operators (GH2686_) -- DataFrame.plot now includes a ``logx`` parameter to change the x-axis to log scale (GH2327_) -- Series arithmetic operators can now handle constant and ndarray input (GH2574_) -- ExcelFile now takes a ``kind`` argument to specify the file type (GH2613_) -- A faster implementation for Series.str methods (GH2602_) + integer (:issue:`2690`) +- Series now has unary negation (-series) and inversion (~series) operators (:issue:`2686`) +- DataFrame.plot now includes a ``logx`` parameter to change the x-axis to log scale (:issue:`2327`) +- Series arithmetic operators can now handle constant and ndarray input (:issue:`2574`) +- ExcelFile now takes a ``kind`` argument to specify the file type (:issue:`2613`) +- A faster implementation for Series.str methods (:issue:`2602`) **Bug Fixes** - ``HDFStore`` tables can now store ``float32`` types correctly (cannot be mixed with ``float64`` however) -- Fixed Google Analytics prefix when specifying request segment (GH2713_). +- Fixed Google Analytics prefix when specifying request segment (:issue:`2713`). - Function to reset Google Analytics token store so users can recover from - improperly setup client secrets (GH2687_). -- Fixed groupby bug resulting in segfault when passing in MultiIndex (GH2706_) + improperly setup client secrets (:issue:`2687`). 
+- Fixed groupby bug resulting in segfault when passing in MultiIndex (:issue:`2706`) - Fixed bug where passing a Series with datetime64 values into `to_datetime` - results in bogus output values (GH2699_) + results in bogus output values (:issue:`2699`) - Fixed bug in ``pattern in HDFStore`` expressions when pattern is not a valid - regex (GH2694_) -- Fixed performance issues while aggregating boolean data (GH2692_) + regex (:issue:`2694`) +- Fixed performance issues while aggregating boolean data (:issue:`2692`) - When given a boolean mask key and a Series of new values, Series __setitem__ - will now align the incoming values with the original Series (GH2686_) + will now align the incoming values with the original Series (:issue:`2686`) - Fixed MemoryError caused by performing counting sort on sorting MultiIndex - levels with a very large number of combinatorial values (GH2684_) + levels with a very large number of combinatorial values (:issue:`2684`) - Fixed bug that causes plotting to fail when the index is a DatetimeIndex with - a fixed-offset timezone (GH2683_) + a fixed-offset timezone (:issue:`2683`) - Corrected businessday subtraction logic when the offset is more than 5 bdays - and the starting date is on a weekend (GH2680_) + and the starting date is on a weekend (:issue:`2680`) - Fixed C file parser behavior when the file has more columns than data - (GH2668_) + (:issue:`2668`) - Fixed file reader bug that misaligned columns with data in the presence of an implicit column and a specified `usecols` value - DataFrames with numerical or datetime indices are now sorted prior to - plotting (GH2609_) + plotting (:issue:`2609`) - Fixed DataFrame.from_records error when passed columns, index, but empty - records (GH2633_) -- Several bug fixed for Series operations when dtype is datetime64 (GH2689_, - GH2629_, GH2626_) + records (:issue:`2633`) +- Several bug fixed for Series operations when dtype is datetime64 (:issue:`2689`, + :issue:`2629`, :issue:`2626`) 
-See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. -.. _GH2706: https://github.com/pydata/pandas/issues/2706 -.. _GH2700: https://github.com/pydata/pandas/issues/2700 -.. _GH2699: https://github.com/pydata/pandas/issues/2699 -.. _GH2698: https://github.com/pydata/pandas/issues/2698 -.. _GH2694: https://github.com/pydata/pandas/issues/2694 -.. _GH2692: https://github.com/pydata/pandas/issues/2692 -.. _GH2690: https://github.com/pydata/pandas/issues/2690 -.. _GH2713: https://github.com/pydata/pandas/issues/2713 -.. _GH2689: https://github.com/pydata/pandas/issues/2689 -.. _GH2686: https://github.com/pydata/pandas/issues/2686 -.. _GH2684: https://github.com/pydata/pandas/issues/2684 -.. _GH2683: https://github.com/pydata/pandas/issues/2683 -.. _GH2680: https://github.com/pydata/pandas/issues/2680 -.. _GH2668: https://github.com/pydata/pandas/issues/2668 -.. _GH2640: https://github.com/pydata/pandas/issues/2640 -.. _GH2609: https://github.com/pydata/pandas/issues/2609 -.. _GH2327: https://github.com/pydata/pandas/issues/2327 -.. _GH2574: https://github.com/pydata/pandas/issues/2574 -.. _GH2609: https://github.com/pydata/pandas/issues/2609 -.. _GH2631: https://github.com/pydata/pandas/issues/2631 -.. _GH2633: https://github.com/pydata/pandas/issues/2633 -.. _GH2629: https://github.com/pydata/pandas/issues/2629 -.. _GH2626: https://github.com/pydata/pandas/issues/2626 -.. _GH2613: https://github.com/pydata/pandas/issues/2613 -.. _GH2602: https://github.com/pydata/pandas/issues/2602 -.. _GH2687: https://github.com/pydata/pandas/issues/2687 -.. 
_GH2563: https://github.com/pydata/pandas/issues/2563 diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index b6b35fddab974..6b7fac0fc12dc 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -129,7 +129,7 @@ Dtype Gotchas Starting in 0.11.0, construction of DataFrame/Series will use default dtypes of ``int64`` and ``float64``, *regardless of platform*. This is not an apparent change from earlier versions of pandas. If you specify -dtypes, they *WILL* be respected, however (GH2837_) +dtypes, they *WILL* be respected, however (:issue:`2837`) The following will all result in ``int64`` dtypes @@ -176,7 +176,7 @@ Datetimes Conversion Datetime64[ns] columns in a DataFrame (or a Series) allow the use of ``np.nan`` to indicate a nan value, in addition to the traditional ``NaT``, or not-a-time. This allows convenient nan setting in a generic way. Furthermore ``datetime64[ns]`` columns are created by default, when passed datetimelike objects (*this change was introduced in 0.10.1*) -(GH2809_, GH2810_) +(:issue:`2809`, :issue:`2810`) .. ipython:: python @@ -210,7 +210,7 @@ API changes ~~~~~~~~~~~ - Added to_series() method to indicies, to facilitate the creation of indexers - (GH3275_) + (:issue:`3275`) - ``HDFStore`` @@ -221,7 +221,7 @@ API changes Enhancements ~~~~~~~~~~~~ - - Improved performance of df.to_csv() by up to 10x in some cases. (GH3059_) + - Improved performance of df.to_csv() by up to 10x in some cases. (:issue:`3059`) - Numexpr is now a :ref:`Recommended Dependencies <install.recommended_dependencies>`, to accelerate certain types of numerical and boolean operations @@ -248,11 +248,11 @@ Enhancements - provide dotted attribute access to ``get`` from stores, e.g. 
``store.df == store['df']`` - new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are - provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_) + provided to support iteration on ``select`` and ``select_as_multiple`` (:issue:`3076`) - - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (GH2437_) + - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (:issue:`2437`) - - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (GH3070_) + - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (:issue:`3070`) .. ipython:: python @@ -291,59 +291,36 @@ Enhancements ``above_below``. This allows the user to specify if they would like to only return forward looking data for options near the current stock price. This just obtains the data from Options.get_near_stock_price - instead of Options.get_xxx_data() (GH2758_). + instead of Options.get_xxx_data() (:issue:`2758`). - Cursor coordinate information is now displayed in time-series plots. - added option `display.max_seq_items` to control the number of - elements printed per sequence pprinting it. (GH2979_) + elements printed per sequence pprinting it. (:issue:`2979`) - added option `display.chop_threshold` to control display of small numerical - values. (GH2739_) + values. (:issue:`2739`) - added option `display.max_info_rows` to prevent verbose_info from being - calculated for frames above 1M rows (configurable). (GH2807_, GH2918_) + calculated for frames above 1M rows (configurable). (:issue:`2807`, :issue:`2918`) - value_counts() now accepts a "normalize" argument, for normalized - histograms. (GH2710_). + histograms. (:issue:`2710`). - DataFrame.from_records now accepts not only dicts but any instance of the collections.Mapping ABC. 
- added option `display.mpl_style` providing a sleeker visual style - for plots. Based on https://gist.github.com/huyng/816622 (GH3075_). + for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`). - Treat boolean values as integers (values 1 and 0) for numeric - operations. (GH2641_) + operations. (:issue:`2641`) - to_html() now accepts an optional "escape" argument to control reserved HTML character escaping (enabled by default) and escapes ``&``, in addition - to ``<`` and ``>``. (GH2919_) + to ``<`` and ``>``. (:issue:`2919`) -See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. -.. _GH2437: https://github.com/pydata/pandas/issues/2437 -.. _GH2809: https://github.com/pydata/pandas/issues/2809 -.. _GH2810: https://github.com/pydata/pandas/issues/2810 -.. _GH2837: https://github.com/pydata/pandas/issues/2837 -.. _GH2898: https://github.com/pydata/pandas/issues/2898 -.. _GH3035: https://github.com/pydata/pandas/issues/3035 -.. _GH2978: https://github.com/pydata/pandas/issues/2978 -.. _GH2877: https://github.com/pydata/pandas/issues/2877 -.. _GH2739: https://github.com/pydata/pandas/issues/2739 -.. _GH2710: https://github.com/pydata/pandas/issues/2710 -.. _GH2806: https://github.com/pydata/pandas/issues/2806 -.. _GH2807: https://github.com/pydata/pandas/issues/2807 -.. _GH2918: https://github.com/pydata/pandas/issues/2918 -.. _GH2758: https://github.com/pydata/pandas/issues/2758 -.. _GH3275: https://github.com/pydata/pandas/issues/3275 -.. _GH2979: https://github.com/pydata/pandas/issues/2979 -.. _GH3011: https://github.com/pydata/pandas/issues/3011 -.. _GH3076: https://github.com/pydata/pandas/issues/3076 -.. _GH3059: https://github.com/pydata/pandas/issues/3059 -.. _GH3070: https://github.com/pydata/pandas/issues/3070 -.. _GH3075: https://github.com/pydata/pandas/issues/3075 -.. 
_GH2641: https://github.com/pydata/pandas/issues/2641 -.. _GH2919: https://github.com/pydata/pandas/issues/2919 diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 97f236166be45..76c439afc452c 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -39,7 +39,7 @@ API changes - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return - ``np.nan`` or ``np.inf`` as appropriate (GH3590_). This correct a numpy bug that treats ``integer`` + ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`). This correct a numpy bug that treats ``integer`` and ``float`` dtypes differently. .. ipython:: python @@ -53,8 +53,8 @@ API changes - Add ``squeeze`` keyword to ``groupby`` to allow reduction from DataFrame -> Series if groups are unique. This is a Regression from 0.10.1. We are reverting back to the prior behavior. This means groupby will return the - same shaped objects whether the groups are unique or not. Revert this issue (GH2893_) - with (GH3596_). + same shaped objects whether the groups are unique or not. Revert this issue (:issue:`2893`) + with (:issue:`3596`). .. ipython:: python @@ -71,7 +71,7 @@ API changes - Raise on ``iloc`` when boolean indexing with a label based indexer mask e.g. a boolean Series, even with integer labels, will raise. Since ``iloc`` - is purely positional based, the labels on the Series are not alignable (GH3631_) + is purely positional based, the labels on the Series are not alignable (:issue:`3631`) This case is rarely used, and there are plently of alternatives. This preserves the ``iloc`` API to be *purely* positional based. @@ -97,18 +97,18 @@ API changes plot something. - ``DataFrame.interpolate()`` is now deprecated. Please use - ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_, - GH3675_, GH3676_) + ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. 
(:issue:`3582`, + :issue:`3675`, :issue:`3676`) - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are deprecated - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now - performs conversion by default. (GH3907_) + performs conversion by default. (:issue:`3907`) - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column - to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_) - - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_) + to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (:issue:`3679`) + - Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`) - IO api @@ -136,13 +136,13 @@ API changes read_frame(....) - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for - their first argument (GH3702_) + their first argument (:issue:`3702`) - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and - ``timedelta64[ns]`` to ``object/int`` (GH3425_) + ``timedelta64[ns]`` to ``object/int`` (:issue:`3425`) - The behavior of ``datetime64`` dtypes has changed with respect to certain - so-called reduction operations (GH3726_). The following operations now + so-called reduction operations (:issue:`3726`). The following operations now raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty* ``Series`` when performed on a ``DataFrame`` similar to performing these operations on, for example, a ``DataFrame`` of ``slice`` objects: @@ -157,7 +157,7 @@ I/O Enhancements ~~~~~~~~~~~~~~~~ - ``pd.read_html()`` can now parse HTML strings, files or urls and return - DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_). + DataFrames, courtesy of @cpcloud. (:issue:`3477`, :issue:`3605`, :issue:`3606`, :issue:`3616`). 
It works with a *single* parser backend: BeautifulSoup4 + html5lib :ref:`See the docs<io.html>` You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so @@ -174,14 +174,14 @@ I/O Enhancements ``DataFrame.to_html()`` are not inverses. - ``pd.read_html()`` no longer performs hard conversion of date strings - (GH3656_). + (:issue:`3656`). .. warning:: You may have to install an older version of BeautifulSoup4, :ref:`See the installation docs<install.optional_dependencies>` - - Added module for reading and writing Stata files: ``pandas.io.stata`` (GH1512_) + - Added module for reading and writing Stata files: ``pandas.io.stata`` (:issue:`1512`) accessable via ``read_stata`` top-level function for reading, and ``to_stata`` DataFrame method for writing, :ref:`See the docs<io.stata>` @@ -202,7 +202,7 @@ I/O Enhancements Note: The default behavior in 0.11.1 remains unchanged, but starting with 0.12, the default *to* write and read multi-index columns will be in the new - format. (GH3571_, GH1651_, GH3141_) + format. (:issue:`3571`, :issue:`1651`, :issue:`3141`) - If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will @@ -268,7 +268,7 @@ Other Enhancements - ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name`` to specify custom column names of the returned DataFrame. - - ``pd.set_option()`` now allows N option, value pairs (GH3667_). + - ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`). Let's say that we had an option ``'a.b'`` and another option ``'b.c'``. 
We can set them at the same time: @@ -315,16 +315,16 @@ Other Enhancements dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False) - - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_) + - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`) - DatetimeIndexes no longer try to convert mixed-integer indexes during join - operations (GH3877_) + operations (:issue:`3877`) Bug Fixes ~~~~~~~~~ - Plotting functions now raise a ``TypeError`` before trying to plot anything - if the associated objects have have a dtype of ``object`` (GH1818_, - GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to + if the associated objects have have a dtype of ``object`` (:issue:`1818`, + :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which elimnates any spurious plots from showing up. @@ -332,7 +332,7 @@ Bug Fixes - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is a list or tuple. - - ``Series.str`` now supports iteration (GH3638_). You can iterate over the + - ``Series.str`` now supports iteration (:issue:`3638`). You can iterate over the individual elements of each string in the ``Series``. Each iteration yields yields a ``Series`` with either a single character at each index of the original ``Series`` or ``NaN``. 
For example, @@ -356,90 +356,37 @@ Bug Fixes - ``HDFStore`` - - will retain index attributes (freq,tz,name) on recreation (GH3499_) + - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`) - will warn with a ``AttributeConflictWarning`` if you are attempting to append an index with a different frequency than the existing, or attempting to append an index with a different name than the existing - - support datelike columns with a timezone as data_columns (GH2852_) + - support datelike columns with a timezone as data_columns (:issue:`2852`) - - Non-unique index support clarified (GH3468_). + - Non-unique index support clarified (:issue:`3468`). - - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_) + - Fix assigning a new index to a duplicate index in a DataFrame would fail (:issue:`3468`) - Fix construction of a DataFrame with a duplicate index - ref_locs support to allow duplicative indices across dtypes, - allows iget support to always find the index (even across dtypes) (GH2194_) + allows iget support to always find the index (even across dtypes) (:issue:`2194`) - applymap on a DataFrame with a non-unique index now works - (removed warning) (GH2786_), and fix (GH3230_) - - Fix to_csv to handle non-unique columns (GH3495_) - - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - and handle missing elements like unique indices (GH3561_) - - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_) - - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_) - - Allow insert/delete to non-unique columns (GH3679_) - - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_) - - Allow insert/delete to non-unique columns (GH3679_) - - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_) + (removed warning) (:issue:`2786`), and fix (:issue:`3230`) + - Fix to_csv to handle 
non-unique columns (:issue:`3495`) + - Duplicate indexes with getitem will return items in the correct order (:issue:`3455`, :issue:`3457`) + and handle missing elements like unique indices (:issue:`3561`) + - Duplicate indexes with an empty DataFrame.from_records will return a correct frame (:issue:`3562`) + - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (:issue:`3602`) + - Allow insert/delete to non-unique columns (:issue:`3679`) + - Non-unique indexing with a slice via ``loc`` and friends fixed (:issue:`3659`) + - Allow insert/delete to non-unique columns (:issue:`3679`) + - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`) - ``DataFrame.itertuples()`` now works with frames with duplicate column - names (GH3873_) + names (:issue:`3873`) - - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_) - - ``read_html`` now correctly skips tests (GH3741_) + - ``DataFrame.from_records`` did not accept empty recarrays (:issue:`3682`) + - ``read_html`` now correctly skips tests (:issue:`3741`) - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression - in the ``to_replace`` argument wasn't working (GH3907_) + in the ``to_replace`` argument wasn't working (:issue:`3907`) -See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes
<release>` or issue tracker on GitHub for a complete list. - -.. _GH3468: https://github.com/pydata/pandas/issues/3468 -.. _GH2194: https://github.com/pydata/pandas/issues/2194 -.. _GH2786: https://github.com/pydata/pandas/issues/2786 -.. _GH3230: https://github.com/pydata/pandas/issues/3230 -.. _GH3495: https://github.com/pydata/pandas/issues/3495 -.. _GH3455: https://github.com/pydata/pandas/issues/3455 -.. _GH3457: https://github.com/pydata/pandas/issues/3457 -.. _GH3561: https://github.com/pydata/pandas/issues/3561 -.. 
_GH3562: https://github.com/pydata/pandas/issues/3562 -.. _GH3602: https://github.com/pydata/pandas/issues/3602 -.. _GH2437: https://github.com/pydata/pandas/issues/2437 -.. _GH2852: https://github.com/pydata/pandas/issues/2852 -.. _GH3477: https://github.com/pydata/pandas/issues/3477 -.. _GH3492: https://github.com/pydata/pandas/issues/3492 -.. _GH3499: https://github.com/pydata/pandas/issues/3499 -.. _GH2893: https://github.com/pydata/pandas/issues/2893 -.. _GH3596: https://github.com/pydata/pandas/issues/3596 -.. _GH3590: https://github.com/pydata/pandas/issues/3590 -.. _GH3435: https://github.com/pydata/pandas/issues/3435 -.. _GH1512: https://github.com/pydata/pandas/issues/1512 -.. _GH2285: https://github.com/pydata/pandas/issues/2285 -.. _GH3631: https://github.com/pydata/pandas/issues/3631 -.. _GH3571: https://github.com/pydata/pandas/issues/3571 -.. _GH1651: https://github.com/pydata/pandas/issues/1651 -.. _GH3141: https://github.com/pydata/pandas/issues/3141 -.. _GH3638: https://github.com/pydata/pandas/issues/3638 -.. _GH3616: https://github.com/pydata/pandas/issues/3616 -.. _GH3605: https://github.com/pydata/pandas/issues/3605 -.. _GH3606: https://github.com/pydata/pandas/issues/3606 -.. _GH3656: https://github.com/pydata/pandas/issues/3656 -.. _GH1818: https://github.com/pydata/pandas/issues/1818 -.. _GH3572: https://github.com/pydata/pandas/issues/3572 -.. _GH3582: https://github.com/pydata/pandas/issues/3582 -.. _GH3676: https://github.com/pydata/pandas/issues/3676 -.. _GH3675: https://github.com/pydata/pandas/issues/3675 -.. _GH3682: https://github.com/pydata/pandas/issues/3682 -.. _GH3679: https://github.com/pydata/pandas/issues/3679 -.. _GH3702: https://github.com/pydata/pandas/issues/3702 -.. _GH3691: https://github.com/pydata/pandas/issues/3691 -.. _GH3696: https://github.com/pydata/pandas/issues/3696 -.. _GH3667: https://github.com/pydata/pandas/issues/3667 -.. _GH3741: https://github.com/pydata/pandas/issues/3741 -.. 
_GH3726: https://github.com/pydata/pandas/issues/3726 -.. _GH3425: https://github.com/pydata/pandas/issues/3425 -.. _GH3834: https://github.com/pydata/pandas/issues/3834 -.. _GH3873: https://github.com/pydata/pandas/issues/3873 -.. _GH3877: https://github.com/pydata/pandas/issues/3877 -.. _GH3659: https://github.com/pydata/pandas/issues/3659 -.. _GH3679: https://github.com/pydata/pandas/issues/3679 -.. _GH3907: https://github.com/pydata/pandas/issues/3907 -.. _GH3911: https://github.com/pydata/pandas/issues/3911 -.. _GH3912: https://github.com/pydata/pandas/issues/3912 diff --git a/doc/source/v0.4.x.txt b/doc/source/v0.4.x.txt index 19293887089ba..249dec5fd647b 100644 --- a/doc/source/v0.4.x.txt +++ b/doc/source/v0.4.x.txt @@ -6,48 +6,48 @@ v.0.4.3 through v0.4.1 (September 25 - October 9, 2011) New Features ~~~~~~~~~~~~ -- Added Python 3 support using 2to3 (PR200_) +- Added Python 3 support using 2to3 (:issue:`200`) - :ref:`Added <dsintro.name_attribute>` ``name`` attribute to ``Series``, now prints as part of ``Series.__repr__`` - :ref:`Added <missing.isnull>` instance methods ``isnull`` and ``notnull`` to - Series (PR209_, GH203_) + Series (:issue:`209`, :issue:`203`) - :ref:`Added <basics.align>` ``Series.align`` method for aligning two series with choice of join method (ENH56_) - :ref:`Added <indexing.get_level_values>` method ``get_level_values`` to - ``MultiIndex`` (IS188_) + ``MultiIndex`` (:issue:`188`) - :ref:`Set <indexing.mixed_type_setting>` values in mixed-type - ``DataFrame`` objects via ``.ix`` indexing attribute (GH135_) + ``DataFrame`` objects via ``.ix`` indexing attribute (:issue:`135`) - Added new ``DataFrame`` :ref:`methods <basics.dtypes>` ``get_dtype_counts`` and property ``dtypes`` (ENHdc_) - Added :ref:`ignore_index <merging.ignore_index>` option to ``DataFrame.append`` to stack DataFrames (ENH1b_) - ``read_csv`` tries to :ref:`sniff <io.sniff>` delimiters using - ``csv.Sniffer`` (PR146_) + ``csv.Sniffer`` (:issue:`146`) - ``read_csv`` can 
:ref:`read <io.csv_multiindex>` multiple columns into a ``MultiIndex``; DataFrame's ``to_csv`` method writes out a corresponding - ``MultiIndex`` (PR151_) + ``MultiIndex`` (:issue:`151`) - ``DataFrame.rename`` has a new ``copy`` parameter to :ref:`rename <basics.rename>` a DataFrame in place (ENHed_) -- :ref:`Enable <reshaping.unstack_by_name>` unstacking by name (PR142_) -- :ref:`Enable <indexing.sortlevel_byname>` ``sortlevel`` to work by level (PR141_) +- :ref:`Enable <reshaping.unstack_by_name>` unstacking by name (:issue:`142`) +- :ref:`Enable <indexing.sortlevel_byname>` ``sortlevel`` to work by level (:issue:`141`) Performance Enhancements ~~~~~~~~~~~~~~~~~~~~~~~~ - Altered binary operations on differently-indexed SparseSeries objects to use the integer-based (dense) alignment logic which is faster with a - larger number of blocks (GH205_) + larger number of blocks (:issue:`205`) - Wrote faster Cython data alignment / merging routines resulting in substantial speed increases - Improved performance of ``isnull`` and ``notnull``, a regression from v0.3.0 - (GH187_) + (:issue:`187`) - Refactored code related to ``DataFrame.join`` so that intermediate aligned copies of the data in each ``DataFrame`` argument do not need to be created. - Substantial performance increases result (GH176_) + Substantial performance increases result (:issue:`176`) - Substantially improved performance of generic ``Index.intersection`` and ``Index.union`` - Implemented ``BlockManager.take`` resulting in significantly faster ``take`` - performance on mixed-type ``DataFrame`` objects (GH104_) + performance on mixed-type ``DataFrame`` objects (:issue:`104`) - Improved performance of ``Series.sort_index`` - Significant groupby performance enhancement: removed unnecessary integrity checks in DataFrame internals that were slowing down slicing operations to @@ -57,21 +57,8 @@ Performance Enhancements - Wrote fast time series merging / joining methods in Cython. 
Will be integrated later into DataFrame.join and related functions -.. _PR146: https://github.com/pydata/pandas/pull/146 .. _ENH1b: https://github.com/pydata/pandas/commit/1ba56251f0013ff7cd8834e9486cef2b10098371 .. _ENHdc: https://github.com/pydata/pandas/commit/dca3c5c5a6a3769ee01465baca04cfdfa66a4f76 -.. _GH135: https://github.com/pydata/pandas/issues/135 -.. _PR151: https://github.com/pydata/pandas/pull/151 .. _ENHed: https://github.com/pydata/pandas/commit/edd9f1945fc010a57fa0ae3b3444d1fffe592591 -.. _PR142: https://github.com/pydata/pandas/pull/142 -.. _PR141: https://github.com/pydata/pandas/pull/141 -.. _IS188: https://github.com/pydata/pandas/issues/188 .. _ENH56: https://github.com/pydata/pandas/commit/56e0c9ffafac79ce262b55a6a13e1b10a88fbe93 -.. _GH187: https://github.com/pydata/pandas/issues/187 -.. _GH176: https://github.com/pydata/pandas/issues/176 -.. _GH104: https://github.com/pydata/pandas/issues/104 -.. _GH205: https://github.com/pydata/pandas/issues/205 -.. _PR209: https://github.com/pydata/pandas/pull/209 -.. _GH203: https://github.com/pydata/pandas/issues/203 -.. _PR200: https://github.com/pydata/pandas/pull/200 diff --git a/doc/source/v0.5.0.txt b/doc/source/v0.5.0.txt index 017d10d4c9b8c..d0550fd5ef8f3 100644 --- a/doc/source/v0.5.0.txt +++ b/doc/source/v0.5.0.txt @@ -9,23 +9,23 @@ New Features - :ref:`Added <basics.df_join>` ``DataFrame.align`` method with standard join options - :ref:`Added <io.parse_dates>` ``parse_dates`` option to ``read_csv`` and ``read_table`` methods to optionally try to parse dates in the index columns -- :ref:`Added <io.parse_dates>` ``nrows``, ``chunksize``, and ``iterator`` arguments to ``read_csv`` and ``read_table``. 
The last two return a new ``TextParser`` class capable of lazily iterating through chunks of a flat file (GH242_) -- :ref:`Added <merging.multikey_join>` ability to join on multiple columns in ``DataFrame.join`` (GH214_) +- :ref:`Added <io.parse_dates>` ``nrows``, ``chunksize``, and ``iterator`` arguments to ``read_csv`` and ``read_table``. The last two return a new ``TextParser`` class capable of lazily iterating through chunks of a flat file (:issue:`242`) +- :ref:`Added <merging.multikey_join>` ability to join on multiple columns in ``DataFrame.join`` (:issue:`214`) - Added private ``_get_duplicates`` function to ``Index`` for identifying duplicate values more easily (ENH5c_) - :ref:`Added <indexing.df_cols>` column attribute access to DataFrame. -- :ref:`Added <indexing.df_cols>` Python tab completion hook for DataFrame columns. (PR233_, GH230_) -- :ref:`Implemented <basics.describe>` ``Series.describe`` for Series containing objects (PR241_) -- :ref:`Added <merging.df_inner_join>` inner join option to ``DataFrame.join`` when joining on key(s) (GH248_) -- :ref:`Implemented <indexing.df_cols>` selecting DataFrame columns by passing a list to ``__getitem__`` (GH253_) -- :ref:`Implemented <indexing.set_ops>` & and | to intersect / union Index objects, respectively (GH261_) -- :ref:`Added<reshaping.pivot>` ``pivot_table`` convenience function to pandas namespace (GH234_) -- :ref:`Implemented <basics.rename_axis>` ``Panel.rename_axis`` function (GH243_) -- DataFrame will show index level names in console output (PR334_) +- :ref:`Added <indexing.df_cols>` Python tab completion hook for DataFrame columns. 
(:issue:`233`, :issue:`230`) +- :ref:`Implemented <basics.describe>` ``Series.describe`` for Series containing objects (:issue:`241`) +- :ref:`Added <merging.df_inner_join>` inner join option to ``DataFrame.join`` when joining on key(s) (:issue:`248`) +- :ref:`Implemented <indexing.df_cols>` selecting DataFrame columns by passing a list to ``__getitem__`` (:issue:`253`) +- :ref:`Implemented <indexing.set_ops>` & and | to intersect / union Index objects, respectively (:issue:`261`) +- :ref:`Added<reshaping.pivot>` ``pivot_table`` convenience function to pandas namespace (:issue:`234`) +- :ref:`Implemented <basics.rename_axis>` ``Panel.rename_axis`` function (:issue:`243`) +- DataFrame will show index level names in console output (:issue:`334`) - :ref:`Implemented <indexing.take>` ``Panel.take`` - :ref:`Added<basics.console_output>` ``set_eng_float_format`` for alternate DataFrame floating point string formatting (ENH61_) - :ref:`Added <indexing.set_index>` convenience ``set_index`` function for creating a DataFrame index from its existing columns -- :ref:`Implemented <groupby.multiindex>` ``groupby`` hierarchical index level name (GH223_) -- :ref:`Added <io.store_in_csv>` support for different delimiters in ``DataFrame.to_csv`` (PR244_) +- :ref:`Implemented <groupby.multiindex>` ``groupby`` hierarchical index level name (:issue:`223`) +- :ref:`Added <io.store_in_csv>` support for different delimiters in ``DataFrame.to_csv`` (:issue:`244`) - TODO: DOCS ABOUT TAKE METHODS Performance Enhancements @@ -33,28 +33,11 @@ Performance Enhancements - VBENCH Major performance improvements in file parsing functions ``read_csv`` and ``read_table`` - VBENCH Added Cython function for converting tuples to ndarray very fast. 
Speeds up many MultiIndex-related operations -- VBENCH Refactored merging / joining code into a tidy class and disabled unnecessary computations in the float/object case, thus getting about 10% better performance (GH211_) -- VBENCH Improved speed of ``DataFrame.xs`` on mixed-type DataFrame objects by about 5x, regression from 0.3.0 (GH215_) +- VBENCH Refactored merging / joining code into a tidy class and disabled unnecessary computations in the float/object case, thus getting about 10% better performance (:issue:`211`) +- VBENCH Improved speed of ``DataFrame.xs`` on mixed-type DataFrame objects by about 5x, regression from 0.3.0 (:issue:`215`) - VBENCH With new ``DataFrame.align`` method, speeding up binary operations between differently-indexed DataFrame objects by 10-25%. -- VBENCH Significantly sped up conversion of nested dict into DataFrame (GH212_) +- VBENCH Significantly sped up conversion of nested dict into DataFrame (:issue:`212`) - VBENCH Significantly speed up DataFrame ``__repr__`` and ``count`` on large mixed-type DataFrame objects -.. _GH214: https://github.com/pydata/pandas/issues/214 -.. _GH248: https://github.com/pydata/pandas/issues/248 -.. _GH253: https://github.com/pydata/pandas/issues/253 -.. _GH261: https://github.com/pydata/pandas/issues/261 -.. _GH234: https://github.com/pydata/pandas/issues/234 -.. _GH243: https://github.com/pydata/pandas/issues/243 -.. _GH223: https://github.com/pydata/pandas/issues/223 -.. _PR244: https://github.com/pydata/pandas/pull/244 -.. _PR233: https://github.com/pydata/pandas/pull/233 -.. _GH230: https://github.com/pydata/pandas/issues/230 -.. _PR241: https://github.com/pydata/pandas/pull/241 -.. _GH242: https://github.com/pydata/pandas/issues/242 -.. _GH212: https://github.com/pydata/pandas/issues/212 -.. _GH211: https://github.com/pydata/pandas/issues/211 -.. _GH215: https://github.com/pydata/pandas/issues/215 -.. _GH213: https://github.com/pydata/pandas/issues/213 .. 
_ENH61: https://github.com/pydata/pandas/commit/6141961 -.. _PR334: https://github.com/pydata/pandas/pull/334 .. _ENH5c: https://github.com/pydata/pandas/commit/5ca6ff5d822ee4ddef1ec0d87b6d83d8b4bbd3eb diff --git a/doc/source/v0.6.0.txt b/doc/source/v0.6.0.txt index e72aec601221e..55a67a75e0fd1 100644 --- a/doc/source/v0.6.0.txt +++ b/doc/source/v0.6.0.txt @@ -6,95 +6,51 @@ v.0.6.0 (November 25, 2011) New Features ~~~~~~~~~~~~ - :ref:`Added <reshaping.melt>` ``melt`` function to ``pandas.core.reshape`` -- :ref:`Added <groupby.multiindex>` ``level`` parameter to group by level in Series and DataFrame descriptive statistics (PR313_) -- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to to DataFrame (PR296_) -- :ref:`Added <indexing.boolean>` ``Series.isin`` function which checks if each value is contained in a passed sequence (GH289_) +- :ref:`Added <groupby.multiindex>` ``level`` parameter to group by level in Series and DataFrame descriptive statistics (:issue:`313`) +- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to DataFrame (:issue:`296`) +- :ref:`Added <indexing.boolean>` ``Series.isin`` function which checks if each value is contained in a passed sequence (:issue:`289`) - :ref:`Added <io.formatting>` ``float_format`` option to ``Series.to_string`` -- :ref:`Added <io.parse_dates>` ``skip_footer`` (GH291_) and ``converters`` (GH343_) options to ``read_csv`` and ``read_table`` -- :ref:`Added <indexing.duplicate>` ``drop_duplicates`` and ``duplicated`` functions for removing duplicate DataFrame rows and checking for duplicate rows, respectively (GH319_) -- :ref:`Implemented <dsintro.boolean>` operators '&', '|', '^', '-' on DataFrame (GH347_) +- :ref:`Added <io.parse_dates>` ``skip_footer`` (:issue:`291`) and ``converters`` (:issue:`343`) options to ``read_csv`` and ``read_table`` +- :ref:`Added <indexing.duplicate>` ``drop_duplicates`` and ``duplicated`` functions for removing duplicate 
DataFrame rows and checking for duplicate rows, respectively (:issue:`319`) +- :ref:`Implemented <dsintro.boolean>` operators '&', '|', '^', '-' on DataFrame (:issue:`347`) - :ref:`Added <basics.stats>` ``Series.mad``, mean absolute deviation -- :ref:`Added <timeseries.offsets>` ``QuarterEnd`` DateOffset (PR321_) -- :ref:`Added <dsintro.numpy_interop>` ``dot`` to DataFrame (GH65_) -- :ref:`Added <basics.panel>` ``orient`` option to ``Panel.from_dict`` (GH359_, GH301_) +- :ref:`Added <timeseries.offsets>` ``QuarterEnd`` DateOffset (:issue:`321`) +- :ref:`Added <dsintro.numpy_interop>` ``dot`` to DataFrame (:issue:`65`) +- :ref:`Added <basics.panel>` ``orient`` option to ``Panel.from_dict`` (:issue:`359`, :issue:`301`) - :ref:`Added <basics.dataframe.from_dict>` ``orient`` option to ``DataFrame.from_dict`` -- :ref:`Added <basics.dataframe.from_records>` passing list of tuples or list of lists to ``DataFrame.from_records`` (GH357_) -- :ref:`Added <groupby.multiindex>` multiple levels to groupby (GH103_) -- :ref:`Allow <basics.sorting>` multiple columns in ``by`` argument of ``DataFrame.sort_index`` (GH92_, PR362_) -- :ref:`Added <indexing.basics.get_value>` fast ``get_value`` and ``put_value`` methods to DataFrame (GH360_) -- :ref:`Added <computation.covariance>` ``cov`` instance methods to Series and DataFrame (GH194_, PR362_) -- :ref:`Added <visualization.barplot>` ``kind='bar'`` option to ``DataFrame.plot`` (PR348_) -- :ref:`Added <basics.idxmin>` ``idxmin`` and ``idxmax`` to Series and DataFrame (PR286_) -- :ref:`Added <io.clipboard>` ``read_clipboard`` function to parse DataFrame from clipboard (GH300_) -- :ref:`Added <basics.stats>` ``nunique`` function to Series for counting unique elements (GH297_) -- :ref:`Made <basics.dataframe>` DataFrame constructor use Series name if no columns passed (GH373_) -- :ref:`Support <io.parse_dates>` regular expressions in read_table/read_csv (GH364_) -- :ref:`Added <io.html>` ``DataFrame.to_html`` for writing DataFrame to HTML 
(PR387_) -- :ref:`Added <basics.dataframe>` support for MaskedArray data in DataFrame, masked values converted to NaN (PR396_) -- :ref:`Added <visualization.box>` ``DataFrame.boxplot`` function (GH368_) -- :ref:`Can <basics.apply>` pass extra args, kwds to DataFrame.apply (GH376_) -- :ref:`Implement <merging.multikey_join>` ``DataFrame.join`` with vector ``on`` argument (GH312_) -- :ref:`Added <visualization.basic>` ``legend`` boolean flag to ``DataFrame.plot`` (GH324_) -- :ref:`Can <reshaping.stacking>` pass multiple levels to ``stack`` and ``unstack`` (GH370_) -- :ref:`Can <reshaping.pivot>` pass multiple values columns to ``pivot_table`` (GH381_) -- :ref:`Use <groupby.multiindex>` Series name in GroupBy for result index (GH363_) -- :ref:`Added <basics.apply>` ``raw`` option to ``DataFrame.apply`` for performance if only need ndarray (GH309_) -- Added proper, tested weighted least squares to standard and panel OLS (GH303_) +- :ref:`Added <basics.dataframe.from_records>` passing list of tuples or list of lists to ``DataFrame.from_records`` (:issue:`357`) +- :ref:`Added <groupby.multiindex>` multiple levels to groupby (:issue:`103`) +- :ref:`Allow <basics.sorting>` multiple columns in ``by`` argument of ``DataFrame.sort_index`` (:issue:`92`, :issue:`362`) +- :ref:`Added <indexing.basics.get_value>` fast ``get_value`` and ``put_value`` methods to DataFrame (:issue:`360`) +- :ref:`Added <computation.covariance>` ``cov`` instance methods to Series and DataFrame (:issue:`194`, :issue:`362`) +- :ref:`Added <visualization.barplot>` ``kind='bar'`` option to ``DataFrame.plot`` (:issue:`348`) +- :ref:`Added <basics.idxmin>` ``idxmin`` and ``idxmax`` to Series and DataFrame (:issue:`286`) +- :ref:`Added <io.clipboard>` ``read_clipboard`` function to parse DataFrame from clipboard (:issue:`300`) +- :ref:`Added <basics.stats>` ``nunique`` function to Series for counting unique elements (:issue:`297`) +- :ref:`Made <basics.dataframe>` DataFrame constructor use Series name if no 
columns passed (:issue:`373`) +- :ref:`Support <io.parse_dates>` regular expressions in read_table/read_csv (:issue:`364`) +- :ref:`Added <io.html>` ``DataFrame.to_html`` for writing DataFrame to HTML (:issue:`387`) +- :ref:`Added <basics.dataframe>` support for MaskedArray data in DataFrame, masked values converted to NaN (:issue:`396`) +- :ref:`Added <visualization.box>` ``DataFrame.boxplot`` function (:issue:`368`) +- :ref:`Can <basics.apply>` pass extra args, kwds to DataFrame.apply (:issue:`376`) +- :ref:`Implement <merging.multikey_join>` ``DataFrame.join`` with vector ``on`` argument (:issue:`312`) +- :ref:`Added <visualization.basic>` ``legend`` boolean flag to ``DataFrame.plot`` (:issue:`324`) +- :ref:`Can <reshaping.stacking>` pass multiple levels to ``stack`` and ``unstack`` (:issue:`370`) +- :ref:`Can <reshaping.pivot>` pass multiple values columns to ``pivot_table`` (:issue:`381`) +- :ref:`Use <groupby.multiindex>` Series name in GroupBy for result index (:issue:`363`) +- :ref:`Added <basics.apply>` ``raw`` option to ``DataFrame.apply`` for performance if only need ndarray (:issue:`309`) +- Added proper, tested weighted least squares to standard and panel OLS (:issue:`303`) Performance Enhancements ~~~~~~~~~~~~~~~~~~~~~~~~ -- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (GH361_) -- VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (GH309_) +- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (:issue:`361`) +- VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) - VBENCH Improved performance of ``MultiIndex.from_tuples`` - VBENCH Special Cython matrix iterator for applying arbitrary reduction operations - VBENCH + DOCUMENT Add 
``raw`` option to ``DataFrame.apply`` for getting better performance when -- VBENCH Faster cythonized count by level in Series and DataFrame (GH341_) +- VBENCH Faster cythonized count by level in Series and DataFrame (:issue:`341`) - VBENCH? Significant GroupBy performance enhancement with multiple keys with many "empty" combinations -- VBENCH New Cython vectorized function ``map_infer`` speeds up ``Series.apply`` and ``Series.map`` significantly when passed elementwise Python function, motivated by (PR355_) -- VBENCH Significantly improved performance of ``Series.order``, which also makes np.unique called on a Series faster (GH327_) -- VBENCH Vastly improved performance of GroupBy on axes with a MultiIndex (GH299_) +- VBENCH New Cython vectorized function ``map_infer`` speeds up ``Series.apply`` and ``Series.map`` significantly when passed elementwise Python function, motivated by (:issue:`355`) +- VBENCH Significantly improved performance of ``Series.order``, which also makes np.unique called on a Series faster (:issue:`327`) +- VBENCH Vastly improved performance of GroupBy on axes with a MultiIndex (:issue:`299`) -.. _GH65: https://github.com/pydata/pandas/issues/65 -.. _GH92: https://github.com/pydata/pandas/issues/92 -.. _GH103: https://github.com/pydata/pandas/issues/103 -.. _GH194: https://github.com/pydata/pandas/issues/194 -.. _GH289: https://github.com/pydata/pandas/issues/289 -.. _GH291: https://github.com/pydata/pandas/issues/291 -.. _GH297: https://github.com/pydata/pandas/issues/297 -.. _GH299: https://github.com/pydata/pandas/issues/299 -.. _GH300: https://github.com/pydata/pandas/issues/300 -.. _GH301: https://github.com/pydata/pandas/issues/301 -.. _GH303: https://github.com/pydata/pandas/issues/303 -.. _GH305: https://github.com/pydata/pandas/issues/305 -.. _GH308: https://github.com/pydata/pandas/issues/308 -.. _GH309: https://github.com/pydata/pandas/issues/309 -.. _GH312: https://github.com/pydata/pandas/issues/312 -.. 
_GH319: https://github.com/pydata/pandas/issues/319 -.. _GH324: https://github.com/pydata/pandas/issues/324 -.. _GH327: https://github.com/pydata/pandas/issues/327 -.. _GH341: https://github.com/pydata/pandas/issues/341 -.. _GH343: https://github.com/pydata/pandas/issues/343 -.. _GH347: https://github.com/pydata/pandas/issues/347 -.. _GH357: https://github.com/pydata/pandas/issues/357 -.. _GH359: https://github.com/pydata/pandas/issues/359 -.. _GH360: https://github.com/pydata/pandas/issues/360 -.. _GH361: https://github.com/pydata/pandas/issues/361 -.. _GH363: https://github.com/pydata/pandas/issues/363 -.. _GH364: https://github.com/pydata/pandas/issues/364 -.. _GH368: https://github.com/pydata/pandas/issues/368 -.. _GH370: https://github.com/pydata/pandas/issues/370 -.. _GH373: https://github.com/pydata/pandas/issues/373 -.. _GH376: https://github.com/pydata/pandas/issues/376 -.. _GH381: https://github.com/pydata/pandas/issues/381 -.. _GH382: https://github.com/pydata/pandas/issues/382 -.. _GH393: https://github.com/pydata/pandas/issues/393 -.. _PR286: https://github.com/pydata/pandas/pull/286 -.. _PR296: https://github.com/pydata/pandas/pull/296 -.. _PR313: https://github.com/pydata/pandas/pull/313 -.. _PR321: https://github.com/pydata/pandas/pull/321 -.. _PR348: https://github.com/pydata/pandas/pull/348 -.. _PR355: https://github.com/pydata/pandas/pull/355 -.. _PR362: https://github.com/pydata/pandas/pull/362 -.. _PR386: https://github.com/pydata/pandas/pull/386 -.. _PR387: https://github.com/pydata/pandas/pull/387 -.. 
_PR396: https://github.com/pydata/pandas/pull/396 diff --git a/doc/source/v0.6.1.txt b/doc/source/v0.6.1.txt index 7b0588884c5b2..7e593d07f7f2b 100644 --- a/doc/source/v0.6.1.txt +++ b/doc/source/v0.6.1.txt @@ -8,28 +8,28 @@ New features ~~~~~~~~~~~~ - Can :ref:`append single rows <merging.append.row>` (as Series) to a DataFrame - Add Spearman and Kendall rank :ref:`correlation <computation.correlation>` - options to Series.corr and DataFrame.corr (GH428_) + options to Series.corr and DataFrame.corr (:issue:`428`) - :ref:`Added <indexing.basics.get_value>` ``get_value`` and ``set_value`` methods to Series, DataFrame, and Panel for very low-overhead access (>2x faster in many - cases) to scalar elements (GH437_, GH438_). ``set_value`` is capable of + cases) to scalar elements (:issue:`437`, :issue:`438`). ``set_value`` is capable of producing an enlarged object. -- Add PyQt table widget to sandbox (PR435_) +- Add PyQt table widget to sandbox (:issue:`435`) - DataFrame.align can :ref:`accept Series arguments <basics.align.frame.series>` - and an :ref:`axis option <basics.df_join>` (GH461_) + and an :ref:`axis option <basics.df_join>` (:issue:`461`) - Implement new :ref:`SparseArray <sparse.array>` and :ref:`SparseList <sparse.list>` - data structures. SparseSeries now derives from SparseArray (GH463_) -- :ref:`Better console printing options <basics.console_output>` (PR453_) + data structures. 
SparseSeries now derives from SparseArray (:issue:`463`) +- :ref:`Better console printing options <basics.console_output>` (:issue:`453`) - Implement fast :ref:`data ranking <computation.ranking>` for Series and - DataFrame, fast versions of scipy.stats.rankdata (GH428_) + DataFrame, fast versions of scipy.stats.rankdata (:issue:`428`) - Implement :ref:`DataFrame.from_items <basics.dataframe.from_items>` alternate - constructor (GH444_) + constructor (:issue:`444`) - DataFrame.convert_objects method for :ref:`inferring better dtypes <basics.cast>` - for object columns (GH302_) + for object columns (:issue:`302`) - Add :ref:`rolling_corr_pairwise <stats.moments.corr_pairwise>` function for - computing Panel of correlation matrices (GH189_) + computing Panel of correlation matrices (:issue:`189`) - Add :ref:`margins <reshaping.pivot.margins>` option to :ref:`pivot_table - <reshaping.pivot>` for computing subgroup aggregates (GH114_) -- Add ``Series.from_csv`` function (PR482_) + <reshaping.pivot>` for computing subgroup aggregates (:issue:`114`) +- Add ``Series.from_csv`` function (:issue:`482`) - :ref:`Can pass <stats.moments.binary>` DataFrame/DataFrame and DataFrame/Series to rolling_corr/rolling_cov (GH #462) - MultiIndex.get_level_values can :ref:`accept the level name <indexing.get_level_values>` @@ -48,15 +48,3 @@ Performance improvements - Column deletion in DataFrame copies no data (computes views on blocks) (GH #158) -.. _GH114: https://github.com/pydata/pandas/issues/114 -.. _GH189: https://github.com/pydata/pandas/issues/302 -.. _GH302: https://github.com/pydata/pandas/issues/302 -.. _GH428: https://github.com/pydata/pandas/issues/428 -.. _GH437: https://github.com/pydata/pandas/issues/437 -.. _GH438: https://github.com/pydata/pandas/issues/438 -.. _GH444: https://github.com/pydata/pandas/issues/444 -.. _GH461: https://github.com/pydata/pandas/issues/461 -.. _GH463: https://github.com/pydata/pandas/issues/463 -.. 
_PR435: https://github.com/pydata/pandas/pull/435 -.. _PR453: https://github.com/pydata/pandas/pull/453 -.. _PR482: https://github.com/pydata/pandas/pull/482 diff --git a/doc/source/v0.7.0.txt b/doc/source/v0.7.0.txt index 6ff748f142d15..bf7acd3820db0 100644 --- a/doc/source/v0.7.0.txt +++ b/doc/source/v0.7.0.txt @@ -9,24 +9,24 @@ New features - New unified :ref:`merge function <merging.join>` for efficiently performing full gamut of database / relational-algebra operations. Refactored existing join methods to use the new infrastructure, resulting in substantial - performance gains (GH220_, GH249_, GH267_) + performance gains (:issue:`220`, :issue:`249`, :issue:`267`) - New :ref:`unified concatenation function <merging.concat>` for concatenating Series, DataFrame or Panel objects along an axis. Can form union or intersection of the other axes. Improves performance of ``Series.append`` and - ``DataFrame.append`` (GH468_, GH479_, GH273_) + ``DataFrame.append`` (:issue:`468`, :issue:`479`, :issue:`273`) - :ref:`Can <merging.concatenation>` pass multiple DataFrames to `DataFrame.append` to concatenate (stack) and multiple Series to ``Series.append`` too - :ref:`Can<basics.dataframe.from_list_of_dicts>` pass list of dicts (e.g., a - list of JSON objects) to DataFrame constructor (GH526_) + list of JSON objects) to DataFrame constructor (:issue:`526`) - You can now :ref:`set multiple columns <indexing.columns.multiple>` in a - DataFrame via ``__getitem__``, useful for transformation (GH342_) + DataFrame via ``__getitem__``, useful for transformation (:issue:`342`) -- Handle differently-indexed output values in ``DataFrame.apply`` (GH498_) +- Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`) .. 
ipython:: python @@ -34,10 +34,10 @@ New features df.apply(lambda x: x.describe()) - :ref:`Add<indexing.reorderlevels>` ``reorder_levels`` method to Series and - DataFrame (PR534_) + DataFrame (:issue:`534`) - :ref:`Add<indexing.dictionarylike>` dict-like ``get`` function to DataFrame - and Panel (PR521_) + and Panel (:issue:`521`) - :ref:`Add<basics.iterrows>` ``DataFrame.iterrows`` method for efficiently iterating through the rows of a DataFrame @@ -52,10 +52,10 @@ New features - :ref:`Add <indexing.advanced_reindex>` ``level`` option to the ``reindex`` and ``align`` methods on Series and DataFrame for broadcasting values across - a level (GH542_, PR552_, others) + a level (:issue:`542`, :issue:`552`, others) - :ref:`Add <dsintro.panel_item_selection>` attribute-based item access to - ``Panel`` and add IPython completion (PR563_) + ``Panel`` and add IPython completion (:issue:`563`) - :ref:`Add <visualization.basic>` ``logy`` option to ``Series.plot`` for log-scaling on the Y axis @@ -64,38 +64,38 @@ New features ``DataFrame.to_string`` - :ref:`Can <merging.multiple_join>` pass multiple DataFrames to - ``DataFrame.join`` to join on index (GH115_) + ``DataFrame.join`` to join on index (:issue:`115`) - :ref:`Can <merging.multiple_join>` pass multiple Panels to ``Panel.join`` - (GH115_) + (:issue:`115`) - :ref:`Added <io.formatting>` ``justify`` argument to ``DataFrame.to_string`` to allow different alignment of column headers - :ref:`Add <groupby.attributes>` ``sort`` option to GroupBy to allow disabling - sorting of the group keys for potential speedups (GH595_) + sorting of the group keys for potential speedups (:issue:`595`) - :ref:`Can <basics.dataframe.from_series>` pass MaskedArray to Series - constructor (PR563_) + constructor (:issue:`563`) - :ref:`Add <dsintro.panel_item_selection>` Panel item access via attributes - and IPython completion (GH554_) + and IPython completion (:issue:`554`) - Implement ``DataFrame.lookup``, fancy-indexing analogue for 
retrieving values - given a sequence of row and column labels (GH338_) + given a sequence of row and column labels (:issue:`338`) - Can pass a :ref:`list of functions <groupby.aggregate.multifunc>` to aggregate with groupby on a DataFrame, yielding an aggregated result with - hierarchical columns (GH166_) + hierarchical columns (:issue:`166`) - Can call ``cummin`` and ``cummax`` on Series and DataFrame to get cumulative - minimum and maximum, respectively (GH647_) + minimum and maximum, respectively (:issue:`647`) - ``value_range`` added as utility function to get min and max of a dataframe - (GH288_) + (:issue:`288`) - Added ``encoding`` argument to ``read_csv``, ``read_table``, ``to_csv`` and - ``from_csv`` for non-ascii text (GH717_) + ``from_csv`` for non-ascii text (:issue:`717`) - :ref:`Added <basics.stats>` ``abs`` method to pandas objects @@ -231,28 +231,28 @@ Other API Changes - If ``Series.sort`` is called on a column of a DataFrame, an exception will now be raised. Before it was possible to accidentally mutate a DataFrame's column by doing ``df[col].sort()`` instead of the side-effect free method - ``df[col].order()`` (GH316_) + ``df[col].order()`` (:issue:`316`) - Miscellaneous renames and deprecations which will (harmlessly) raise ``FutureWarning`` -- ``drop`` added as an optional parameter to ``DataFrame.reset_index`` (GH699_) +- ``drop`` added as an optional parameter to ``DataFrame.reset_index`` (:issue:`699`) Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - :ref:`Cythonized GroupBy aggregations <groupby.aggregate.cython>` no longer - presort the data, thus achieving a significant speedup (GH93_). GroupBy + presort the data, thus achieving a significant speedup (:issue:`93`). GroupBy aggregations with Python functions significantly sped up by clever - manipulation of the ndarray data type in Cython (GH496_). + manipulation of the ndarray data type in Cython (:issue:`496`). 
- Better error message in DataFrame constructor when passed column labels - don't match data (GH497_) + don't match data (:issue:`497`) - Substantially improve performance of multi-GroupBy aggregation when a - Python function is passed, reuse ndarray object in Cython (GH496_) -- Can store objects indexed by tuples and floats in HDFStore (GH492_) -- Don't print length by default in Series.to_string, add `length` option (GH489_) + Python function is passed, reuse ndarray object in Cython (:issue:`496`) +- Can store objects indexed by tuples and floats in HDFStore (:issue:`492`) +- Don't print length by default in Series.to_string, add `length` option (:issue:`489`) - Improve Cython code for multi-groupby to aggregate without having to sort - the data (GH93_) + the data (:issue:`93`) - Improve MultiIndex reindexing speed by storing tuples in the MultiIndex, test for backwards unpickling compatibility - Improve column reindexing performance by using specialized Cython take @@ -262,47 +262,11 @@ Performance improvements regression from prior versions - Friendlier error message in setup.py if NumPy not installed - Use common set of NA-handling operations (sum, mean, etc.) in Panel class - also (GH536_) + also (:issue:`536`) - Default name assignment when calling ``reset_index`` on DataFrame with a - regular (non-hierarchical) index (GH476_) + regular (non-hierarchical) index (:issue:`476`) - Use Cythonized groupers when possible in Series/DataFrame stat ops with - ``level`` parameter passed (GH545_) + ``level`` parameter passed (:issue:`545`) - Ported skiplist data structure to C to speed up ``rolling_median`` by about - 5-10x in most typical use cases (GH374_) - -.. _GH115: https://github.com/pydata/pandas/issues/115 -.. _GH166: https://github.com/pydata/pandas/issues/166 -.. _GH220: https://github.com/pydata/pandas/issues/220 -.. _GH288: https://github.com/pydata/pandas/issues/288 -.. _GH249: https://github.com/pydata/pandas/issues/249 -.. 
_GH267: https://github.com/pydata/pandas/issues/267 -.. _GH273: https://github.com/pydata/pandas/issues/273 -.. _GH316: https://github.com/pydata/pandas/issues/316 -.. _GH338: https://github.com/pydata/pandas/issues/338 -.. _GH342: https://github.com/pydata/pandas/issues/342 -.. _GH374: https://github.com/pydata/pandas/issues/374 -.. _GH439: https://github.com/pydata/pandas/issues/439 -.. _GH468: https://github.com/pydata/pandas/issues/468 -.. _GH476: https://github.com/pydata/pandas/issues/476 -.. _GH479: https://github.com/pydata/pandas/issues/479 -.. _GH489: https://github.com/pydata/pandas/issues/489 -.. _GH492: https://github.com/pydata/pandas/issues/492 -.. _GH496: https://github.com/pydata/pandas/issues/496 -.. _GH497: https://github.com/pydata/pandas/issues/497 -.. _GH498: https://github.com/pydata/pandas/issues/498 -.. _GH526: https://github.com/pydata/pandas/issues/526 -.. _GH536: https://github.com/pydata/pandas/issues/536 -.. _GH542: https://github.com/pydata/pandas/issues/542 -.. _GH545: https://github.com/pydata/pandas/issues/545 -.. _GH554: https://github.com/pydata/pandas/issues/554 -.. _GH595: https://github.com/pydata/pandas/issues/595 -.. _GH647: https://github.com/pydata/pandas/issues/647 -.. _GH699: https://github.com/pydata/pandas/issues/699 -.. _GH717: https://github.com/pydata/pandas/issues/717 -.. _GH93: https://github.com/pydata/pandas/issues/93 -.. _GH93: https://github.com/pydata/pandas/issues/93 -.. _PR521: https://github.com/pydata/pandas/pull/521 -.. _PR534: https://github.com/pydata/pandas/pull/534 -.. _PR552: https://github.com/pydata/pandas/pull/552 -.. _PR554: https://github.com/pydata/pandas/pull/554 -.. 
_PR563: https://github.com/pydata/pandas/pull/563 + 5-10x in most typical use cases (:issue:`374`) + diff --git a/doc/source/v0.7.1.txt b/doc/source/v0.7.1.txt index 181751eb1c4b0..bc12cb8d200cd 100644 --- a/doc/source/v0.7.1.txt +++ b/doc/source/v0.7.1.txt @@ -10,30 +10,21 @@ New features ~~~~~~~~~~~~ - Add ``to_clipboard`` function to pandas namespace for writing objects to - the system clipboard (GH774_) + the system clipboard (:issue:`774`) - Add ``itertuples`` method to DataFrame for iterating through the rows of a - dataframe as tuples (GH818_) + dataframe as tuples (:issue:`818`) - Add ability to pass fill_value and method to DataFrame and Series align - method (GH806_, GH807_) - - Add fill_value option to reindex, align methods (GH784_) - - Enable concat to produce DataFrame from Series (GH787_) - - Add ``between`` method to Series (GH802_) + method (:issue:`806`, :issue:`807`) + - Add fill_value option to reindex, align methods (:issue:`784`) + - Enable concat to produce DataFrame from Series (:issue:`787`) + - Add ``between`` method to Series (:issue:`802`) - Add HTML representation hook to DataFrame for the IPython HTML notebook - (GH773_) + (:issue:`773`) - Support for reading Excel 2007 XML documents using openpyxl Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improve performance and memory usage of fillna on DataFrame - - Can concatenate a list of Series along axis=1 to obtain a DataFrame (GH787_) + - Can concatenate a list of Series along axis=1 to obtain a DataFrame (:issue:`787`) -.. _GH774: https://github.com/pydata/pandas/issues/774 -.. _GH818: https://github.com/pydata/pandas/issues/818 -.. _GH806: https://github.com/pydata/pandas/issues/806 -.. _GH807: https://github.com/pydata/pandas/issues/807 -.. _GH784: https://github.com/pydata/pandas/issues/784 -.. _GH787: https://github.com/pydata/pandas/issues/787 -.. _GH802: https://github.com/pydata/pandas/issues/802 -.. _GH773: https://github.com/pydata/pandas/issues/773 -.. 
_GH787: https://github.com/pydata/pandas/issues/787 \ No newline at end of file diff --git a/doc/source/v0.7.2.txt b/doc/source/v0.7.2.txt index 04f7686ed20c6..c711639354139 100644 --- a/doc/source/v0.7.2.txt +++ b/doc/source/v0.7.2.txt @@ -8,31 +8,20 @@ This release targets bugs in 0.7.1, and adds a few minor features. New features ~~~~~~~~~~~~ - - Add additional tie-breaking methods in DataFrame.rank (GH874_) - - Add ascending parameter to rank in Series, DataFrame (GH875_) - - Add coerce_float option to DataFrame.from_records (GH893_) - - Add sort_columns parameter to allow unsorted plots (GH918_) - - Enable column access via attributes on GroupBy (GH882_) - - Can pass dict of values to DataFrame.fillna (GH661_) + - Add additional tie-breaking methods in DataFrame.rank (:issue:`874`) + - Add ascending parameter to rank in Series, DataFrame (:issue:`875`) + - Add coerce_float option to DataFrame.from_records (:issue:`893`) + - Add sort_columns parameter to allow unsorted plots (:issue:`918`) + - Enable column access via attributes on GroupBy (:issue:`882`) + - Can pass dict of values to DataFrame.fillna (:issue:`661`) - Can select multiple hierarchical groups by passing list of values in .ix - (GH134_) - - Add ``axis`` option to DataFrame.fillna (GH174_) - - Add level keyword to ``drop`` for dropping values from a level (GH159_) + (:issue:`134`) + - Add ``axis`` option to DataFrame.fillna (:issue:`174`) + - Add level keyword to ``drop`` for dropping values from a level (:issue:`159`) Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - - Use khash for Series.value_counts, add raw function to algorithms.py (GH861_) - - Intercept __builtin__.sum in groupby (GH885_) + - Use khash for Series.value_counts, add raw function to algorithms.py (:issue:`861`) + - Intercept __builtin__.sum in groupby (:issue:`885`) -.. _GH134: https://github.com/pydata/pandas/issues/134 -.. _GH159: https://github.com/pydata/pandas/issues/159 -.. 
_GH174: https://github.com/pydata/pandas/issues/174 -.. _GH661: https://github.com/pydata/pandas/issues/661 -.. _GH874: https://github.com/pydata/pandas/issues/874 -.. _GH875: https://github.com/pydata/pandas/issues/875 -.. _GH893: https://github.com/pydata/pandas/issues/893 -.. _GH918: https://github.com/pydata/pandas/issues/918 -.. _GH882: https://github.com/pydata/pandas/issues/882 -.. _GH861: https://github.com/pydata/pandas/issues/861 -.. _GH885: https://github.com/pydata/pandas/issues/885 diff --git a/doc/source/v0.7.3.txt b/doc/source/v0.7.3.txt index 72106ae7efb5d..afb4b8faac2cc 100644 --- a/doc/source/v0.7.3.txt +++ b/doc/source/v0.7.3.txt @@ -6,8 +6,8 @@ v.0.7.3 (April 12, 2012) This is a minor release from 0.7.2 and fixes many minor bugs and adds a number of nice new features. There are also a couple of API changes to note; these should not affect very many users, and we are inclined to call them "bug fixes" -even though they do constitute a change in behavior. See the `full release -notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue +even though they do constitute a change in behavior. See the :ref:`full release +notes <release>` or issue tracker on GitHub for a complete list. New features diff --git a/doc/source/v0.8.0.txt b/doc/source/v0.8.0.txt index 22e6a056bd4ce..243b7466d7dee 100644 --- a/doc/source/v0.8.0.txt +++ b/doc/source/v0.8.0.txt @@ -10,8 +10,8 @@ than 20 distinct authors. Most pandas 0.7.3 and earlier users should not experience any issues upgrading, but due to the migration to the NumPy datetime64 dtype, there may be a number of bugs and incompatibilities lurking. Lingering incompatibilities will be fixed ASAP in a 0.8.1 release if -necessary. See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +necessary. See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. 
Support for non-unique indexes diff --git a/doc/source/v0.8.1.txt b/doc/source/v0.8.1.txt index 0bfa1d20e1b22..cecf6f16cdc71 100644 --- a/doc/source/v0.8.1.txt +++ b/doc/source/v0.8.1.txt @@ -11,36 +11,26 @@ New features ~~~~~~~~~~~~ - Add :ref:`vectorized string processing methods <basics.string_methods>` - accessible via Series.str (GH620_) - - Add option to disable adjustment in EWMA (GH1584_) - - :ref:`Radviz plot <visualization.radviz>` (GH1566_) + accessible via Series.str (:issue:`620`) + - Add option to disable adjustment in EWMA (:issue:`1584`) + - :ref:`Radviz plot <visualization.radviz>` (:issue:`1566`) - :ref:`Parallel coordinates plot <visualization.parallel_coordinates>` - :ref:`Bootstrap plot <visualization.bootstrap>` - - Per column styles and secondary y-axis plotting (GH1559_) - - New datetime converters millisecond plotting (GH1599_) - - Add option to disable "sparse" display of hierarchical indexes (GH1538_) + - Per column styles and secondary y-axis plotting (:issue:`1559`) + - New datetime converters millisecond plotting (:issue:`1599`) + - Add option to disable "sparse" display of hierarchical indexes (:issue:`1538`) - Series/DataFrame's ``set_index`` method can :ref:`append levels - <indexing.set_index>` to an existing Index/MultiIndex (GH1569_, GH1577_) + <indexing.set_index>` to an existing Index/MultiIndex (:issue:`1569`, :issue:`1577`) Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved implementation of rolling min and max (thanks to `Bottleneck <http://berkeleyanalytics.com/bottleneck/>`__ !) 
- - Add accelerated ``'median'`` GroupBy option (GH1358_) + - Add accelerated ``'median'`` GroupBy option (:issue:`1358`) - Significantly improve the performance of parsing ISO8601-format date - strings with ``DatetimeIndex`` or ``to_datetime`` (GH1571_) + strings with ``DatetimeIndex`` or ``to_datetime`` (:issue:`1571`) - Improve the performance of GroupBy on single-key aggregations and use with Categorical types - Significant datetime parsing performance improvments -.. _GH620: https://github.com/pydata/pandas/issues/620 -.. _GH1358: https://github.com/pydata/pandas/issues/1358 -.. _GH1538: https://github.com/pydata/pandas/issues/1538 -.. _GH1559: https://github.com/pydata/pandas/issues/1559 -.. _GH1584: https://github.com/pydata/pandas/issues/1584 -.. _GH1566: https://github.com/pydata/pandas/issues/1566 -.. _GH1569: https://github.com/pydata/pandas/issues/1569 -.. _GH1571: https://github.com/pydata/pandas/issues/1571 -.. _GH1577: https://github.com/pydata/pandas/issues/1577 -.. _GH1599: https://github.com/pydata/pandas/issues/1599 diff --git a/doc/source/v0.9.0.txt b/doc/source/v0.9.0.txt index 3b91e64253dea..b0c2c2455ab77 100644 --- a/doc/source/v0.9.0.txt +++ b/doc/source/v0.9.0.txt @@ -13,19 +13,19 @@ New features ~~~~~~~~~~~~ - Add ``encode`` and ``decode`` for unicode handling to :ref:`vectorized - string processing methods <basics.string_methods>` in Series.str (GH1706_) - - Add ``DataFrame.to_latex`` method (GH1735_) - - Add convenient expanding window equivalents of all rolling_* ops (GH1785_) + string processing methods <basics.string_methods>` in Series.str (:issue:`1706`) + - Add ``DataFrame.to_latex`` method (:issue:`1735`) + - Add convenient expanding window equivalents of all rolling_* ops (:issue:`1785`) - Add Options class to pandas.io.data for fetching options data from Yahoo! 
- Finance (GH1748_, GH1739_) + Finance (:issue:`1748`, :issue:`1739`) - More flexible parsing of boolean values (Yes, No, TRUE, FALSE, etc) - (GH1691_, GH1295_) + (:issue:`1691`, :issue:`1295`) - Add ``level`` parameter to ``Series.reset_index`` - - ``TimeSeries.between_time`` can now select times across midnight (GH1871_) - - Series constructor can now handle generator as input (GH1679_) + - ``TimeSeries.between_time`` can now select times across midnight (:issue:`1871`) + - Series constructor can now handle generator as input (:issue:`1679`) - ``DataFrame.dropna`` can now take multiple axes (tuple/list) as input - (GH924_) - - Enable ``skip_footer`` parameter in ``ExcelFile.parse`` (GH1843_) + (:issue:`924`) + - Enable ``skip_footer`` parameter in ``ExcelFile.parse`` (:issue:`1843`) API changes ~~~~~~~~~~~ @@ -58,57 +58,37 @@ API changes s2 - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear`` - (GH1723_) + (:issue:`1723`) - Don't modify NumPy suppress printoption to True at import time - The internal HDF5 data arrangement for DataFrames has been transposed. Legacy - files will still be readable by HDFStore (GH1834_, GH1824_) + files will still be readable by HDFStore (:issue:`1834`, :issue:`1824`) - Legacy cruft removed: pandas.stats.misc.quantileTS -- Use ISO8601 format for Period repr: monthly, daily, and on down (GH1776_) +- Use ISO8601 format for Period repr: monthly, daily, and on down (:issue:`1776`) - Empty DataFrame columns are now created as object dtype. This will prevent a class of TypeErrors that was occurring in code where the dtype of a column would depend on the presence of data or not (e.g. 
a SQL query having results) - (GH1783_) + (:issue:`1783`) - Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame - (GH1630_) + (:issue:`1630`) - ``first`` and ``last`` methods in ``GroupBy`` no longer drop non-numeric - columns (GH1809_) + columns (:issue:`1809`) - Resolved inconsistencies in specifying custom NA values in text parser. ``na_values`` of type dict no longer override default NAs unless - ``keep_default_na`` is set to false explicitly (GH1657_) + ``keep_default_na`` is set to false explicitly (:issue:`1657`) - ``DataFrame.dot`` will not do data alignment, and also work with Series - (GH1915_) + (:issue:`1915`) -See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. -.. _GH1706: https://github.com/pydata/pandas/issues/1706 -.. _GH1735: https://github.com/pydata/pandas/issues/1735 -.. _GH1785: https://github.com/pydata/pandas/issues/1785 -.. _GH1748: https://github.com/pydata/pandas/issues/1748 -.. _GH1739: https://github.com/pydata/pandas/issues/1739 -.. _GH1691: https://github.com/pydata/pandas/issues/1691 -.. _GH1295: https://github.com/pydata/pandas/issues/1295 -.. _GH1723: https://github.com/pydata/pandas/issues/1723 -.. _GH1834: https://github.com/pydata/pandas/issues/1834 -.. _GH1824: https://github.com/pydata/pandas/issues/1824 -.. _GH1776: https://github.com/pydata/pandas/issues/1776 -.. _GH1783: https://github.com/pydata/pandas/issues/1783 -.. _GH1630: https://github.com/pydata/pandas/issues/1630 -.. _GH1809: https://github.com/pydata/pandas/issues/1809 -.. _GH1657: https://github.com/pydata/pandas/issues/1657 -.. _GH1871: https://github.com/pydata/pandas/issues/1871 -.. _GH1679: https://github.com/pydata/pandas/issues/1679 -.. _GH1915: https://github.com/pydata/pandas/issues/1915 -.. _GH924: https://github.com/pydata/pandas/issues/924 -.. 
_GH1843: https://github.com/pydata/pandas/issues/1843 diff --git a/doc/source/v0.9.1.txt b/doc/source/v0.9.1.txt index 6733ab8a9e95e..7de000c255d4c 100644 --- a/doc/source/v0.9.1.txt +++ b/doc/source/v0.9.1.txt @@ -13,7 +13,7 @@ New features ~~~~~~~~~~~~ - `Series.sort`, `DataFrame.sort`, and `DataFrame.sort_index` can now be - specified in a per-column manner to support multiple sort orders (GH928_) + specified in a per-column manner to support multiple sort orders (:issue:`928`) .. ipython:: python @@ -24,7 +24,7 @@ New features - `DataFrame.rank` now supports additional argument values for the `na_option` parameter so missing values can be assigned either the largest - or the smallest rank (GH1508_, GH2159_) + or the smallest rank (:issue:`1508`, :issue:`2159`) .. ipython:: python @@ -40,7 +40,7 @@ New features - DataFrame has new `where` and `mask` methods to select values according to a - given boolean mask (GH2109_, GH2151_) + given boolean mask (:issue:`2109`, :issue:`2151`) DataFrame currently supports slicing via a boolean vector the same length as the DataFrame (inside the `[]`). The returned DataFrame has the same number of columns as the original, but is sliced on its index. @@ -81,7 +81,7 @@ New features df.mask(df<=0) - - Enable referencing of Excel columns by their column names (GH1936_) + - Enable referencing of Excel columns by their column names (:issue:`1936`) .. 
ipython:: python @@ -92,13 +92,13 @@ New features - Added option to disable pandas-style tick locators and formatters using `series.plot(x_compat=True)` or `pandas.plot_params['x_compat'] = - True` (GH2205_) + True` (:issue:`2205`) - Existing TimeSeries methods `at_time` and `between_time` were added to - DataFrame (GH2149_) - - DataFrame.dot can now accept ndarrays (GH2042_) - - DataFrame.drop now supports non-unique indexes (GH2101_) - - Panel.shift now supports negative periods (GH2164_) - - DataFrame now support unary ~ operator (GH2110_) + DataFrame (:issue:`2149`) + - DataFrame.dot can now accept ndarrays (:issue:`2042`) + - DataFrame.drop now supports non-unique indexes (:issue:`2101`) + - Panel.shift now supports negative periods (:issue:`2164`) + - DataFrame now support unary ~ operator (:issue:`2110`) API changes ~~~~~~~~~~~ @@ -116,7 +116,7 @@ API changes - Period.end_time now returns the last nanosecond in the time interval - (GH2124_, GH2125_, GH1764_) + (:issue:`2124`, :issue:`2125`, :issue:`1764`) .. ipython:: python @@ -126,7 +126,7 @@ API changes - File parsers no longer coerce to float or bool for columns that have custom - converters specified (GH2184_) + converters specified (:issue:`2184`) .. ipython:: python @@ -136,98 +136,6 @@ API changes read_csv(StringIO(data), converters={'A' : lambda x: x.strip()}) -See the `full release notes -<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +See the :ref:`full release notes +<release>` or issue tracker on GitHub for a complete list. - - -.. _GH1508: https://github.com/pydata/pandas/issues/1508 -.. _GH928: https://github.com/pydata/pandas/issues/928 -.. _GH2159: https://github.com/pydata/pandas/issues/2159 -.. _GH2109: https://github.com/pydata/pandas/issues/2109 -.. _GH2151: https://github.com/pydata/pandas/issues/2151 -.. _GH2149: https://github.com/pydata/pandas/issues/2149 -.. _GH2101: https://github.com/pydata/pandas/issues/2101 -.. 
_GH2042: https://github.com/pydata/pandas/issues/2042 -.. _GH1936: https://github.com/pydata/pandas/issues/1936 -.. _GH1764: https://github.com/pydata/pandas/issues/1764 -.. _GH2125: https://github.com/pydata/pandas/issues/2125 -.. _GH2124: https://github.com/pydata/pandas/issues/2124 -.. _GH2110: https://github.com/pydata/pandas/issues/2110 -.. _GH2184: https://github.com/pydata/pandas/issues/2184 -.. _GH2205: https://github.com/pydata/pandas/issues/2205 - -.. _GH2181: https://github.com/pydata/pandas/issues/2181 -.. _GH2180: https://github.com/pydata/pandas/issues/2180 -.. _GH2176: https://github.com/pydata/pandas/issues/2176 -.. _GH2174: https://github.com/pydata/pandas/issues/2174 -.. _GH2173: https://github.com/pydata/pandas/issues/2173 -.. _GH2170: https://github.com/pydata/pandas/issues/2170 -.. _GH2169: https://github.com/pydata/pandas/issues/2169 -.. _GH2167: https://github.com/pydata/pandas/issues/2167 -.. _GH2166: https://github.com/pydata/pandas/issues/2166 -.. _GH2165: https://github.com/pydata/pandas/issues/2165 -.. _GH2164: https://github.com/pydata/pandas/issues/2164 -.. _GH2163: https://github.com/pydata/pandas/issues/2163 -.. _GH2161: https://github.com/pydata/pandas/issues/2161 -.. _GH2157: https://github.com/pydata/pandas/issues/2157 -.. _GH2155: https://github.com/pydata/pandas/issues/2155 -.. _GH2152: https://github.com/pydata/pandas/issues/2152 -.. _GH2150: https://github.com/pydata/pandas/issues/2150 -.. _GH2148: https://github.com/pydata/pandas/issues/2148 -.. _GH2147: https://github.com/pydata/pandas/issues/2147 -.. _GH2146: https://github.com/pydata/pandas/issues/2146 -.. _GH2144: https://github.com/pydata/pandas/issues/2144 -.. _GH2140: https://github.com/pydata/pandas/issues/2140 -.. _GH2135: https://github.com/pydata/pandas/issues/2135 -.. _GH2133: https://github.com/pydata/pandas/issues/2133 -.. _GH2131: https://github.com/pydata/pandas/issues/2131 -.. _GH2129: https://github.com/pydata/pandas/issues/2129 -.. 
_GH2128: https://github.com/pydata/pandas/issues/2128 -.. _GH2127: https://github.com/pydata/pandas/issues/2127 -.. _GH2122: https://github.com/pydata/pandas/issues/2122 -.. _GH2120: https://github.com/pydata/pandas/issues/2120 -.. _GH2119: https://github.com/pydata/pandas/issues/2119 -.. _GH2117: https://github.com/pydata/pandas/issues/2117 -.. _GH2116: https://github.com/pydata/pandas/issues/2116 -.. _GH2114: https://github.com/pydata/pandas/issues/2114 -.. _GH2113: https://github.com/pydata/pandas/issues/2113 -.. _GH2111: https://github.com/pydata/pandas/issues/2111 -.. _GH2108: https://github.com/pydata/pandas/issues/2108 -.. _GH2107: https://github.com/pydata/pandas/issues/2107 -.. _GH2103: https://github.com/pydata/pandas/issues/2103 -.. _GH2100: https://github.com/pydata/pandas/issues/2100 -.. _GH2096: https://github.com/pydata/pandas/issues/2096 -.. _GH2095: https://github.com/pydata/pandas/issues/2095 -.. _GH2093: https://github.com/pydata/pandas/issues/2093 -.. _GH2087: https://github.com/pydata/pandas/issues/2087 -.. _GH2086: https://github.com/pydata/pandas/issues/2086 -.. _GH2083: https://github.com/pydata/pandas/issues/2083 -.. _GH2082: https://github.com/pydata/pandas/issues/2082 -.. _GH2080: https://github.com/pydata/pandas/issues/2080 -.. _GH2079: https://github.com/pydata/pandas/issues/2079 -.. _GH2078: https://github.com/pydata/pandas/issues/2078 -.. _GH2077: https://github.com/pydata/pandas/issues/2077 -.. _GH2075: https://github.com/pydata/pandas/issues/2075 -.. _GH2068: https://github.com/pydata/pandas/issues/2068 -.. _GH2066: https://github.com/pydata/pandas/issues/2066 -.. _GH2065: https://github.com/pydata/pandas/issues/2065 -.. _GH2063: https://github.com/pydata/pandas/issues/2063 -.. _GH2061: https://github.com/pydata/pandas/issues/2061 -.. _GH2060: https://github.com/pydata/pandas/issues/2060 -.. _GH2059: https://github.com/pydata/pandas/issues/2059 -.. _GH2056: https://github.com/pydata/pandas/issues/2056 -.. 
_GH2051: https://github.com/pydata/pandas/issues/2051 -.. _GH2049: https://github.com/pydata/pandas/issues/2049 -.. _GH2043: https://github.com/pydata/pandas/issues/2043 -.. _GH2041: https://github.com/pydata/pandas/issues/2041 -.. _GH2032: https://github.com/pydata/pandas/issues/2032 -.. _GH2029: https://github.com/pydata/pandas/issues/2029 -.. _GH2018: https://github.com/pydata/pandas/issues/2018 -.. _GH2008: https://github.com/pydata/pandas/issues/2008 -.. _GH2005: https://github.com/pydata/pandas/issues/2005 -.. _GH1979: https://github.com/pydata/pandas/issues/1979 -.. _GH1976: https://github.com/pydata/pandas/issues/1976 -.. _GH1959: https://github.com/pydata/pandas/issues/1959 -.. _GH1890: https://github.com/pydata/pandas/issues/1890 -.. _GH1555: https://github.com/pydata/pandas/issues/1555
closes #3182.
https://api.github.com/repos/pandas-dev/pandas/pulls/3961
2013-06-19T16:14:32Z
2013-06-20T18:10:16Z
2013-06-20T18:10:16Z
2014-06-20T21:52:48Z
BUG/BLD: add compiler flag for older compilers
diff --git a/setup.py b/setup.py index bd23b4ef05ce2..ee8f30d62ac6c 100755 --- a/setup.py +++ b/setup.py @@ -482,7 +482,8 @@ def pxd(name): 'pandas/src/datetime/np_datetime_strings.c'], include_dirs=['pandas/src/ujson/python', 'pandas/src/ujson/lib', - 'pandas/src/datetime'] + common_include) + 'pandas/src/datetime'] + common_include, + extra_compile_args=['-D_GNU_SOURCE']) extensions.append(ujson_ext)
The -D_GNU_SOURCE is needed by older versions of GCC. Many newer versions enable this flag by default, which is why it wasn't being caught before. closes #3957.
https://api.github.com/repos/pandas-dev/pandas/pulls/3959
2013-06-19T14:51:05Z
2013-06-19T16:48:59Z
2013-06-19T16:48:59Z
2014-07-11T21:47:26Z
CLN: Change bare exceptions pt 1
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 035db279064a0..de510aa155412 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -261,7 +261,7 @@ def _get_val_at(self, loc): loc += n if loc >= len(self) or loc < 0: - raise Exception('Out of bounds access') + raise IndexError('out of bounds access') sp_loc = self.sp_index.lookup(loc) if sp_loc == -1: @@ -283,7 +283,7 @@ def take(self, indices, axis=0): n = len(self) if (indices < 0).any() or (indices >= n).any(): - raise Exception('out of bounds access') + raise IndexError('out of bounds access') if self.sp_index.npoints > 0: locs = np.array([self.sp_index.lookup(loc) for loc in indices]) @@ -296,10 +296,10 @@ def take(self, indices, axis=0): return result def __setitem__(self, key, value): - raise Exception('SparseArray objects are immutable') + raise TypeError('%r object does not support item assignment' % self.__class__.__name__) def __setslice__(self, i, j, value): - raise Exception('SparseArray objects are immutable') + raise TypeError('%r object does not support item assignment' % self.__class__.__name__) def to_dense(self): """ @@ -313,7 +313,7 @@ def astype(self, dtype=None): """ dtype = np.dtype(dtype) if dtype is not None and dtype not in (np.float_, float): - raise Exception('Can only support floating point data for now') + raise TypeError('Can only support floating point data for now') return self.copy() def copy(self, deep=True): diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 9694cc005d178..0a08fba49afeb 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -195,10 +195,10 @@ def _init_matrix(self, data, index, columns, dtype=None): columns = _default_index(K) if len(columns) != K: - raise Exception('Column length mismatch: %d vs. %d' % + raise ValueError('Column length mismatch: %d vs. %d' % (len(columns), K)) if len(index) != N: - raise Exception('Index length mismatch: %d vs. %d' % + raise ValueError('Index length mismatch: %d vs. 
%d' % (len(index), N)) data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)]) @@ -585,7 +585,7 @@ def _combine_const(self, other, func): def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None): if level is not None: - raise Exception('Reindex by level not supported for sparse') + raise TypeError('Reindex by level not supported for sparse') if self.index.equals(index): if copy: @@ -616,7 +616,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, def _reindex_columns(self, columns, copy, level, fill_value, limit=None): if level is not None: - raise Exception('Reindex by level not supported for sparse') + raise TypeError('Reindex by level not supported for sparse') if com.notnull(fill_value): raise NotImplementedError @@ -889,9 +889,12 @@ def stack_sparse_frame(frame): inds_to_concat = [] vals_to_concat = [] + # TODO: Figure out whether this can be reached. + # I think this currently can't be reached because you can't build a SparseDataFrame + # with a non-np.NaN fill value (fails earlier). 
for _, series in frame.iteritems(): if not np.isnan(series.fill_value): - raise Exception('This routine assumes NaN fill value') + raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) @@ -931,7 +934,7 @@ def homogenize(series_dict): for _, series in series_dict.iteritems(): if not np.isnan(series.fill_value): - raise Exception('this method is only valid with NaN fill values') + raise TypeError('this method is only valid with NaN fill values') if index is None: index = series.sp_index diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 0b2842155b299..246e6fa93918f 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -249,7 +249,7 @@ def to_frame(self, filter_observations=True): frame : DataFrame """ if not filter_observations: - raise Exception('filter_observations=False not supported for ' + raise TypeError('filter_observations=False not supported for ' 'SparsePanel.to_long') I, N, K = self.shape @@ -325,7 +325,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None, if item in self._frames: new_frames[item] = self._frames[item] else: - raise Exception('Reindexing with new items not yet ' + raise NotImplementedError('Reindexing with new items not yet ' 'supported') else: new_frames = self._frames @@ -488,7 +488,7 @@ def _stack_sparse_info(frame): series = frame[col] if not np.isnan(series.fill_value): - raise Exception('This routine assumes NaN fill value') + raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index bd01845a295b6..1b8d3541da289 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -133,7 +133,7 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block', raise AssertionError() else: if index is None: - raise Exception('must pass index!') + 
raise TypeError('must pass index!') length = len(index) @@ -388,7 +388,7 @@ def astype(self, dtype=None): """ if dtype is not None and dtype not in (np.float_, float): - raise Exception('Can only support floating point data') + raise TypeError('Can only support floating point data') return self.copy() diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py index cf2cd2f687e8d..a92170621f50d 100644 --- a/pandas/sparse/tests/test_array.py +++ b/pandas/sparse/tests/test_array.py @@ -1,3 +1,4 @@ +import re from numpy import nan, ndarray import numpy as np @@ -8,7 +9,7 @@ from pandas.core.series import Series from pandas.core.common import notnull from pandas.sparse.api import SparseArray -from pandas.util.testing import assert_almost_equal +from pandas.util.testing import assert_almost_equal, assertRaisesRegexp def assert_sp_array_equal(left, right): @@ -28,6 +29,24 @@ def setUp(self): self.arr = SparseArray(self.arr_data) self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + def test_get_item(self): + errmsg = re.compile("bounds") + assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[11]) + assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[-11]) + self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1]) + + def test_bad_take(self): + assertRaisesRegexp(IndexError, "bounds", lambda : self.arr.take(11)) + self.assertRaises(IndexError, lambda : self.arr.take(-11)) + + def test_set_item(self): + def setitem(): + self.arr[5] = 3 + def setslice(): + self.arr[1:5] = 2 + assertRaisesRegexp(TypeError, "item assignment", setitem) + assertRaisesRegexp(TypeError, "item assignment", setslice) + def test_constructor_from_sparse(self): res = SparseArray(self.zarr) self.assertEquals(res.fill_value, 0) @@ -47,7 +66,7 @@ def test_astype(self): res.sp_values[:3] = 27 self.assert_(not (self.arr.sp_values[:3] == 27).any()) - self.assertRaises(Exception, self.arr.astype, 'i8') + assertRaisesRegexp(TypeError, "floating point", 
self.arr.astype, 'i8') def test_copy_shallow(self): arr2 = self.arr.copy(deep=False) diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index c6515cd4113f0..1382a6a642aa3 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -13,7 +13,7 @@ dec = np.testing.dec from pandas.util.testing import (assert_almost_equal, assert_series_equal, - assert_frame_equal, assert_panel_equal) + assert_frame_equal, assert_panel_equal, assertRaisesRegexp) from numpy.testing import assert_equal from pandas import Series, DataFrame, bdate_range, Panel @@ -641,7 +641,7 @@ def _check_matches(indices, expected): # must have NaN fill value data = {'a': SparseSeries(np.arange(7), sparse_index=expected2, fill_value=0)} - nose.tools.assert_raises(Exception, spf.homogenize, data) + assertRaisesRegexp(TypeError, "NaN fill value", spf.homogenize, data) def test_fill_value_corner(self): cop = self.zbseries.copy() @@ -791,7 +791,7 @@ def test_constructor(self): assert_sp_frame_equal(cons, reindexed) # assert level parameter breaks reindex - self.assertRaises(Exception, self.frame.reindex, idx, level=0) + self.assertRaises(TypeError, self.frame.reindex, idx, level=0) repr(self.frame) @@ -805,14 +805,14 @@ def test_constructor_ndarray(self): assert_sp_frame_equal(sp, self.frame.reindex(columns=['A'])) # raise on level argument - self.assertRaises(Exception, self.frame.reindex, columns=['A'], + self.assertRaises(TypeError, self.frame.reindex, columns=['A'], level=1) # wrong length index / columns - self.assertRaises(Exception, SparseDataFrame, self.frame.values, - index=self.frame.index[:-1]) - self.assertRaises(Exception, SparseDataFrame, self.frame.values, - columns=self.frame.columns[:-1]) + assertRaisesRegexp(ValueError, "^Index length", SparseDataFrame, self.frame.values, + index=self.frame.index[:-1]) + assertRaisesRegexp(ValueError, "^Column length", SparseDataFrame, self.frame.values, + columns=self.frame.columns[:-1]) def 
test_constructor_empty(self): sp = SparseDataFrame() @@ -840,11 +840,17 @@ def test_constructor_from_series(self): x = Series(np.random.randn(10000), name ='a') y = Series(np.random.randn(10000), name ='b') - x.ix[:9998] = 0 - x = x.to_sparse(fill_value=0) + x2 = x.astype(float) + x2.ix[:9998] = np.NaN + x_sparse = x2.to_sparse(fill_value=np.NaN) - # currently fails - #df1 = SparseDataFrame([x, y]) + # Currently fails too with weird ufunc error + # df1 = SparseDataFrame([x_sparse, y]) + + y.ix[:9998] = 0 + y_sparse = y.to_sparse(fill_value=0) + # without sparse value raises error + # df2 = SparseDataFrame([x2_sparse, y]) def test_dtypes(self): df = DataFrame(np.random.randn(10000, 4)) diff --git a/pandas/stats/common.py b/pandas/stats/common.py index c3034dbc390bf..75ebc9284ca21 100644 --- a/pandas/stats/common.py +++ b/pandas/stats/common.py @@ -1,42 +1,33 @@ -def _get_cluster_type(cluster_type): - cluster_type = _WINDOW_TYPES.get(cluster_type, cluster_type) - if cluster_type is None: - return cluster_type - - cluster_type_up = cluster_type.upper() - - if cluster_type_up == 'ENTITY': - return 'entity' - elif cluster_type_up == 'TIME': - return 'time' - else: # pragma: no cover - raise Exception('Unrecognized cluster type: %s' % cluster_type) - -_CLUSTER_TYPES = { - 0: 'time', - 1: 'entity' -} _WINDOW_TYPES = { 0: 'full_sample', 1: 'rolling', 2: 'expanding' } +# also allow 'rolling' as key +_WINDOW_TYPES.update((v, v) for k,v in _WINDOW_TYPES.items()) +_ADDITIONAL_CLUSTER_TYPES = set(("entity", "time")) +def _get_cluster_type(cluster_type): + # this was previous behavior + if cluster_type is None: + return cluster_type + try: + return _get_window_type(cluster_type) + except ValueError: + final_type = str(cluster_type).lower().replace("_", " ") + if final_type in _ADDITIONAL_CLUSTER_TYPES: + return final_type + raise ValueError('Unrecognized cluster type: %s' % cluster_type) def _get_window_type(window_type): - window_type = _WINDOW_TYPES.get(window_type, 
window_type) - window_type_up = window_type.upper() - - if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'): - return 'full_sample' - elif window_type_up == 'ROLLING': - return 'rolling' - elif window_type_up == 'EXPANDING': - return 'expanding' - else: # pragma: no cover - raise Exception('Unrecognized window type: %s' % window_type) - + # e.g., 0, 1, 2 + final_type = _WINDOW_TYPES.get(window_type) + # e.g., 'full_sample' + final_type = final_type or _WINDOW_TYPES.get(str(window_type).lower().replace(" ", "_")) + if final_type is None: + raise ValueError('Unrecognized window type: %s' % window_type) + return final_type def banner(text, width=80): """ diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 13eeb03e15328..cdcf1ab2ab036 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -634,8 +634,8 @@ def _set_window(self, window_type, window, min_periods): self._window_type = scom._get_window_type(window_type) if self._is_rolling: - if not ((window is not None)): - raise AssertionError() + if window is None: + raise AssertionError("Must specify window.") if min_periods is None: min_periods = window else: diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index 17a45409c1ab5..abcf5b8df9a9a 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -19,7 +19,7 @@ from pandas.stats.ols import _filter_data from pandas.stats.plm import NonPooledPanelOLS, PanelOLS from pandas.util.testing import (assert_almost_equal, assert_series_equal, - assert_frame_equal) + assert_frame_equal, assertRaisesRegexp) import pandas.util.testing as tm from common import BaseTest @@ -663,7 +663,10 @@ def testRollingWithNeweyWest(self): def testRollingWithEntityCluster(self): self.checkMovingOLS(self.panel_x, self.panel_y, cluster='entity') - + def testUnknownClusterRaisesValueError(self): + assertRaisesRegexp(ValueError, "Unrecognized cluster.*ridiculous", + self.checkMovingOLS, self.panel_x, self.panel_y, + 
cluster='ridiculous') def testRollingWithTimeEffectsAndEntityCluster(self): self.checkMovingOLS(self.panel_x, self.panel_y, time_effects=True, cluster='entity') @@ -689,6 +692,10 @@ def testNonPooled(self): self.checkNonPooled(y=self.panel_y, x=self.panel_x) self.checkNonPooled(y=self.panel_y, x=self.panel_x, window_type='rolling', window=25, min_periods=10) + def testUnknownWindowType(self): + self.assertRaisesRegexp(ValueError, "window.*ridiculous", + self.checkNonPooled, y=self.panel_y, x=self.panel_x, + window_type='ridiculous', window=25, min_periods=10) def checkNonPooled(self, x, y, **kwds): # For now, just check that it doesn't crash diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 8342d218e76bb..63f92e9fa7a35 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5455,7 +5455,7 @@ def test_append_series_dict(self): columns=['foo', 'bar', 'baz', 'qux']) series = df.ix[4] - self.assertRaises(Exception, df.append, series, verify_integrity=True) + self.assertRaises(ValueError, df.append, series, verify_integrity=True) series.name = None self.assertRaises(Exception, df.append, series, verify_integrity=True) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c5770c61e2f81..cf7d360b5a93d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1681,7 +1681,7 @@ def test_append(self): else: self.fail("orphaned index!") - self.assertRaises(Exception, self.ts.append, self.ts, + self.assertRaises(ValueError, self.ts.append, self.ts, verify_integrity=True) def test_append_many(self): diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 75e35b403dd78..f96f3b98a0383 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -16,7 +16,7 @@ from pandas.core.internals import (IntBlock, BoolBlock, BlockManager, make_block, _consolidate) from pandas.util.decorators import cache_readonly, Appender, Substitution - +from pandas.core.common import PandasError from 
pandas.sparse.frame import SparseDataFrame import pandas.core.common as com @@ -1002,7 +1002,8 @@ def _get_concatenated_data(self): blk.ref_items = self.new_axes[0] new_data = BlockManager(new_blocks, self.new_axes) - except Exception: # EAFP + # Eventual goal would be to move everything to PandasError or other explicit error + except (Exception, PandasError): # EAFP # should not be possible to fail here for the expected reason with # axis = 0 if self.axis == 0: # pragma: no cover @@ -1039,8 +1040,11 @@ def _concat_blocks(self, blocks): if self.axis > 0: # Not safe to remove this check, need to profile if not _all_indexes_same([b.items for b in blocks]): - raise Exception('dtypes are not consistent throughout ' - 'DataFrames') + # TODO: Either profile this piece or remove. + # FIXME: Need to figure out how to test whether this line exists or does not...(unclear if even possible + # or maybe would require performance test) + raise PandasError('dtypes are not consistent throughout ' + 'DataFrames') return make_block(concat_values, blocks[0].items, self.new_axes[0]) else: @@ -1184,7 +1188,7 @@ def _maybe_check_integrity(self, concat_index): if self.verify_integrity: if not concat_index.is_unique: overlap = concat_index.get_duplicates() - raise Exception('Indexes have overlapping values: %s' + raise ValueError('Indexes have overlapping values: %s' % str(overlap)) diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 1c020353ebb43..b0261077fc767 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -1078,7 +1078,7 @@ def test_append(self): self.assert_(appended is not self.frame) # overlap - self.assertRaises(Exception, self.frame.append, self.frame, + self.assertRaises(ValueError, self.frame.append, self.frame, verify_integrity=True) def test_append_length0_frame(self): diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py index c3462dfc69e27..7da9a3bb5a95a 100644 --- 
a/pandas/tools/tests/test_tile.py +++ b/pandas/tools/tests/test_tile.py @@ -6,6 +6,7 @@ from pandas import DataFrame, Series, unique import pandas.util.testing as tm +from pandas.util.testing import assertRaisesRegexp import pandas.core.common as com from pandas.core.algorithms import quantile @@ -136,7 +137,7 @@ def test_qcut_specify_quantiles(self): self.assert_(factor.equals(expected)) def test_qcut_all_bins_same(self): - self.assertRaises(Exception, qcut, [0,0,0,0,0,0,0,0,0,0], 3) + assertRaisesRegexp(ValueError, "edges.*unique", qcut, [0,0,0,0,0,0,0,0,0,0], 3) def test_cut_out_of_bounds(self): arr = np.random.randn(100) diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index 4c68594a8a093..ffed6cafc1047 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -151,7 +151,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False, ids = bins.searchsorted(x, side=side) if len(algos.unique(bins)) < len(bins): - raise Exception('Bin edges must be unique: %s' % repr(bins)) + raise ValueError('Bin edges must be unique: %s' % repr(bins)) if include_lowest: ids[x == bins[0]] = 1 diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 5985a8a898b27..f54bfee55782a 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -499,7 +499,7 @@ def get_offset(name): if offset is not None: return offset else: - raise Exception('Bad rule name requested: %s!' % name) + raise ValueError('Bad rule name requested: %s.' % name) getOffset = get_offset @@ -522,7 +522,7 @@ def get_offset_name(offset): if name is not None: return name else: - raise Exception('Bad rule given: %s!' % offset) + raise ValueError('Bad rule given: %s.' 
% offset) def get_legacy_offset_name(offset): diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 7f726b8f3c6ab..f560a6bf6e717 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -422,13 +422,20 @@ def tzinfo(self): @classmethod def _cached_range(cls, start=None, end=None, periods=None, offset=None, name=None): + if start is None and end is None: + # I somewhat believe this should never be raised externally and therefore + # should be a `PandasError` but whatever... + raise TypeError('Must specify either start or end.') if start is not None: start = Timestamp(start) if end is not None: end = Timestamp(end) + if (start is None or end is None) and periods is None: + raise TypeError('Must either specify period or provide both start and end.') if offset is None: - raise Exception('Must provide a DateOffset!') + # This can't happen with external-facing code, therefore PandasError + raise TypeError('Must provide offset.') drc = _daterange_cache if offset not in _daterange_cache: @@ -922,10 +929,10 @@ def _maybe_utc_convert(self, other): if isinstance(other, DatetimeIndex): if self.tz is not None: if other.tz is None: - raise Exception('Cannot join tz-naive with tz-aware ' + raise TypeError('Cannot join tz-naive with tz-aware ' 'DatetimeIndex') elif other.tz is not None: - raise Exception('Cannot join tz-naive with tz-aware ' + raise TypeError('Cannot join tz-naive with tz-aware ' 'DatetimeIndex') if self.tz != other.tz: @@ -1492,7 +1499,7 @@ def tz_convert(self, tz): if self.tz is None: # tz naive, use tz_localize - raise Exception('Cannot convert tz-naive timestamps, use ' + raise TypeError('Cannot convert tz-naive timestamps, use ' 'tz_localize to localize') # No conversion since timestamps are all UTC to begin with @@ -1507,7 +1514,7 @@ def tz_localize(self, tz): localized : DatetimeIndex """ if self.tz is not None: - raise ValueError("Already tz-aware, use tz_convert to convert.") + raise TypeError("Already tz-aware, use tz_convert 
to convert.") tz = tools._maybe_get_tz(tz) # Convert to UTC @@ -1678,7 +1685,7 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, Frequency strings can have multiples, e.g. '5H' tz : string or None Time zone name for returning localized DatetimeIndex, for example - Asia/Beijing + Asia/Hong_Kong normalize : bool, default False Normalize start/end dates to midnight before generating date range name : str, default None diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 025a12a17687e..9585d1f81e81d 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -351,7 +351,7 @@ def apply(self, other): return BDay(self.n, offset=self.offset + other, normalize=self.normalize) else: - raise Exception('Only know how to combine business day with ' + raise TypeError('Only know how to combine business day with ' 'datetime or timedelta!') @classmethod @@ -487,7 +487,7 @@ def __init__(self, n=1, **kwds): if self.weekday is not None: if self.weekday < 0 or self.weekday > 6: - raise Exception('Day must be 0<=day<=6, got %d' % + raise ValueError('Day must be 0<=day<=6, got %d' % self.weekday) self._inc = timedelta(weeks=1) @@ -562,13 +562,13 @@ def __init__(self, n=1, **kwds): self.week = kwds['week'] if self.n == 0: - raise Exception('N cannot be 0') + raise ValueError('N cannot be 0') if self.weekday < 0 or self.weekday > 6: - raise Exception('Day must be 0<=day<=6, got %d' % + raise ValueError('Day must be 0<=day<=6, got %d' % self.weekday) if self.week < 0 or self.week > 3: - raise Exception('Week must be 0<=day<=3, got %d' % + raise ValueError('Week must be 0<=day<=3, got %d' % self.week) self.kwds = kwds diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 22ed41f82506d..7fbdbbe328c84 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -13,6 +13,7 @@ import pandas.tseries.tools as tools import pandas.core.datetools as datetools 
+from pandas.util.testing import assertRaisesRegexp def _skip_if_no_pytz(): @@ -65,6 +66,12 @@ def test_constructor(self): self.assertRaises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B') self.assertRaises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B') + def test_naive_aware_conflicts(self): + naive = bdate_range(START, END, freq=datetools.bday, tz=None) + aware = bdate_range(START, END, freq=datetools.bday, tz="Asia/Hong_Kong") + assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", naive.join, aware) + assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", aware.join, naive) + def test_cached_range(self): rng = DatetimeIndex._cached_range(START, END, offset=datetools.bday) @@ -73,16 +80,16 @@ def test_cached_range(self): rng = DatetimeIndex._cached_range(end=START, periods=20, offset=datetools.bday) - self.assertRaises(Exception, DatetimeIndex._cached_range, START, END) + assertRaisesRegexp(TypeError, "offset", DatetimeIndex._cached_range, START, END) - self.assertRaises(Exception, DatetimeIndex._cached_range, START, - freq=datetools.bday) + assertRaisesRegexp(TypeError, "specify period", DatetimeIndex._cached_range, START, + offset=datetools.bday) - self.assertRaises(Exception, DatetimeIndex._cached_range, end=END, - freq=datetools.bday) + assertRaisesRegexp(TypeError, "specify period", DatetimeIndex._cached_range, end=END, + offset=datetools.bday) - self.assertRaises(Exception, DatetimeIndex._cached_range, periods=20, - freq=datetools.bday) + assertRaisesRegexp(TypeError, "start or end", DatetimeIndex._cached_range, periods=20, + offset=datetools.bday) def test_cached_range_bug(self): rng = date_range('2010-09-01 05:00:00', periods=50, diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 209f770da5c94..bcd74e7e6eecd 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -20,6 +20,7 @@ from pandas.tslib import monthrange from pandas.lib import Timestamp +from 
pandas.util.testing import assertRaisesRegexp _multiprocess_can_split_ = True @@ -44,7 +45,7 @@ def test_ole2datetime(): actual = ole2datetime(60000) assert actual == datetime(2064, 4, 8) - assert_raises(Exception, ole2datetime, 60) + assert_raises(ValueError, ole2datetime, 60) def test_to_datetime1(): @@ -285,7 +286,7 @@ def test_apply_large_n(self): self.assertEqual(rs, xp) def test_apply_corner(self): - self.assertRaises(Exception, BDay().apply, BMonthEnd()) + self.assertRaises(TypeError, BDay().apply, BMonthEnd()) def test_offsets_compare_equal(self): # root cause of #456 @@ -301,8 +302,8 @@ def assertOnOffset(offset, date, expected): class TestWeek(unittest.TestCase): def test_corner(self): - self.assertRaises(Exception, Week, weekday=7) - self.assertRaises(Exception, Week, weekday=-1) + self.assertRaises(ValueError, Week, weekday=7) + assertRaisesRegexp(ValueError, "Day must be", Week, weekday=-1) def test_isAnchored(self): self.assert_(Week(weekday=0).isAnchored()) @@ -366,11 +367,11 @@ def test_offsets_compare_equal(self): class TestWeekOfMonth(unittest.TestCase): def test_constructor(self): - self.assertRaises(Exception, WeekOfMonth, n=0, week=1, weekday=1) - self.assertRaises(Exception, WeekOfMonth, n=1, week=4, weekday=0) - self.assertRaises(Exception, WeekOfMonth, n=1, week=-1, weekday=0) - self.assertRaises(Exception, WeekOfMonth, n=1, week=0, weekday=-1) - self.assertRaises(Exception, WeekOfMonth, n=1, week=0, weekday=7) + assertRaisesRegexp(ValueError, "^N cannot be 0", WeekOfMonth, n=0, week=1, weekday=1) + assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=4, weekday=0) + assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=-1, weekday=0) + assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=-1) + assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=7) def test_offset(self): date1 = datetime(2011, 1, 4) # 1st Tuesday of Month @@ -1445,7 +1446,7 @@ def test_hasOffsetName(): def 
test_get_offset_name(): - assert_raises(Exception, get_offset_name, BDay(2)) + assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2)) assert get_offset_name(BDay()) == 'B' assert get_offset_name(BMonthEnd()) == 'BM' @@ -1457,7 +1458,7 @@ def test_get_offset_name(): def test_get_offset(): - assert_raises(Exception, get_offset, 'gibberish') + assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish') assert get_offset('B') == BDay() assert get_offset('b') == BDay() diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 21b11bb455e32..e57b554b7ca3c 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -18,8 +18,9 @@ import pandas.tseries.offsets as offsets from pandas.tseries.index import bdate_range, date_range import pandas.tseries.tools as tools +from pytz import NonExistentTimeError -from pandas.util.testing import assert_series_equal, assert_almost_equal +from pandas.util.testing import assert_series_equal, assert_almost_equal, assertRaisesRegexp import pandas.util.testing as tm import pandas.lib as lib @@ -93,7 +94,8 @@ def test_localize_utc_conversion(self): # DST ambiguity, this should fail rng = date_range('3/11/2012', '3/12/2012', freq='30T') - self.assertRaises(Exception, rng.tz_localize, 'US/Eastern') + # Is this really how it should fail?? 
+ self.assertRaises(NonExistentTimeError, rng.tz_localize, 'US/Eastern') def test_timestamp_tz_localize(self): stamp = Timestamp('3/11/2012 04:00') @@ -672,7 +674,7 @@ def test_series_frame_tz_localize(self): # Can't localize if already tz-aware rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') ts = Series(1, index=rng) - self.assertRaises(Exception, ts.tz_localize, 'US/Eastern') + assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern') def test_series_frame_tz_convert(self): rng = date_range('1/1/2011', periods=200, freq='D', @@ -696,7 +698,7 @@ def test_series_frame_tz_convert(self): # can't convert tz-naive rng = date_range('1/1/2011', periods=200, freq='D') ts = Series(1, index=rng) - self.assertRaises(Exception, ts.tz_convert, 'US/Eastern') + assertRaisesRegexp(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern') def test_join_utc_convert(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index c80d2ef5d4e1c..531d9f399279b 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -344,6 +344,6 @@ def ole2datetime(oledt): # Excel has a bug where it thinks the date 2/29/1900 exists # we just reject any date before 3/1/1900. if val < 61: - raise Exception("Value is outside of acceptable range: %s " % val) + raise ValueError("Value is outside of acceptable range: %s " % val) return OLE_TIME_ZERO + timedelta(days=val) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index c297cfa554fa5..5e1ab59305bab 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -748,6 +748,56 @@ def stdin_encoding(encoding=None): yield sys.stdin = _stdin +def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs): + """ Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement. 
+ + Explanation from standard library: + Like assertRaises() but also tests that regexp matches on the string + representation of the raised exception. regexp may be a regular expression + object or a string containing a regular expression suitable for use by + re.search(). + + You can pass either a regular expression or a compiled regular expression object. + >>> assertRaisesRegexp(ValueError, 'invalid literal for.*XYZ', + ... int, 'XYZ') + >>> import re + >>> assertRaisesRegexp(ValueError, re.compile('literal'), int, 'XYZ') + + If an exception of a different type is raised, it bubbles up. + + >>> assertRaisesRegexp(TypeError, 'literal', int, 'XYZ') + Traceback (most recent call last): + ... + ValueError: invalid literal for int() with base 10: 'XYZ' + >>> dct = {} + >>> assertRaisesRegexp(KeyError, 'pear', dct.__getitem__, 'apple') + Traceback (most recent call last): + ... + AssertionError: "pear" does not match "'apple'" + >>> assertRaisesRegexp(KeyError, 'apple', dct.__getitem__, 'apple') + >>> assertRaisesRegexp(Exception, 'operand type.*int.*dict', lambda : 2 + {}) + """ + + import re + try: + callable(*args, **kwargs) + except Exception as e: + if not issubclass(e.__class__, exception): + # mimics behavior of unittest + raise + # don't recompile + if hasattr(regexp, "search"): + expected_regexp = regexp + else: + expected_regexp = re.compile(regexp) + if not expected_regexp.search(str(e)): + raise AssertionError('"%s" does not match "%s"' % + (expected_regexp.pattern, str(e))) + else: + # Apparently some exceptions don't have a __name__ attribute? Just aping unittest library here + name = getattr(exception, "__name__", str(exception)) + raise AssertionError("{0} not raised".format(name)) + @contextmanager def assert_produces_warning(expected_warning=Warning, filter_level="always"):
Relates to #3954. In addition to changing exceptions types returned to be more explicit, this pull incorporates the following changes: 1. Adds a new `assertRaisesRegexp` to `util/testing.py` to port the `assertRaisesRegexp` helper from 2.7+ unittest 2. Cleans up stats/common. 3. Fix up initial assertions in `tseries.offset._cached_range` that were all off + fix the test cases which were all just raising TypeErrors because they were calling with the wrong arguments. 4. Changes the example timezone from Asia/Beijing to Asia/Hong+Kong b/c `Asia/Beijing` is not supported by pytz. 5. Any tz-aware : tz-naive comparison fails with TypeError, as will mismatched, localize, etc. calls. 6. Changes `SparseArray` indexing error messages to match tuple message for completeness. 7. Improves the window check in ols. After you all say it's okay to merge, I'll update the docs to reflect changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/3956
2013-06-19T03:46:35Z
2013-06-19T21:09:34Z
2013-06-19T21:09:34Z
2014-06-19T19:20:54Z
BLD: Tweak to Makefile.
diff --git a/Makefile b/Makefile index 6b7e02404525b..5349443ed477f 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,10 @@ clean: clean_pyc -rm -rf build dist - -find . -name '*.so' -exec rm -f {} \; + -find . -name '*.so' -exec rm {} \; clean_pyc: - -find . -name '*.pyc' -exec rm -f {} \; + -find . -name '*.pyc' -or -name '*.pyo' -exec rm {} \; tseries: pandas/lib.pyx pandas/tslib.pyx pandas/hashtable.pyx python setup.py build_ext --inplace
https://api.github.com/repos/pandas-dev/pandas/pulls/3955
2013-06-19T02:21:19Z
2013-06-19T12:53:19Z
2013-06-19T12:53:19Z
2014-07-16T08:14:53Z
Doc for pandas.io.data
diff --git a/doc/source/io.rst b/doc/source/io.rst index d2d0c5c23af9e..c4d7497308524 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2463,3 +2463,58 @@ Alternatively, the function :func:`~pandas.io.stata.read_stata` can be used import os os.remove('stata.dta') + +Data +---- + +Functions from :mod:`pandas.io.data` extract data from various Internet +sources into a DataFrame. Currently the following sources are supported: + + - Yahoo! Finance + - Google Finance + - St. Louis FED (FRED) + - Kenneth French's data library + +It should be noted, that various sources support different kinds of data, so not all sources implement the same methods and the data elements returned might also differ. + +Loading Yahoo! Finance data: + +.. ipython:: python + + import pandas.io.data as web + from datetime import datetime + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + f=web.DataReader("F", 'yahoo', start, end) + f.ix['2010-01-04'] + +Loading Google Finance data: + +.. ipython:: python + + import pandas.io.data as web + from datetime import datetime + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + f=web.DataReader("F", 'google', start, end) + f.ix['2010-01-04'] + +Loading FRED data: + +.. ipython:: python + + import pandas.io.data as web + from datetime import datetime + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + gdp=web.DataReader("GDP", "fred", start, end) + gdp.ix['2013-01-01'] + +Loading Fama/French data (the dataset names are listed at `Fama/French Data Library +<http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html>`_): + +.. ipython:: python + + import pandas.io.data as web + ip=web.DataReader("5_Industry_Portfolios", "famafrench") + ip[4].ix[192607]
https://api.github.com/repos/pandas-dev/pandas/pulls/3953
2013-06-19T01:39:17Z
2013-06-22T17:36:32Z
2013-06-22T17:36:32Z
2013-06-22T17:36:51Z
ENH: enable support for iterator with read_hdf in HDFStore (GH3937)
diff --git a/RELEASE.rst b/RELEASE.rst index ebd88091050f1..8e4bdd3cba268 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -101,6 +101,7 @@ pandas 0.11.1 to select with a Storer; these are invalid parameters at this time - can now specify an ``encoding`` option to ``append/put`` to enable alternate encodings (GH3750_) + - enable support for ``iterator/chunksize`` with ``read_hdf`` - The repr() for (Multi)Index now obeys display.max_seq_items rather then numpy threshold print options. (GH3426_, GH3466_) - Added mangle_dupe_cols option to read_table/csv, allowing users diff --git a/doc/source/io.rst b/doc/source/io.rst index 6fee8ad35e10c..e586c7efeec61 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1925,6 +1925,18 @@ The default is 50,000 rows returned in a chunk. for df in store.select('df', chunksize=3): print df +.. note:: + + .. versionadded:: 0.11.1 + + You can also use the iterator with ``read_hdf`` which will open, then + automatically close the store when finished iterating. + + .. code-block:: python + + for df in read_hdf('store.h5','df', chunsize=3): + print df + Note, that the chunksize keyword applies to the **returned** rows. So if you are doing a query, then that set will be subdivided and returned in the iterator. Keep in mind that if you do not pass a ``where`` selection criteria diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index d6b8c6d516b25..97f236166be45 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -6,6 +6,11 @@ v0.11.1 (June ??, 2013) This is a minor release from 0.11.0 and includes several new features and enhancements along with a large number of bug fixes. +Highlites include a consistent I/O API naming scheme, routines to read html, +write multi-indexes to csv files, read & write STATA data files, read & write JSON format +files, Python 3 support for ``HDFStore``, filtering of groupby expressions via ``filter``, and a +revamped ``replace`` routine that accepts regular expressions. 
+ API changes ~~~~~~~~~~~ @@ -148,8 +153,8 @@ API changes ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try until success is also valid -Enhancements -~~~~~~~~~~~~ +I/O Enhancements +~~~~~~~~~~~~~~~~ - ``pd.read_html()`` can now parse HTML strings, files or urls and return DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_). @@ -184,28 +189,6 @@ Enhancements accessable via ``read_json`` top-level function for reading, and ``to_json`` DataFrame method for writing, :ref:`See the docs<io.json>` - - ``DataFrame.replace()`` now allows regular expressions on contained - ``Series`` with object dtype. See the examples section in the regular docs - :ref:`Replacing via String Expression <missing_data.replace_expression>` - - For example you can do - - .. ipython :: python - - df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]}) - df.replace(regex=r'\s*\.\s*', value=np.nan) - - to replace all occurrences of the string ``'.'`` with zero or more - instances of surrounding whitespace with ``NaN``. - - Regular string replacement still works as expected. For example, you can do - - .. ipython :: python - - df.replace('.', np.nan) - - to replace all occurrences of the string ``'.'`` with ``NaN``. - - Multi-index column support for reading and writing csv format files - The ``header`` option in ``read_csv`` now accepts a @@ -225,19 +208,62 @@ Enhancements with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*. + .. ipython:: python + + from pandas.util.testing import makeCustomDataframe as mkdf + df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) + df.to_csv('mi.csv',tupleize_cols=False) + print open('mi.csv').read() + pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False) + + .. 
ipython:: python + :suppress: + + import os + os.remove('mi.csv') + + - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3 + + - Iterator support via ``read_hdf`` that automatically opens and closes the + store when iteration is finished. This is only for *tables* + .. ipython:: python - from pandas.util.testing import makeCustomDataframe as mkdf - df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) - df.to_csv('mi.csv',tupleize_cols=False) - print open('mi.csv').read() - pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False) + path = 'store_iterator.h5' + DataFrame(randn(10,2)).to_hdf(path,'df',table=True) + for df in read_hdf(path,'df', chunksize=3): + print df .. ipython:: python - :suppress: + :suppress: - import os - os.remove('mi.csv') + import os + os.remove(path) + +Other Enhancements +~~~~~~~~~~~~~~~~~~ + + - ``DataFrame.replace()`` now allows regular expressions on contained + ``Series`` with object dtype. See the examples section in the regular docs + :ref:`Replacing via String Expression <missing_data.replace_expression>` + + For example you can do + + .. ipython :: python + + df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]}) + df.replace(regex=r'\s*\.\s*', value=np.nan) + + to replace all occurrences of the string ``'.'`` with zero or more + instances of surrounding whitespace with ``NaN``. + + Regular string replacement still works as expected. For example, you can do + + .. ipython :: python + + df.replace('.', np.nan) + + to replace all occurrences of the string ``'.'`` with ``NaN``. - ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name`` to specify custom column names of the returned DataFrame. @@ -261,8 +287,6 @@ Enhancements pd.get_option('a.b') pd.get_option('b.c') - - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3 - - The ``filter`` method for group objects returns a subset of the original object. 
Suppose we want to take only elements that belong to groups with a group sum greater than 2. diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 62aa1b99dfac0..83e46fc949a4d 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -196,12 +196,27 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, app def read_hdf(path_or_buf, key, **kwargs): """ read from the store, closeit if we opened it """ - f = lambda store: store.select(key, **kwargs) + f = lambda store, auto_close: store.select(key, auto_close=auto_close, **kwargs) if isinstance(path_or_buf, basestring): - with get_store(path_or_buf) as store: - return f(store) - f(path_or_buf) + + # can't auto open/close if we are using an iterator + # so delegate to the iterator + store = HDFStore(path_or_buf) + try: + return f(store, True) + except: + + # if there is an error, close the store + try: + store.close() + except: + pass + + raise + + # a passed store; user controls open/close + f(path_or_buf, False) class HDFStore(object): """ @@ -405,7 +420,7 @@ def get(self, key): raise KeyError('No object named %s in the file' % key) return self._read_group(group) - def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, **kwargs): + def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria @@ -419,6 +434,7 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator= columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator + auto_close : boolean, should automatically close the store when finished, default is False """ group = self.get_node(key) @@ -434,9 +450,11 @@ def func(_start, _stop): return s.read(where=where, 
start=_start, stop=_stop, columns=columns, **kwargs) if iterator or chunksize is not None: - return TableIterator(func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize) + if not s.is_table: + raise TypeError("can only use an iterator or chunksize on a table") + return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close) - return TableIterator(func, nrows=s.nrows, start=start, stop=stop).get_values() + return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, auto_close=auto_close).get_values() def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs): """ @@ -473,7 +491,7 @@ def select_column(self, key, column, **kwargs): """ return self.get_storer(key).read_column(column = column, **kwargs) - def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, **kwargs): + def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters @@ -541,9 +559,9 @@ def func(_start, _stop): return concat(objs, axis=axis, verify_integrity=True) if iterator or chunksize is not None: - return TableIterator(func, nrows=nrows, start=start, stop=stop, chunksize=chunksize) + return TableIterator(self, func, nrows=nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close) - return TableIterator(func, nrows=nrows, start=start, stop=stop).get_values() + return TableIterator(self, func, nrows=nrows, start=start, stop=stop, auto_close=auto_close).get_values() def put(self, key, value, table=None, append=False, **kwargs): @@ -916,16 +934,20 @@ class TableIterator(object): Parameters ---------- - func : the function to get results + store : the reference store + func : the function to get results nrows : the rows to iterate on start : the passed start 
value (default is None) - stop : the passed stop value (default is None) + stop : the passed stop value (default is None) chunksize : the passed chunking valeu (default is 50000) + auto_close : boolean, automatically close the store at the end of iteration, + default is False kwargs : the passed kwargs """ - def __init__(self, func, nrows, start=None, stop=None, chunksize=None): - self.func = func + def __init__(self, store, func, nrows, start=None, stop=None, chunksize=None, auto_close=False): + self.store = store + self.func = func self.nrows = nrows or 0 self.start = start or 0 @@ -937,6 +959,7 @@ def __init__(self, func, nrows, start=None, stop=None, chunksize=None): chunksize = 100000 self.chunksize = chunksize + self.auto_close = auto_close def __iter__(self): current = self.start @@ -950,9 +973,16 @@ def __iter__(self): yield v + self.close() + + def close(self): + if self.auto_close: + self.store.close() + def get_values(self): - return self.func(self.start, self.stop) - + results = self.func(self.start, self.stop) + self.close() + return results class IndexCol(object): """ an index column description class diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 3266a906dcfae..f7f77698f51f5 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -2078,6 +2078,7 @@ def test_select_iterator(self): results = [] for s in store.select('df',chunksize=100): results.append(s) + self.assert_(len(results) == 5) result = concat(results) tm.assert_frame_equal(expected, result) @@ -2085,7 +2086,28 @@ def test_select_iterator(self): for s in store.select('df',chunksize=150): results.append(s) result = concat(results) - tm.assert_frame_equal(expected, result) + tm.assert_frame_equal(result, expected) + + with tm.ensure_clean(self.path) as path: + + df = tm.makeTimeDataFrame(500) + df.to_hdf(path,'df_non_table') + self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100) + 
self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True) + + with tm.ensure_clean(self.path) as path: + + df = tm.makeTimeDataFrame(500) + df.to_hdf(path,'df',table=True) + + results = [] + for x in read_hdf(path,'df',chunksize=100): + results.append(x) + + self.assert_(len(results) == 5) + result = concat(results) + tm.assert_frame_equal(result, df) + tm.assert_frame_equal(result, read_hdf(path,'df')) # multiple
closes #3937 DOC: update v0.11.1.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/3949
2013-06-18T22:05:48Z
2013-06-18T22:57:40Z
2013-06-18T22:57:40Z
2014-07-01T10:07:30Z
ENH: update bundled ujson to latest v1.33
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index fe717f56e6bea..997229487e1b9 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -179,21 +179,21 @@ def test_frame_from_json_bad_data(self): # too few indices json = StringIO('{"columns":["A","B"],' '"index":["2","3"],' - '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"') + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') self.assertRaises(ValueError, read_json, json, orient="split") # too many columns json = StringIO('{"columns":["A","B","C"],' '"index":["1","2","3"],' - '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"') + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') self.assertRaises(AssertionError, read_json, json, orient="split") # bad key json = StringIO('{"badkey":["A","B"],' '"index":["2","3"],' - '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"') + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') self.assertRaises(TypeError, read_json, json, orient="split") diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py index 2e775b4a541ea..23bd41d245f75 100644 --- a/pandas/io/tests/test_json/test_ujson.py +++ b/pandas/io/tests/test_json/test_ujson.py @@ -15,6 +15,8 @@ import calendar import StringIO import re +import random +import decimal from functools import partial import pandas.util.py3compat as py3compat @@ -36,6 +38,72 @@ def _skip_if_python_ver(skip_major, skip_minor=None): else partial(json.dumps, encoding="utf-8")) class UltraJSONTests(TestCase): + + def test_encodeDecimal(self): + sut = decimal.Decimal("1337.1337") + encoded = ujson.encode(sut, double_precision=100) + decoded = ujson.decode(encoded) + self.assertEquals(decoded, 1337.1337) + + def test_encodeStringConversion(self): + input = "A string \\ / \b \f \n \r \t </script> &" + not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"' + html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e 
\\u0026"' + + def helper(expected_output, **encode_kwargs): + output = ujson.encode(input, **encode_kwargs) + self.assertEquals(input, json.loads(output)) + self.assertEquals(output, expected_output) + self.assertEquals(input, ujson.decode(output)) + + # Default behavior assumes encode_html_chars=False. + helper(not_html_encoded, ensure_ascii=True) + helper(not_html_encoded, ensure_ascii=False) + + # Make sure explicit encode_html_chars=False works. + helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False) + helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False) + + # Make sure explicit encode_html_chars=True does the encoding. + helper(html_encoded, ensure_ascii=True, encode_html_chars=True) + helper(html_encoded, ensure_ascii=False, encode_html_chars=True) + + def test_doubleLongIssue(self): + sut = {u'a': -4342969734183514} + encoded = json.dumps(sut) + decoded = json.loads(encoded) + self.assertEqual(sut, decoded) + encoded = ujson.encode(sut, double_precision=100) + decoded = ujson.decode(encoded) + self.assertEqual(sut, decoded) + + def test_doubleLongDecimalIssue(self): + sut = {u'a': -12345678901234.56789012} + encoded = json.dumps(sut) + decoded = json.loads(encoded) + self.assertEqual(sut, decoded) + encoded = ujson.encode(sut, double_precision=100) + decoded = ujson.decode(encoded) + self.assertEqual(sut, decoded) + + + def test_encodeDecodeLongDecimal(self): + sut = {u'a': -528656961.4399388} + encoded = ujson.dumps(sut, double_precision=15) + ujson.decode(encoded) + + def test_decimalDecodeTest(self): + sut = {u'a': 4.56} + encoded = ujson.encode(sut) + decoded = ujson.decode(encoded) + self.assertNotEqual(sut, decoded) + + def test_decimalDecodeTestPrecise(self): + sut = {u'a': 4.56} + encoded = ujson.encode(sut) + decoded = ujson.decode(encoded, precise_float=True) + self.assertEqual(sut, decoded) + def test_encodeDictWithUnicodeKeys(self): input = { u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": 
u"value1", u"key1": u"value1", u"key1": u"value1" } output = ujson.encode(input) @@ -59,6 +127,7 @@ def test_encodeWithDecimal(self): def test_encodeDoubleNegConversion(self): input = -math.pi output = ujson.encode(input) + self.assertEquals(round(input, 5), round(json.loads(output), 5)) self.assertEquals(round(input, 5), round(ujson.decode(output), 5)) @@ -93,10 +162,6 @@ def test_doublePrecisionTest(self): self.assertEquals(round(input, 3), json.loads(output)) self.assertEquals(round(input, 3), ujson.decode(output)) - output = ujson.encode(input) - self.assertEquals(round(input, 5), json.loads(output)) - self.assertEquals(round(input, 5), ujson.decode(output)) - def test_invalidDoublePrecision(self): input = 30.12345678901234567890 output = ujson.encode(input, double_precision = 20) @@ -373,6 +438,15 @@ def test_decodeBrokenArrayEnd(self): return assert False, "Wrong exception" + def test_decodeArrayDepthTooBig(self): + input = '[' * (1024 * 1024) + try: + ujson.decode(input) + assert False, "Expected exception!" + except(ValueError): + return + assert False, "Wrong exception" + def test_decodeBrokenObjectEnd(self): input = "}" try: @@ -382,6 +456,15 @@ def test_decodeBrokenObjectEnd(self): return assert False, "Wrong exception" + def test_decodeObjectDepthTooBig(self): + input = '{' * (1024 * 1024) + try: + ujson.decode(input) + assert False, "Expected exception!" 
+ except(ValueError): + return + assert False, "Wrong exception" + def test_decodeStringUnterminated(self): input = "\"TESTING" try: @@ -567,7 +650,7 @@ def test_numericIntFrcExp(self): self.assertAlmostEqual(output, json.loads(input)) def test_decodeNumericIntExpEPLUS(self): - input = "1337E+40" + input = "1337E+9" output = ujson.decode(input) self.assertAlmostEqual(output, json.loads(input)) @@ -1192,7 +1275,165 @@ def test_datetimeindex(self): decoded = Series(ujson.decode(ujson.encode(ts))) idx_values = decoded.index.values.astype(np.int64) decoded.index = DatetimeIndex(idx_values) - tm.assert_series_equal(np.round(ts, 5), decoded) + tm.assert_series_equal(ts, decoded) + + def test_decodeArrayTrailingCommaFail(self): + input = "[31337,]" + try: + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayLeadingCommaFail(self): + input = "[,31337]" + try: + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayOnlyCommaFail(self): + input = "[,]" + try: + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayUnmatchedBracketFail(self): + input = "[]]" + try: + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayEmpty(self): + input = "[]" + ujson.decode(input) + + def test_decodeArrayOneItem(self): + input = "[31337]" + ujson.decode(input) + + def test_decodeBigValue(self): + input = "9223372036854775807" + ujson.decode(input) + + def test_decodeSmallValue(self): + input = "-9223372036854775808" + ujson.decode(input) + + def test_decodeTooBigValue(self): + try: + input = "9223372036854775808" + ujson.decode(input) + except ValueError, e: + pass + else: + assert False, "expected ValueError" + + def test_decodeTooSmallValue(self): + try: + input = "-90223372036854775809" + ujson.decode(input) + except ValueError,e: 
+ pass + else: + assert False, "expected ValueError" + + def test_decodeVeryTooBigValue(self): + try: + input = "9223372036854775808" + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeVeryTooSmallValue(self): + try: + input = "-90223372036854775809" + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeWithTrailingWhitespaces(self): + input = "{}\n\t " + ujson.decode(input) + + def test_decodeWithTrailingNonWhitespaces(self): + try: + input = "{}\n\t a" + ujson.decode(input) + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayWithBigInt(self): + try: + ujson.loads('[18446098363113800555]') + except ValueError: + pass + else: + assert False, "expected ValueError" + + def test_decodeArrayFaultyUnicode(self): + try: + ujson.loads('[18446098363113800555]') + except ValueError: + pass + else: + assert False, "expected ValueError" + + + def test_decodeFloatingPointAdditionalTests(self): + self.assertEquals(-1.1234567893, ujson.loads("-1.1234567893")) + self.assertEquals(-1.234567893, ujson.loads("-1.234567893")) + self.assertEquals(-1.34567893, ujson.loads("-1.34567893")) + self.assertEquals(-1.4567893, ujson.loads("-1.4567893")) + self.assertEquals(-1.567893, ujson.loads("-1.567893")) + self.assertEquals(-1.67893, ujson.loads("-1.67893")) + self.assertEquals(-1.7893, ujson.loads("-1.7893")) + self.assertEquals(-1.893, ujson.loads("-1.893")) + self.assertEquals(-1.3, ujson.loads("-1.3")) + + self.assertEquals(1.1234567893, ujson.loads("1.1234567893")) + self.assertEquals(1.234567893, ujson.loads("1.234567893")) + self.assertEquals(1.34567893, ujson.loads("1.34567893")) + self.assertEquals(1.4567893, ujson.loads("1.4567893")) + self.assertEquals(1.567893, ujson.loads("1.567893")) + self.assertEquals(1.67893, ujson.loads("1.67893")) + self.assertEquals(1.7893, ujson.loads("1.7893")) + 
self.assertEquals(1.893, ujson.loads("1.893")) + self.assertEquals(1.3, ujson.loads("1.3")) + + def test_encodeBigSet(self): + s = set() + for x in xrange(0, 100000): + s.add(x) + ujson.encode(s) + + def test_encodeEmptySet(self): + s = set() + self.assertEquals("[]", ujson.encode(s)) + + def test_encodeSet(self): + s = set([1,2,3,4,5,6,7,8,9]) + enc = ujson.encode(s) + dec = ujson.decode(enc) + + for v in dec: + self.assertTrue(v in s) + """ def test_decodeNumericIntFrcOverflow(self): diff --git a/pandas/src/ujson/lib/ultrajson.h b/pandas/src/ujson/lib/ultrajson.h index eae665f00f03e..4d7af3dde1f02 100644 --- a/pandas/src/ujson/lib/ultrajson.h +++ b/pandas/src/ujson/lib/ultrajson.h @@ -1,37 +1,38 @@ /* -Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. All advertising materials mentioning features or use of this software - must display the following acknowledgement: - This product includes software developed by ESN Social Software AB (www.esn.me). -4. Neither the name of the ESN Social Software AB nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ESN Social Software AB nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Portions of code from: -MODP_ASCII - Ascii transformations (upper/lower, etc) + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) http://code.google.com/p/stringencoders/ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. 
+Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. */ /* @@ -54,8 +55,6 @@ tree doesn't have cyclic references. #include <stdio.h> #include <wchar.h> -//#define JSON_DECODE_NUMERIC_AS_DOUBLE - // Don't output any extra whitespaces when encoding #define JSON_NO_EXTRA_WHITESPACE @@ -69,6 +68,11 @@ tree doesn't have cyclic references. #define JSON_MAX_RECURSION_DEPTH 1024 #endif +// Max recursion depth, default for decoder +#ifndef JSON_MAX_OBJECT_DEPTH +#define JSON_MAX_OBJECT_DEPTH 1024 +#endif + /* Dictates and limits how much stack space for buffers UltraJSON will use before resorting to provided heap functions */ #ifndef JSON_MAX_STACK_BUFFER_SIZE @@ -95,26 +99,34 @@ typedef __int64 JSLONG; #else -#include <sys/types.h> +#include <stdint.h> typedef int64_t JSINT64; -typedef u_int64_t JSUINT64; +typedef uint64_t JSUINT64; typedef int32_t JSINT32; -typedef u_int32_t JSUINT32; +typedef uint32_t JSUINT32; #define FASTCALL_MSVC + +#if !defined __x86_64__ #define FASTCALL_ATTR __attribute__((fastcall)) +#else +#define FASTCALL_ATTR +#endif + #define INLINE_PREFIX inline -typedef u_int8_t JSUINT8; -typedef u_int16_t JSUTF16; -typedef u_int32_t JSUTF32; +typedef uint8_t JSUINT8; +typedef uint16_t JSUTF16; +typedef uint32_t JSUTF32; typedef int64_t JSLONG; #define EXPORTFUNCTION #endif +#if !(defined(__LITTLE_ENDIAN__) || defined(__BIG_ENDIAN__)) + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define __LITTLE_ENDIAN__ #else @@ -125,22 +137,24 @@ typedef int64_t JSLONG; #endif +#endif + #if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #error "Endianess not supported" #endif enum JSTYPES { - JT_NULL, // NULL - JT_TRUE, //boolean true - JT_FALSE, //boolean false - JT_INT, //(JSINT32 (signed 32-bit)) - JT_LONG, //(JSINT64 (signed 64-bit)) - JT_DOUBLE, //(double) - JT_UTF8, //(char 
8-bit) - JT_ARRAY, // Array structure - JT_OBJECT, // Key/Value structure - JT_INVALID, // Internal, do not return nor expect + JT_NULL, // NULL + JT_TRUE, //boolean true + JT_FALSE, //boolean false + JT_INT, //(JSINT32 (signed 32-bit)) + JT_LONG, //(JSINT64 (signed 64-bit)) + JT_DOUBLE, //(double) + JT_UTF8, //(char 8-bit) + JT_ARRAY, // Array structure + JT_OBJECT, // Key/Value structure + JT_INVALID, // Internal, do not return nor expect }; typedef void * JSOBJ; @@ -148,9 +162,9 @@ typedef void * JSITER; typedef struct __JSONTypeContext { - int type; - void *encoder; - void *prv; + int type; + void *encoder; + void *prv; } JSONTypeContext; /* @@ -166,79 +180,82 @@ typedef void *(*JSPFN_REALLOC)(void *base, size_t size); typedef struct __JSONObjectEncoder { - void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc); - void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc); - const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen); - JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc); - JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc); - double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc); - - /* - Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT) - Implementor should setup iteration state in ti->prv - */ - JSPFN_ITERBEGIN iterBegin; - - /* - Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items. - Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this - */ - JSPFN_ITERNEXT iterNext; - - /* - Ends the iteration of an iteratable object. - Any iteration state stored in ti->prv can be freed here - */ - JSPFN_ITEREND iterEnd; - - /* - Returns a reference to the value object of an iterator - The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object - */ - JSPFN_ITERGETVALUE iterGetValue; - - /* - Return name of iterator. 
- The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object - */ - JSPFN_ITERGETNAME iterGetName; - - /* - Release a value as indicated by setting ti->release = 1 in the previous getValue call. - The ti->prv array should contain the necessary context to release the value - */ - void (*releaseObject)(JSOBJ obj); - - /* Library functions - Set to NULL to use STDLIB malloc,realloc,free */ - JSPFN_MALLOC malloc; - JSPFN_REALLOC realloc; - JSPFN_FREE free; - - /* - Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/ - int recursionMax; - - /* - Configuration for max decimals of double floating poiunt numbers to encode (0-9) */ - int doublePrecision; - - /* - If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */ - int forceASCII; - - - /* - Set to an error message if error occured */ - const char *errorMsg; - JSOBJ errorObj; - - /* Buffer stuff */ - char *start; - char *offset; - char *end; - int heap; - int level; + void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc); + void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc); + const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen); + JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc); + JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc); + double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc); + + /* + Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT) + Implementor should setup iteration state in ti->prv + */ + JSPFN_ITERBEGIN iterBegin; + + /* + Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items. + Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this + */ + JSPFN_ITERNEXT iterNext; + + /* + Ends the iteration of an iteratable object. 
+ Any iteration state stored in ti->prv can be freed here + */ + JSPFN_ITEREND iterEnd; + + /* + Returns a reference to the value object of an iterator + The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object + */ + JSPFN_ITERGETVALUE iterGetValue; + + /* + Return name of iterator. + The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object + */ + JSPFN_ITERGETNAME iterGetName; + + /* + Release a value as indicated by setting ti->release = 1 in the previous getValue call. + The ti->prv array should contain the necessary context to release the value + */ + void (*releaseObject)(JSOBJ obj); + + /* Library functions + Set to NULL to use STDLIB malloc,realloc,free */ + JSPFN_MALLOC malloc; + JSPFN_REALLOC realloc; + JSPFN_FREE free; + + /* + Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/ + int recursionMax; + + /* + Configuration for max decimals of double floating poiunt numbers to encode (0-9) */ + int doublePrecision; + + /* + If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */ + int forceASCII; + + /* + If true, '<', '>', and '&' characters will be encoded as \u003c, \u003e, and \u0026, respectively. If false, no special encoding will be used. 
*/ + int encodeHTMLChars; + + /* + Set to an error message if error occured */ + const char *errorMsg; + JSOBJ errorObj; + + /* Buffer stuff */ + char *start; + char *offset; + char *end; + int heap; + int level; } JSONObjectEncoder; @@ -268,29 +285,27 @@ EXPORTFUNCTION char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char * typedef struct __JSONObjectDecoder { - JSOBJ (*newString)(wchar_t *start, wchar_t *end); - int (*objectAddKey)(JSOBJ obj, JSOBJ name, JSOBJ value); - int (*arrayAddItem)(JSOBJ obj, JSOBJ value); - JSOBJ (*newTrue)(void); - JSOBJ (*newFalse)(void); - JSOBJ (*newNull)(void); - JSOBJ (*newObject)(void *decoder); - JSOBJ (*endObject)(JSOBJ obj); - JSOBJ (*newArray)(void *decoder); - JSOBJ (*endArray)(JSOBJ obj); - JSOBJ (*newInt)(JSINT32 value); - JSOBJ (*newLong)(JSINT64 value); - JSOBJ (*newDouble)(double value); - void (*releaseObject)(JSOBJ obj, void *decoder); - JSPFN_MALLOC malloc; - JSPFN_FREE free; - JSPFN_REALLOC realloc; - - char *errorStr; - char *errorOffset; - - - + JSOBJ (*newString)(void *prv, wchar_t *start, wchar_t *end); + int (*objectAddKey)(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value); + int (*arrayAddItem)(void *prv, JSOBJ obj, JSOBJ value); + JSOBJ (*newTrue)(void *prv); + JSOBJ (*newFalse)(void *prv); + JSOBJ (*newNull)(void *prv); + JSOBJ (*newObject)(void *prv, void *decoder); + JSOBJ (*endObject)(void *prv, JSOBJ obj); + JSOBJ (*newArray)(void *prv, void *decoder); + JSOBJ (*endArray)(void *prv, JSOBJ obj); + JSOBJ (*newInt)(void *prv, JSINT32 value); + JSOBJ (*newLong)(void *prv, JSINT64 value); + JSOBJ (*newDouble)(void *prv, double value); + void (*releaseObject)(void *prv, JSOBJ obj, void *decoder); + JSPFN_MALLOC malloc; + JSPFN_FREE free; + JSPFN_REALLOC realloc; + char *errorStr; + char *errorOffset; + int preciseFloat; + void *prv; } JSONObjectDecoder; EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer); diff --git a/pandas/src/ujson/lib/ultrajsondec.c 
b/pandas/src/ujson/lib/ultrajsondec.c index eda30f3fea839..c5cf341ad3092 100644 --- a/pandas/src/ujson/lib/ultrajsondec.c +++ b/pandas/src/ujson/lib/ultrajsondec.c @@ -1,37 +1,38 @@ /* -Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. All advertising materials mentioning features or use of this software - must display the following acknowledgement: - This product includes software developed by ESN Social Software AB (www.esn.me). -4. Neither the name of the ESN Social Software AB nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the ESN Social Software AB nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Portions of code from: -MODP_ASCII - Ascii transformations (upper/lower, etc) + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) http://code.google.com/p/stringencoders/ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +* Copyright (c) 1988-1993 The Regents of the University of California. +* Copyright (c) 1994 Sun Microsystems, Inc. */ #include "ultrajson.h" @@ -40,806 +41,871 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. 
All rights rese #include <string.h> #include <limits.h> #include <wchar.h> +#include <stdlib.h> +#include <errno.h> + +#ifndef TRUE +#define TRUE 1 +#define FALSE 0 +#endif +#ifndef NULL +#define NULL 0 +#endif struct DecoderState { - char *start; - char *end; - wchar_t *escStart; - wchar_t *escEnd; - int escHeap; - int lastType; - JSONObjectDecoder *dec; + char *start; + char *end; + wchar_t *escStart; + wchar_t *escEnd; + int escHeap; + int lastType; + JSUINT32 objDepth; + void *prv; + JSONObjectDecoder *dec; }; JSOBJ FASTCALL_MSVC decode_any( struct DecoderState *ds) FASTCALL_ATTR; typedef JSOBJ (*PFN_DECODER)( struct DecoderState *ds); -#define RETURN_JSOBJ_NULLCHECK(_expr) return(_expr); -double createDouble(double intNeg, double intValue, double frcValue, int frcDecimalCount) +static JSOBJ SetError( struct DecoderState *ds, int offset, const char *message) { - static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000}; - - return (intValue + (frcValue / g_pow10[frcDecimalCount])) * intNeg; + ds->dec->errorOffset = ds->start + offset; + ds->dec->errorStr = (char *) message; + return NULL; } -static JSOBJ SetError( struct DecoderState *ds, int offset, const char *message) +static void ClearError( struct DecoderState *ds) { - ds->dec->errorOffset = ds->start + offset; - ds->dec->errorStr = (char *) message; - return NULL; + ds->dec->errorOffset = 0; + ds->dec->errorStr = NULL; } +double createDouble(double intNeg, double intValue, double frcValue, int frcDecimalCount) +{ + static const double g_pow10[] = {1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001,0.0000001, 0.00000001, 0.000000001, 0.0000000001, 0.00000000001, 0.000000000001, 0.0000000000001, 0.00000000000001, 0.000000000000001}; + return (intValue + (frcValue * g_pow10[frcDecimalCount])) * intNeg; +} -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric ( struct 
DecoderState *ds) +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decodePreciseFloat(struct DecoderState *ds) { -#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE - double intNeg = 1; - double intValue; -#else - int intNeg = 1; - JSLONG intValue; -#endif + char *end; + double value; + errno = 0; - double expNeg; - int chr; - int decimalCount = 0; - double frcValue = 0.0; - double expValue; - char *offset = ds->start; + value = strtod(ds->start, &end); - if (*(offset) == '-') - { - offset ++; - intNeg = -1; - } + if (errno == ERANGE) + { + return SetError(ds, -1, "Range error when decoding numeric as double"); + } - // Scan integer part - intValue = 0; + ds->start = end; + return ds->dec->newDouble(ds->prv, value); +} - while (1) - { - chr = (int) (unsigned char) *(offset); +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric (struct DecoderState *ds) +{ + int intNeg = 1; + int mantSize = 0; + JSUINT64 intValue; + int chr; + int decimalCount = 0; + double frcValue = 0.0; + double expNeg; + double expValue; + char *offset = ds->start; + + JSUINT64 overflowLimit = LLONG_MAX; + + if (*(offset) == '-') + { + offset ++; + intNeg = -1; + overflowLimit = LLONG_MIN; + } + + // Scan integer part + intValue = 0; - switch (chr) + while (1) + { + chr = (int) (unsigned char) *(offset); + + switch (chr) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + //FIXME: Check for arithemtic overflow here + //PERF: Don't do 64-bit arithmetic here unless we know we have to + intValue = intValue * 10ULL + (JSLONG) (chr - 48); + + if (intValue > overflowLimit) { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - //FIXME: Check for arithemtic overflow here - //PERF: Don't do 64-bit arithmetic here unless we know we have to -#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE - intValue = intValue * 10.0 + (double) (chr - 48); -#else - intValue = intValue * 10LL + (JSLONG) (chr - 48); 
-#endif - offset ++; - break; - - case '.': - offset ++; - goto DECODE_FRACTION; - break; - - case 'e': - case 'E': - offset ++; - goto DECODE_EXPONENT; - break; - - default: - goto BREAK_INT_LOOP; - break; + return SetError(ds, -1, overflowLimit == LLONG_MAX ? "Value is too big" : "Value is too small"); } + + offset ++; + mantSize ++; + break; + } + case '.': + { + offset ++; + goto DECODE_FRACTION; + break; + } + case 'e': + case 'E': + { + offset ++; + goto DECODE_EXPONENT; + break; + } + + default: + { + goto BREAK_INT_LOOP; + break; + } } + } BREAK_INT_LOOP: - ds->lastType = JT_INT; - ds->start = offset; + ds->lastType = JT_INT; + ds->start = offset; - //If input string is LONGLONG_MIN here the value is already negative so we should not flip it - -#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE -#else - if (intValue < 0) - { - intNeg = 1; - } -#endif - - //dbg1 = (intValue * intNeg); - //dbg2 = (JSLONG) dbg1; - -#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE - if (intValue > (double) INT_MAX || intValue < (double) INT_MIN) -#else - if ( (intValue >> 31)) -#endif - { - RETURN_JSOBJ_NULLCHECK(ds->dec->newLong( (JSINT64) (intValue * (JSINT64) intNeg))); - } - else - { - RETURN_JSOBJ_NULLCHECK(ds->dec->newInt( (JSINT32) (intValue * intNeg))); - } + if ((intValue >> 31)) + { + return ds->dec->newLong(ds->prv, (JSINT64) (intValue * (JSINT64) intNeg)); + } + else + { + return ds->dec->newInt(ds->prv, (JSINT32) (intValue * intNeg)); + } +DECODE_FRACTION: + if (ds->dec->preciseFloat) + { + return decodePreciseFloat(ds); + } -DECODE_FRACTION: + // Scan fraction part + frcValue = 0.0; + for (;;) + { + chr = (int) (unsigned char) *(offset); - // Scan fraction part - frcValue = 0.0; - while (1) + switch (chr) { - chr = (int) (unsigned char) *(offset); - - switch (chr) + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + if (decimalCount < JSON_DOUBLE_MAX_DECIMALS) { - case '0': - case '1': - case '2': - case '3': - case 
'4': - case '5': - case '6': - case '7': - case '8': - case '9': - if (decimalCount < JSON_DOUBLE_MAX_DECIMALS) - { - frcValue = frcValue * 10.0 + (double) (chr - 48); - decimalCount ++; - } - offset ++; - break; - - case 'e': - case 'E': - offset ++; - goto DECODE_EXPONENT; - break; - - default: - goto BREAK_FRC_LOOP; + frcValue = frcValue * 10.0 + (double) (chr - 48); + decimalCount ++; } + offset ++; + break; + } + case 'e': + case 'E': + { + offset ++; + goto DECODE_EXPONENT; + break; + } + default: + { + goto BREAK_FRC_LOOP; + } } + } BREAK_FRC_LOOP: - - if (intValue < 0) - { - intNeg = 1; - } - - //FIXME: Check for arithemtic overflow here - ds->lastType = JT_DOUBLE; - ds->start = offset; - RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue, frcValue, decimalCount))); + //FIXME: Check for arithemtic overflow here + ds->lastType = JT_DOUBLE; + ds->start = offset; + return ds->dec->newDouble (ds->prv, createDouble( (double) intNeg, (double) intValue, frcValue, decimalCount)); DECODE_EXPONENT: - expNeg = 1.0; + if (ds->dec->preciseFloat) + { + return decodePreciseFloat(ds); + } - if (*(offset) == '-') - { - expNeg = -1.0; - offset ++; - } - else - if (*(offset) == '+') - { - expNeg = +1.0; - offset ++; - } + expNeg = 1.0; - expValue = 0.0; + if (*(offset) == '-') + { + expNeg = -1.0; + offset ++; + } + else + if (*(offset) == '+') + { + expNeg = +1.0; + offset ++; + } - while (1) - { - chr = (int) (unsigned char) *(offset); + expValue = 0.0; - switch (chr) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - expValue = expValue * 10.0 + (double) (chr - 48); - offset ++; - break; - - default: - goto BREAK_EXP_LOOP; + for (;;) + { + chr = (int) (unsigned char) *(offset); - } + switch (chr) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + expValue = expValue * 10.0 + 
(double) (chr - 48); + offset ++; + break; + } + default: + { + goto BREAK_EXP_LOOP; + } } + } BREAK_EXP_LOOP: - -#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE -#else - if (intValue < 0) - { - intNeg = 1; - } -#endif - - //FIXME: Check for arithemtic overflow here - ds->lastType = JT_DOUBLE; - ds->start = offset; - RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue , frcValue, decimalCount) * pow(10.0, expValue * expNeg))); + //FIXME: Check for arithemtic overflow here + ds->lastType = JT_DOUBLE; + ds->start = offset; + return ds->dec->newDouble (ds->prv, createDouble( (double) intNeg, (double) intValue , frcValue, decimalCount) * pow(10.0, expValue * expNeg)); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true ( struct DecoderState *ds) +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true ( struct DecoderState *ds) { - char *offset = ds->start; - offset ++; + char *offset = ds->start; + offset ++; - if (*(offset++) != 'r') - goto SETERROR; - if (*(offset++) != 'u') - goto SETERROR; - if (*(offset++) != 'e') - goto SETERROR; + if (*(offset++) != 'r') + goto SETERROR; + if (*(offset++) != 'u') + goto SETERROR; + if (*(offset++) != 'e') + goto SETERROR; - ds->lastType = JT_TRUE; - ds->start = offset; - RETURN_JSOBJ_NULLCHECK(ds->dec->newTrue()); + ds->lastType = JT_TRUE; + ds->start = offset; + return ds->dec->newTrue(ds->prv); SETERROR: - return SetError(ds, -1, "Unexpected character found when decoding 'true'"); + return SetError(ds, -1, "Unexpected character found when decoding 'true'"); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false ( struct DecoderState *ds) +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false ( struct DecoderState *ds) { - char *offset = ds->start; - offset ++; - - if (*(offset++) != 'a') - goto SETERROR; - if (*(offset++) != 'l') - goto SETERROR; - if (*(offset++) != 's') - goto SETERROR; - if (*(offset++) != 'e') - goto SETERROR; - - ds->lastType = JT_FALSE; - ds->start = offset; - 
RETURN_JSOBJ_NULLCHECK(ds->dec->newFalse()); + char *offset = ds->start; + offset ++; + + if (*(offset++) != 'a') + goto SETERROR; + if (*(offset++) != 'l') + goto SETERROR; + if (*(offset++) != 's') + goto SETERROR; + if (*(offset++) != 'e') + goto SETERROR; + + ds->lastType = JT_FALSE; + ds->start = offset; + return ds->dec->newFalse(ds->prv); SETERROR: - return SetError(ds, -1, "Unexpected character found when decoding 'false'"); - + return SetError(ds, -1, "Unexpected character found when decoding 'false'"); } - -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null ( struct DecoderState *ds) +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null ( struct DecoderState *ds) { - char *offset = ds->start; - offset ++; + char *offset = ds->start; + offset ++; - if (*(offset++) != 'u') - goto SETERROR; - if (*(offset++) != 'l') - goto SETERROR; - if (*(offset++) != 'l') - goto SETERROR; + if (*(offset++) != 'u') + goto SETERROR; + if (*(offset++) != 'l') + goto SETERROR; + if (*(offset++) != 'l') + goto SETERROR; - ds->lastType = JT_NULL; - ds->start = offset; - RETURN_JSOBJ_NULLCHECK(ds->dec->newNull()); + ds->lastType = JT_NULL; + ds->start = offset; + return ds->dec->newNull(ds->prv); SETERROR: - return SetError(ds, -1, "Unexpected character found when decoding 'null'"); + return SetError(ds, -1, "Unexpected character found when decoding 'null'"); } -FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds) +FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds) { - char *offset = ds->start; + char *offset; - while (1) + for (offset = ds->start; (ds->end - offset) > 0; offset ++) + { + switch (*offset) { - switch (*offset) - { - case ' ': - case '\t': - case '\r': - case '\n': - offset ++; - break; - - default: - ds->start = offset; - return; - } + case ' ': + case '\t': + case '\r': + case '\n': + break; + + default: + ds->start = offset; + return; } -} + } + if (offset == ds->end) + { + ds->start = ds->end; + } +} enum DECODESTRINGSTATE { - 
DS_ISNULL = 0x32, - DS_ISQUOTE, - DS_ISESCAPE, - DS_UTFLENERROR, + DS_ISNULL = 0x32, + DS_ISQUOTE, + DS_ISESCAPE, + DS_UTFLENERROR, }; -static const JSUINT8 g_decoderLookup[256] = +static const JSUINT8 g_decoderLookup[256] = { -/* 0x00 */ DS_ISNULL, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x20 */ 1, 1, DS_ISQUOTE, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DS_ISESCAPE, 1, 1, 1, -/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -/* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -/* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, + /* 0x00 */ DS_ISNULL, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x20 */ 1, 1, DS_ISQUOTE, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DS_ISESCAPE, 1, 1, 1, + /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, + /* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + /* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + /* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, }; - FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds) { - JSUTF16 sur[2] = { 0 }; - int iSur = 0; - int index; - wchar_t *escOffset; - size_t escLen = (ds->escEnd - ds->escStart); - JSUINT8 *inputOffset; - JSUINT8 oct; - JSUTF32 ucs; - ds->lastType = JT_INVALID; - ds->start ++; - - if ( (ds->end - ds->start) > escLen) + JSUTF16 sur[2] = { 0 }; + int iSur = 0; + int index; + wchar_t *escOffset; + wchar_t *escStart; + size_t escLen = (ds->escEnd - ds->escStart); + JSUINT8 *inputOffset; + JSUINT8 oct; + JSUTF32 ucs; + ds->lastType = JT_INVALID; + ds->start ++; + + if ( (size_t) (ds->end - ds->start) > escLen) + { + size_t newSize = (ds->end - ds->start); + + if (ds->escHeap) { - size_t newSize = (ds->end - ds->start); - - if (ds->escHeap) - { - ds->escStart = (wchar_t *) ds->dec->realloc (ds->escStart, newSize * sizeof(wchar_t)); - if (!ds->escStart) - { - return SetError(ds, -1, "Could not reserve memory block"); - } - } - else - { - wchar_t *oldStart = ds->escStart; - ds->escHeap = 1; - ds->escStart = (wchar_t *) ds->dec->malloc (newSize * sizeof(wchar_t)); - if (!ds->escStart) - { - return SetError(ds, -1, "Could not reserve memory block"); - } - memcpy (ds->escStart, oldStart, escLen * sizeof(wchar_t)); - } - - ds->escEnd = ds->escStart + newSize; + if (newSize > (UINT_MAX / sizeof(wchar_t))) + { + return SetError(ds, -1, "Could not reserve memory block"); + } + escStart = (wchar_t *)ds->dec->realloc(ds->escStart, newSize * sizeof(wchar_t)); + if (!escStart) + { + ds->dec->free(ds->escStart); + return SetError(ds, -1, 
"Could not reserve memory block"); + } + ds->escStart = escStart; + } + else + { + wchar_t *oldStart = ds->escStart; + ds->escHeap = 1; + if (newSize > (UINT_MAX / sizeof(wchar_t))) + { + return SetError(ds, -1, "Could not reserve memory block"); + } + ds->escStart = (wchar_t *) ds->dec->malloc(newSize * sizeof(wchar_t)); + if (!ds->escStart) + { + return SetError(ds, -1, "Could not reserve memory block"); + } + memcpy(ds->escStart, oldStart, escLen * sizeof(wchar_t)); } - escOffset = ds->escStart; - inputOffset = ds->start; + ds->escEnd = ds->escStart + newSize; + } - while(1) + escOffset = ds->escStart; + inputOffset = (JSUINT8 *) ds->start; + + for (;;) + { + switch (g_decoderLookup[(JSUINT8)(*inputOffset)]) { - switch (g_decoderLookup[(JSUINT8)(*inputOffset)]) + case DS_ISNULL: + { + return SetError(ds, -1, "Unmatched ''\"' when when decoding 'string'"); + } + case DS_ISQUOTE: + { + ds->lastType = JT_UTF8; + inputOffset ++; + ds->start += ( (char *) inputOffset - (ds->start)); + return ds->dec->newString(ds->prv, ds->escStart, escOffset); + } + case DS_UTFLENERROR: + { + return SetError (ds, -1, "Invalid UTF-8 sequence length when decoding 'string'"); + } + case DS_ISESCAPE: + inputOffset ++; + switch (*inputOffset) { - case DS_ISNULL: - return SetError(ds, -1, "Unmatched ''\"' when when decoding 'string'"); - - case DS_ISQUOTE: - ds->lastType = JT_UTF8; + case '\\': *(escOffset++) = L'\\'; inputOffset++; continue; + case '\"': *(escOffset++) = L'\"'; inputOffset++; continue; + case '/': *(escOffset++) = L'/'; inputOffset++; continue; + case 'b': *(escOffset++) = L'\b'; inputOffset++; continue; + case 'f': *(escOffset++) = L'\f'; inputOffset++; continue; + case 'n': *(escOffset++) = L'\n'; inputOffset++; continue; + case 'r': *(escOffset++) = L'\r'; inputOffset++; continue; + case 't': *(escOffset++) = L'\t'; inputOffset++; continue; + + case 'u': + { + int index; inputOffset ++; - ds->start += ( (char *) inputOffset - (ds->start)); - 
RETURN_JSOBJ_NULLCHECK(ds->dec->newString(ds->escStart, escOffset)); - - case DS_UTFLENERROR: - return SetError (ds, -1, "Invalid UTF-8 sequence length when decoding 'string'"); - case DS_ISESCAPE: - inputOffset ++; - switch (*inputOffset) + for (index = 0; index < 4; index ++) { - case '\\': *(escOffset++) = L'\\'; inputOffset++; continue; - case '\"': *(escOffset++) = L'\"'; inputOffset++; continue; - case '/': *(escOffset++) = L'/'; inputOffset++; continue; - case 'b': *(escOffset++) = L'\b'; inputOffset++; continue; - case 'f': *(escOffset++) = L'\f'; inputOffset++; continue; - case 'n': *(escOffset++) = L'\n'; inputOffset++; continue; - case 'r': *(escOffset++) = L'\r'; inputOffset++; continue; - case 't': *(escOffset++) = L'\t'; inputOffset++; continue; - - case 'u': - { - int index; - inputOffset ++; - - for (index = 0; index < 4; index ++) - { - switch (*inputOffset) - { - case '\0': return SetError (ds, -1, "Unterminated unicode escape sequence when decoding 'string'"); - default: return SetError (ds, -1, "Unexpected character in unicode escape sequence when decoding 'string'"); - - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - sur[iSur] = (sur[iSur] << 4) + (JSUTF16) (*inputOffset - '0'); - break; - - case 'a': - case 'b': - case 'c': - case 'd': - case 'e': - case 'f': - sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'a'); - break; - - case 'A': - case 'B': - case 'C': - case 'D': - case 'E': - case 'F': - sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'A'); - break; - } - - inputOffset ++; - } - - - if (iSur == 0) - { - if((sur[iSur] & 0xfc00) == 0xd800) - { - // First of a surrogate pair, continue parsing - iSur ++; - break; - } - (*escOffset++) = (wchar_t) sur[iSur]; - iSur = 0; - } - else - { - // Decode pair - if ((sur[1] & 0xfc00) != 0xdc00) - { - return SetError (ds, -1, "Unpaired high surrogate when decoding 'string'"); - } + switch 
(*inputOffset) + { + case '\0': return SetError (ds, -1, "Unterminated unicode escape sequence when decoding 'string'"); + default: return SetError (ds, -1, "Unexpected character in unicode escape sequence when decoding 'string'"); + + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + sur[iSur] = (sur[iSur] << 4) + (JSUTF16) (*inputOffset - '0'); + break; + + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': + case 'f': + sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'a'); + break; + + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': + case 'F': + sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'A'); + break; + } + + inputOffset ++; + } + if (iSur == 0) + { + if((sur[iSur] & 0xfc00) == 0xd800) + { + // First of a surrogate pair, continue parsing + iSur ++; + break; + } + (*escOffset++) = (wchar_t) sur[iSur]; + iSur = 0; + } + else + { + // Decode pair + if ((sur[1] & 0xfc00) != 0xdc00) + { + return SetError (ds, -1, "Unpaired high surrogate when decoding 'string'"); + } #if WCHAR_MAX == 0xffff - (*escOffset++) = (wchar_t) sur[0]; - (*escOffset++) = (wchar_t) sur[1]; + (*escOffset++) = (wchar_t) sur[0]; + (*escOffset++) = (wchar_t) sur[1]; #else - (*escOffset++) = (wchar_t) 0x10000 + (((sur[0] - 0xd800) << 10) | (sur[1] - 0xdc00)); + (*escOffset++) = (wchar_t) 0x10000 + (((sur[0] - 0xd800) << 10) | (sur[1] - 0xdc00)); #endif - iSur = 0; - } - break; - } - - case '\0': return SetError(ds, -1, "Unterminated escape sequence when decoding 'string'"); - default: return SetError(ds, -1, "Unrecognized escape sequence when decoding 'string'"); + iSur = 0; } - break; - - case 1: - *(escOffset++) = (wchar_t) (*inputOffset++); - break; + break; + } - case 2: + case '\0': return SetError(ds, -1, "Unterminated escape sequence when decoding 'string'"); + default: return SetError(ds, -1, "Unrecognized escape sequence when decoding 'string'"); + } + break; + + case 
1: + { + *(escOffset++) = (wchar_t) (*inputOffset++); + break; + } + + case 2: + { + ucs = (*inputOffset++) & 0x1f; + ucs <<= 6; + if (((*inputOffset) & 0x80) != 0x80) { - ucs = (*inputOffset++) & 0x1f; - ucs <<= 6; - if (((*inputOffset) & 0x80) != 0x80) - { - return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); - } - ucs |= (*inputOffset++) & 0x3f; - if (ucs < 0x80) return SetError (ds, -1, "Overlong 2 byte UTF-8 sequence detected when decoding 'string'"); - *(escOffset++) = (wchar_t) ucs; - break; + return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); } - - case 3: + ucs |= (*inputOffset++) & 0x3f; + if (ucs < 0x80) return SetError (ds, -1, "Overlong 2 byte UTF-8 sequence detected when decoding 'string'"); + *(escOffset++) = (wchar_t) ucs; + break; + } + + case 3: + { + JSUTF32 ucs = 0; + ucs |= (*inputOffset++) & 0x0f; + + for (index = 0; index < 2; index ++) { - JSUTF32 ucs = 0; - ucs |= (*inputOffset++) & 0x0f; + ucs <<= 6; + oct = (*inputOffset++); - for (index = 0; index < 2; index ++) - { - ucs <<= 6; - oct = (*inputOffset++); + if ((oct & 0x80) != 0x80) + { + return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); + } - if ((oct & 0x80) != 0x80) - { - return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); - } + ucs |= oct & 0x3f; + } - ucs |= oct & 0x3f; - } + if (ucs < 0x800) return SetError (ds, -1, "Overlong 3 byte UTF-8 sequence detected when encoding string"); + *(escOffset++) = (wchar_t) ucs; + break; + } - if (ucs < 0x800) return SetError (ds, -1, "Overlong 3 byte UTF-8 sequence detected when encoding string"); - *(escOffset++) = (wchar_t) ucs; - break; - } + case 4: + { + JSUTF32 ucs = 0; + ucs |= (*inputOffset++) & 0x07; - case 4: + for (index = 0; index < 3; index ++) { - JSUTF32 ucs = 0; - ucs |= (*inputOffset++) & 0x07; - - for (index = 0; index < 3; index ++) - { - ucs <<= 6; - oct = (*inputOffset++); + ucs <<= 6; + oct = 
(*inputOffset++); - if ((oct & 0x80) != 0x80) - { - return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); - } + if ((oct & 0x80) != 0x80) + { + return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'"); + } - ucs |= oct & 0x3f; - } + ucs |= oct & 0x3f; + } - if (ucs < 0x10000) return SetError (ds, -1, "Overlong 4 byte UTF-8 sequence detected when decoding 'string'"); + if (ucs < 0x10000) return SetError (ds, -1, "Overlong 4 byte UTF-8 sequence detected when decoding 'string'"); - #if WCHAR_MAX == 0xffff - if (ucs >= 0x10000) - { - ucs -= 0x10000; - *(escOffset++) = (ucs >> 10) + 0xd800; - *(escOffset++) = (ucs & 0x3ff) + 0xdc00; - } - else - { - *(escOffset++) = (wchar_t) ucs; - } - #else - *(escOffset++) = (wchar_t) ucs; - #endif - break; +#if WCHAR_MAX == 0xffff + if (ucs >= 0x10000) + { + ucs -= 0x10000; + *(escOffset++) = (wchar_t) (ucs >> 10) + 0xd800; + *(escOffset++) = (wchar_t) (ucs & 0x3ff) + 0xdc00; } + else + { + *(escOffset++) = (wchar_t) ucs; } +#else + *(escOffset++) = (wchar_t) ucs; +#endif + break; + } } + } } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array( struct DecoderState *ds) +FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array(struct DecoderState *ds) { - JSOBJ itemValue; - JSOBJ newObj = ds->dec->newArray(ds->dec); + JSOBJ itemValue; + JSOBJ newObj; + int len; + ds->objDepth++; + if (ds->objDepth > JSON_MAX_OBJECT_DEPTH) { + return SetError(ds, -1, "Reached object decoding depth limit"); + } - ds->lastType = JT_INVALID; - ds->start ++; + newObj = ds->dec->newArray(ds->prv, ds->dec); + len = 0; - while (1)//(*ds->start) != '\0') - { - SkipWhitespace(ds); + ds->lastType = JT_INVALID; + ds->start ++; - if ((*ds->start) == ']') - { - ds->start++; - return ds->dec->endArray(newObj); - } + for (;;) + { + SkipWhitespace(ds); - itemValue = decode_any(ds); + if ((*ds->start) == ']') + { + ds->objDepth--; + if (len == 0) + { + ds->start ++; + return ds->dec->endArray(ds->prv, newObj); + } + + 
ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return SetError(ds, -1, "Unexpected character found when decoding array value (1)"); + } - if (itemValue == NULL) - { - ds->dec->releaseObject(newObj, ds->dec); - return NULL; - } + itemValue = decode_any(ds); - if (!ds->dec->arrayAddItem (newObj, itemValue)) - { - ds->dec->releaseObject(newObj, ds->dec); - return NULL; - } + if (itemValue == NULL) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return NULL; + } - SkipWhitespace(ds); + if (!ds->dec->arrayAddItem (ds->prv, newObj, itemValue)) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return NULL; + } - switch (*(ds->start++)) - { - case ']': - return ds->dec->endArray(newObj); + SkipWhitespace(ds); - case ',': - break; + switch (*(ds->start++)) + { + case ']': + { + ds->objDepth--; + return ds->dec->endArray(ds->prv, newObj); + } + case ',': + break; - default: - ds->dec->releaseObject(newObj, ds->dec); - return SetError(ds, -1, "Unexpected character in found when decoding array value"); - } + default: + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return SetError(ds, -1, "Unexpected character found when decoding array value (2)"); } - ds->dec->releaseObject(newObj, ds->dec); - return SetError(ds, -1, "Unmatched ']' when decoding 'array'"); + len ++; + } } - - FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object( struct DecoderState *ds) { - JSOBJ itemName; - JSOBJ itemValue; - JSOBJ newObj = ds->dec->newObject(ds->dec); + JSOBJ itemName; + JSOBJ itemValue; + JSOBJ newObj; - ds->start ++; + ds->objDepth++; + if (ds->objDepth > JSON_MAX_OBJECT_DEPTH) { + return SetError(ds, -1, "Reached object decoding depth limit"); + } - while (1) - { - SkipWhitespace(ds); + newObj = ds->dec->newObject(ds->prv, ds->dec); - if ((*ds->start) == '}') - { - ds->start ++; - return ds->dec->endObject(newObj); - } + ds->start ++; - ds->lastType = JT_INVALID; - itemName = decode_any(ds); + for (;;) + { + SkipWhitespace(ds); - if (itemName == NULL) - { - 
ds->dec->releaseObject(newObj, ds->dec); - return NULL; - } + if ((*ds->start) == '}') + { + ds->objDepth--; + ds->start ++; + return ds->dec->endObject(ds->prv, newObj); + } - if (ds->lastType != JT_UTF8) - { - ds->dec->releaseObject(newObj, ds->dec); - ds->dec->releaseObject(itemName, ds->dec); - return SetError(ds, -1, "Key name of object must be 'string' when decoding 'object'"); - } + ds->lastType = JT_INVALID; + itemName = decode_any(ds); - SkipWhitespace(ds); + if (itemName == NULL) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return NULL; + } - if (*(ds->start++) != ':') - { - ds->dec->releaseObject(newObj, ds->dec); - ds->dec->releaseObject(itemName, ds->dec); - return SetError(ds, -1, "No ':' found when decoding object value"); - } + if (ds->lastType != JT_UTF8) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + ds->dec->releaseObject(ds->prv, itemName, ds->dec); + return SetError(ds, -1, "Key name of object must be 'string' when decoding 'object'"); + } - SkipWhitespace(ds); + SkipWhitespace(ds); - itemValue = decode_any(ds); + if (*(ds->start++) != ':') + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + ds->dec->releaseObject(ds->prv, itemName, ds->dec); + return SetError(ds, -1, "No ':' found when decoding object value"); + } - if (itemValue == NULL) - { - ds->dec->releaseObject(newObj, ds->dec); - ds->dec->releaseObject(itemName, ds->dec); - return NULL; - } + SkipWhitespace(ds); - if (!ds->dec->objectAddKey (newObj, itemName, itemValue)) - { - ds->dec->releaseObject(newObj, ds->dec); - ds->dec->releaseObject(itemName, ds->dec); - ds->dec->releaseObject(itemValue, ds->dec); - return NULL; - } + itemValue = decode_any(ds); - SkipWhitespace(ds); + if (itemValue == NULL) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + ds->dec->releaseObject(ds->prv, itemName, ds->dec); + return NULL; + } - switch (*(ds->start++)) - { - case '}': - return ds->dec->endObject(newObj); + if (!ds->dec->objectAddKey (ds->prv, newObj, 
itemName, itemValue)) + { + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + ds->dec->releaseObject(ds->prv, itemName, ds->dec); + ds->dec->releaseObject(ds->prv, itemValue, ds->dec); + return NULL; + } - case ',': - break; + SkipWhitespace(ds); - default: - ds->dec->releaseObject(newObj, ds->dec); - return SetError(ds, -1, "Unexpected character in found when decoding object value"); - } + switch (*(ds->start++)) + { + case '}': + { + ds->objDepth--; + return ds->dec->endObject(ds->prv, newObj); + } + case ',': + break; + + default: + ds->dec->releaseObject(ds->prv, newObj, ds->dec); + return SetError(ds, -1, "Unexpected character in found when decoding object value"); } - - ds->dec->releaseObject(newObj, ds->dec); - return SetError(ds, -1, "Unmatched '}' when decoding object"); + } } FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds) { - while (1) + for (;;) + { + switch (*ds->start) { - switch (*ds->start) - { - case '\"': - return decode_string (ds); - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - case '-': - return decode_numeric (ds); - - case '[': return decode_array (ds); - case '{': return decode_object (ds); - case 't': return decode_true (ds); - case 'f': return decode_false (ds); - case 'n': return decode_null (ds); - - case ' ': - case '\t': - case '\r': - case '\n': - // White space - ds->start ++; - break; - - default: - return SetError(ds, -1, "Expected object or value"); - } + case '\"': + return decode_string (ds); + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + return decode_numeric (ds); + + case '[': return decode_array (ds); + case '{': return decode_object (ds); + case 't': return decode_true (ds); + case 'f': return decode_false (ds); + case 'n': return decode_null (ds); + + case ' ': + case '\t': + case '\r': + case '\n': + // White space + ds->start ++; + break; 
+ + default: + return SetError(ds, -1, "Expected object or value"); } + } } - JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer) { - - /* - FIXME: Base the size of escBuffer of that of cbBuffer so that the unicode escaping doesn't run into the wall each time */ - struct DecoderState ds; - wchar_t escBuffer[(JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t))]; - JSOBJ ret; - - ds.start = (char *) buffer; - ds.end = ds.start + cbBuffer; - - ds.escStart = escBuffer; - ds.escEnd = ds.escStart + (JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t)); - ds.escHeap = 0; - ds.dec = dec; - ds.dec->errorStr = NULL; - ds.dec->errorOffset = NULL; - - ds.dec = dec; - - ret = decode_any (&ds); - - if (ds.escHeap) - { - dec->free(ds.escStart); - } - return ret; + /* + FIXME: Base the size of escBuffer of that of cbBuffer so that the unicode escaping doesn't run into the wall each time */ + struct DecoderState ds; + wchar_t escBuffer[(JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t))]; + JSOBJ ret; + + ds.start = (char *) buffer; + ds.end = ds.start + cbBuffer; + + ds.escStart = escBuffer; + ds.escEnd = ds.escStart + (JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t)); + ds.escHeap = 0; + ds.prv = dec->prv; + ds.dec = dec; + ds.dec->errorStr = NULL; + ds.dec->errorOffset = NULL; + ds.objDepth = 0; + + ds.dec = dec; + + ret = decode_any (&ds); + + if (ds.escHeap) + { + dec->free(ds.escStart); + } + + SkipWhitespace(&ds); + + if (ds.start != ds.end && ret) + { + dec->releaseObject(ds.prv, ret, ds.dec); + return SetError(&ds, -1, "Trailing data"); + } + + return ret; } diff --git a/pandas/src/ujson/lib/ultrajsonenc.c b/pandas/src/ujson/lib/ultrajsonenc.c index 22871513870b7..01fc7c10fe755 100644 --- a/pandas/src/ujson/lib/ultrajsonenc.c +++ b/pandas/src/ujson/lib/ultrajsonenc.c @@ -1,37 +1,38 @@ /* -Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. All advertising materials mentioning features or use of this software - must display the following acknowledgement: - This product includes software developed by ESN Social Software AB (www.esn.me). -4. Neither the name of the ESN Social Software AB nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ESN Social Software AB nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +DISCLAIMED. 
IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Portions of code from: -MODP_ASCII - Ascii transformations (upper/lower, etc) + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) http://code.google.com/p/stringencoders/ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. */ #include "ultrajson.h" @@ -50,42 +51,57 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights rese #define FALSE 0 #endif +/* +Worst cases being: + +Control characters (ASCII < 32) +0x00 (1 byte) input => \u0000 output (6 bytes) +1 * 6 => 6 (6 bytes required) + +or UTF-16 surrogate pairs +4 bytes input in UTF-8 => \uXXXX\uYYYY (12 bytes). 
+ +4 * 6 => 24 bytes (12 bytes required) + +The extra 2 bytes are for the quotes around the string + +*/ +#define RESERVE_STRING(_len) (2 + ((_len) * 6)) + static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000}; static const char g_hexChars[] = "0123456789abcdef"; static const char g_escapeChars[] = "0123456789\\b\\t\\n\\f\\r\\\"\\\\\\/"; - /* FIXME: While this is fine dandy and working it's a magic value mess which probably only the author understands. Needs a cleanup and more documentation */ /* Table for pure ascii output escaping all characters above 127 to \uXXXX */ -static const JSUINT8 g_asciiOutputTable[256] = +static const JSUINT8 g_asciiOutputTable[256] = { -/* 0x00 */ 0, 30, 30, 30, 30, 30, 30, 30, 10, 12, 14, 30, 16, 18, 30, 30, +/* 0x00 */ 0, 30, 30, 30, 30, 30, 30, 30, 10, 12, 14, 30, 16, 18, 30, 30, /* 0x10 */ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, -/* 0x20 */ 1, 1, 20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 24, -/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 0x20 */ 1, 1, 20, 1, 1, 1, 29, 1, 1, 1, 1, 1, 1, 1, 1, 24, +/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 29, 1, 29, 1, +/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 22, 1, 1, 1, -/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, -/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 1, 1 }; - static void SetError (JSOBJ obj, JSONObjectEncoder *enc, const char *message) { - enc->errorMsg = message; - enc->errorObj = obj; + enc->errorMsg = message; + enc->errorObj = obj; } /* @@ -93,332 +109,357 @@ FIXME: Keep track of how big these get across several encoder calls and try to m That way we won't run our head into the wall each call */ void Buffer_Realloc (JSONObjectEncoder *enc, size_t cbNeeded) { - size_t curSize = enc->end - enc->start; - size_t newSize = curSize * 2; - size_t offset = enc->offset - enc->start; - - while (newSize < curSize + cbNeeded) + size_t curSize = enc->end - enc->start; + size_t newSize = curSize * 2; + size_t offset = enc->offset - enc->start; + + while (newSize < curSize + cbNeeded) + { + newSize *= 2; + } + + if (enc->heap) + { + enc->start = (char *) enc->realloc (enc->start, newSize); + if (!enc->start) { - newSize *= 2; + SetError (NULL, enc, "Could not reserve memory block"); + return; } - - if (enc->heap) + } + else + { + char *oldStart = enc->start; + enc->heap = 1; + enc->start = (char *) enc->malloc (newSize); + if (!enc->start) { - enc->start = (char *) enc->realloc (enc->start, newSize); - if (!enc->start) - { - SetError (NULL, enc, "Could not reserve memory block"); - return; - } + SetError (NULL, enc, "Could not reserve memory block"); + return; } - else - { - char *oldStart = enc->start; - enc->heap = 1; - enc->start = (char *) enc->malloc (newSize); - if (!enc->start) - { - SetError (NULL, enc, "Could not reserve memory block"); - return; - } - memcpy (enc->start, oldStart, offset); - } - enc->offset = enc->start + offset; - enc->end = enc->start 
+ newSize; + memcpy (enc->start, oldStart, offset); + } + enc->offset = enc->start + offset; + enc->end = enc->start + newSize; } FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC Buffer_AppendShortHexUnchecked (char *outputOffset, unsigned short value) { - *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12]; - *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8]; - *(outputOffset++) = g_hexChars[(value & 0x00f0) >> 4]; - *(outputOffset++) = g_hexChars[(value & 0x000f) >> 0]; + *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12]; + *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8]; + *(outputOffset++) = g_hexChars[(value & 0x00f0) >> 4]; + *(outputOffset++) = g_hexChars[(value & 0x000f) >> 0]; } -int Buffer_EscapeStringUnvalidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end) +int Buffer_EscapeStringUnvalidated (JSONObjectEncoder *enc, const char *io, const char *end) { - char *of = (char *) enc->offset; + char *of = (char *) enc->offset; - while (1) + for (;;) + { + switch (*io) { - switch (*io) + case 0x00: + { + if (io < end) { - case 0x00: - if (io < end) - { - *(of++) = '\\'; - *(of++) = 'u'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = '0'; - break; - } - else - { - enc->offset += (of - enc->offset); - return TRUE; - } - - case '\"': (*of++) = '\\'; (*of++) = '\"'; break; - case '\\': (*of++) = '\\'; (*of++) = '\\'; break; - case '/': (*of++) = '\\'; (*of++) = '/'; break; - case '\b': (*of++) = '\\'; (*of++) = 'b'; break; - case '\f': (*of++) = '\\'; (*of++) = 'f'; break; - case '\n': (*of++) = '\\'; (*of++) = 'n'; break; - case '\r': (*of++) = '\\'; (*of++) = 'r'; break; - case '\t': (*of++) = '\\'; (*of++) = 't'; break; - - case 0x01: - case 0x02: - case 0x03: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x0b: - case 0x0e: - case 0x0f: - case 0x10: - case 0x11: - case 0x12: - case 0x13: - case 0x14: - case 0x15: - case 0x16: - case 0x17: - case 0x18: - case 0x19: - case 0x1a: - case 0x1b: - case 
0x1c: - case 0x1d: - case 0x1e: - case 0x1f: - *(of++) = '\\'; - *(of++) = 'u'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)]; - *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)]; - break; - - default: (*of++) = (*io); break; + *(of++) = '\\'; + *(of++) = 'u'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = '0'; + break; } - - io++; - } - - return FALSE; + else + { + enc->offset += (of - enc->offset); + return TRUE; + } + } + case '\"': (*of++) = '\\'; (*of++) = '\"'; break; + case '\\': (*of++) = '\\'; (*of++) = '\\'; break; + case '/': (*of++) = '\\'; (*of++) = '/'; break; + case '\b': (*of++) = '\\'; (*of++) = 'b'; break; + case '\f': (*of++) = '\\'; (*of++) = 'f'; break; + case '\n': (*of++) = '\\'; (*of++) = 'n'; break; + case '\r': (*of++) = '\\'; (*of++) = 'r'; break; + case '\t': (*of++) = '\\'; (*of++) = 't'; break; + + case 0x26: // '/' + case 0x3c: // '<' + case 0x3e: // '>' + { + if (enc->encodeHTMLChars) + { + // Fall through to \u00XX case below. + } + else + { + // Same as default case below. 
+ (*of++) = (*io); + break; + } + } + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x0b: + case 0x0e: + case 0x0f: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: + case 0x1f: + { + *(of++) = '\\'; + *(of++) = 'u'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)]; + *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)]; + break; + } + default: (*of++) = (*io); break; + } + io++; + } } - -/* -FIXME: -This code only works with Little and Big Endian - -FIXME: The JSON spec says escape "/" but non of the others do and we don't -want to be left alone doing it so we don't :) - -*/ int Buffer_EscapeStringValidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end) { - JSUTF32 ucs; - char *of = (char *) enc->offset; + JSUTF32 ucs; + char *of = (char *) enc->offset; - while (1) + for (;;) + { + JSUINT8 utflen = g_asciiOutputTable[(unsigned char) *io]; + + switch (utflen) { + case 0: + { + if (io < end) + { + *(of++) = '\\'; + *(of++) = 'u'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = '0'; + io ++; + continue; + } + else + { + enc->offset += (of - enc->offset); + return TRUE; + } + } - //JSUINT8 chr = (unsigned char) *io; - JSUINT8 utflen = g_asciiOutputTable[(unsigned char) *io]; + case 1: + { + *(of++)= (*io++); + continue; + } - switch (utflen) + case 2: + { + JSUTF32 in; + JSUTF16 in16; + + if (end - io < 1) { - case 0: - { - if (io < end) - { - *(of++) = '\\'; - *(of++) = 'u'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = '0'; - io ++; - continue; - } - else - { - enc->offset += (of - enc->offset); - return TRUE; - } - } - - case 1: - { - *(of++)= (*io++); - continue; - } - - case 2: - { - JSUTF32 in; - JSUTF16 in16; - - if (end - io < 1) - { - enc->offset += (of - 
enc->offset); - SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); - return FALSE; - } - - memcpy(&in16, io, sizeof(JSUTF16)); - in = (JSUTF32) in16; + enc->offset += (of - enc->offset); + SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); + return FALSE; + } + + memcpy(&in16, io, sizeof(JSUTF16)); + in = (JSUTF32) in16; #ifdef __LITTLE_ENDIAN__ - ucs = ((in & 0x1f) << 6) | ((in >> 8) & 0x3f); + ucs = ((in & 0x1f) << 6) | ((in >> 8) & 0x3f); #else - ucs = ((in & 0x1f00) >> 2) | (in & 0x3f); + ucs = ((in & 0x1f00) >> 2) | (in & 0x3f); #endif - if (ucs < 0x80) - { - enc->offset += (of - enc->offset); - SetError (obj, enc, "Overlong 2 byte UTF-8 sequence detected when encoding string"); - return FALSE; - } - - io += 2; - break; - } - - case 3: - { - JSUTF32 in; - JSUTF16 in16; - JSUINT8 in8; - - if (end - io < 2) - { - enc->offset += (of - enc->offset); - SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); - return FALSE; - } - - memcpy(&in16, io, sizeof(JSUTF16)); - memcpy(&in8, io + 2, sizeof(JSUINT8)); + if (ucs < 0x80) + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Overlong 2 byte UTF-8 sequence detected when encoding string"); + return FALSE; + } + + io += 2; + break; + } + + case 3: + { + JSUTF32 in; + JSUTF16 in16; + JSUINT8 in8; + + if (end - io < 2) + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); + return FALSE; + } + + memcpy(&in16, io, sizeof(JSUTF16)); + memcpy(&in8, io + 2, sizeof(JSUINT8)); #ifdef __LITTLE_ENDIAN__ - in = (JSUTF32) in16; - in |= in8 << 16; - ucs = ((in & 0x0f) << 12) | ((in & 0x3f00) >> 2) | ((in & 0x3f0000) >> 16); + in = (JSUTF32) in16; + in |= in8 << 16; + ucs = ((in & 0x0f) << 12) | ((in & 0x3f00) >> 2) | ((in & 0x3f0000) >> 16); #else - in = in16 << 8; - in |= in8; - ucs = ((in & 0x0f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f); + in = in16 << 8; + in |= in8; + ucs = ((in & 
0x0f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f); #endif + if (ucs < 0x800) + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Overlong 3 byte UTF-8 sequence detected when encoding string"); + return FALSE; + } + + io += 3; + break; + } + case 4: + { + JSUTF32 in; - if (ucs < 0x800) - { - enc->offset += (of - enc->offset); - SetError (obj, enc, "Overlong 3 byte UTF-8 sequence detected when encoding string"); - return FALSE; - } - - io += 3; - break; - } - case 4: - { - JSUTF32 in; - - if (end - io < 3) - { - enc->offset += (of - enc->offset); - SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); - return FALSE; - } - - memcpy(&in, io, sizeof(JSUTF32)); + if (end - io < 3) + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string"); + return FALSE; + } + + memcpy(&in, io, sizeof(JSUTF32)); #ifdef __LITTLE_ENDIAN__ - ucs = ((in & 0x07) << 18) | ((in & 0x3f00) << 4) | ((in & 0x3f0000) >> 10) | ((in & 0x3f000000) >> 24); + ucs = ((in & 0x07) << 18) | ((in & 0x3f00) << 4) | ((in & 0x3f0000) >> 10) | ((in & 0x3f000000) >> 24); #else - ucs = ((in & 0x07000000) >> 6) | ((in & 0x3f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f); + ucs = ((in & 0x07000000) >> 6) | ((in & 0x3f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f); #endif - if (ucs < 0x10000) - { - enc->offset += (of - enc->offset); - SetError (obj, enc, "Overlong 4 byte UTF-8 sequence detected when encoding string"); - return FALSE; - } - - io += 4; - break; - } - - - case 5: - case 6: - enc->offset += (of - enc->offset); - SetError (obj, enc, "Unsupported UTF-8 sequence length when encoding string"); - return FALSE; - - case 30: - // \uXXXX encode - *(of++) = '\\'; - *(of++) = 'u'; - *(of++) = '0'; - *(of++) = '0'; - *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)]; - *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)]; - io ++; - continue; - - case 10: - case 12: - case 14: - case 16: - case 18: - case 20: 
- case 22: - case 24: - *(of++) = *( (char *) (g_escapeChars + utflen + 0)); - *(of++) = *( (char *) (g_escapeChars + utflen + 1)); - io ++; - continue; + if (ucs < 0x10000) + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Overlong 4 byte UTF-8 sequence detected when encoding string"); + return FALSE; } - /* - If the character is a UTF8 sequence of length > 1 we end up here */ - if (ucs >= 0x10000) + io += 4; + break; + } + + + case 5: + case 6: + { + enc->offset += (of - enc->offset); + SetError (obj, enc, "Unsupported UTF-8 sequence length when encoding string"); + return FALSE; + } + + case 29: + { + if (enc->encodeHTMLChars) { - ucs -= 0x10000; - *(of++) = '\\'; - *(of++) = 'u'; - Buffer_AppendShortHexUnchecked(of, (ucs >> 10) + 0xd800); - of += 4; - - *(of++) = '\\'; - *(of++) = 'u'; - Buffer_AppendShortHexUnchecked(of, (ucs & 0x3ff) + 0xdc00); - of += 4; + // Fall through to \u00XX case 30 below. } else { - *(of++) = '\\'; - *(of++) = 'u'; - Buffer_AppendShortHexUnchecked(of, ucs); - of += 4; + // Same as case 1 above. 
+ *(of++) = (*io++); + continue; } + } + + case 30: + { + // \uXXXX encode + *(of++) = '\\'; + *(of++) = 'u'; + *(of++) = '0'; + *(of++) = '0'; + *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)]; + *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)]; + io ++; + continue; + } + case 10: + case 12: + case 14: + case 16: + case 18: + case 20: + case 22: + case 24: + { + *(of++) = *( (char *) (g_escapeChars + utflen + 0)); + *(of++) = *( (char *) (g_escapeChars + utflen + 1)); + io ++; + continue; + } + // This can never happen, it's here to make L4 VC++ happy + default: + { + ucs = 0; + break; + } } - return FALSE; + /* + If the character is a UTF8 sequence of length > 1 we end up here */ + if (ucs >= 0x10000) + { + ucs -= 0x10000; + *(of++) = '\\'; + *(of++) = 'u'; + Buffer_AppendShortHexUnchecked(of, (unsigned short) (ucs >> 10) + 0xd800); + of += 4; + + *(of++) = '\\'; + *(of++) = 'u'; + Buffer_AppendShortHexUnchecked(of, (unsigned short) (ucs & 0x3ff) + 0xdc00); + of += 4; + } + else + { + *(of++) = '\\'; + *(of++) = 'u'; + Buffer_AppendShortHexUnchecked(of, (unsigned short) ucs); + of += 4; + } + } } #define Buffer_Reserve(__enc, __len) \ - if ((__enc)->end - (__enc)->offset < (__len)) \ + if ( (size_t) ((__enc)->end - (__enc)->offset) < (size_t) (__len)) \ { \ - Buffer_Realloc((__enc), (__len));\ + Buffer_Realloc((__enc), (__len));\ } \ @@ -427,176 +468,180 @@ int Buffer_EscapeStringValidated (JSOBJ obj, JSONObjectEncoder *enc, const char FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char* begin, char* end) { - char aux; - while (end > begin) - aux = *end, *end-- = *begin, *begin++ = aux; + char aux; + while (end > begin) + aux = *end, *end-- = *begin, *begin++ = aux; } void Buffer_AppendIntUnchecked(JSONObjectEncoder *enc, JSINT32 value) { - char* wstr; - JSUINT32 uvalue = (value < 0) ? -value : value; + char* wstr; + JSUINT32 uvalue = (value < 0) ? -value : value; - wstr = enc->offset; - // Conversion. Number is reversed. 
- - do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10); - if (value < 0) *wstr++ = '-'; + wstr = enc->offset; + // Conversion. Number is reversed. - // Reverse string - strreverse(enc->offset,wstr - 1); - enc->offset += (wstr - (enc->offset)); + do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10); + if (value < 0) *wstr++ = '-'; + + // Reverse string + strreverse(enc->offset,wstr - 1); + enc->offset += (wstr - (enc->offset)); } void Buffer_AppendLongUnchecked(JSONObjectEncoder *enc, JSINT64 value) { - char* wstr; - JSUINT64 uvalue = (value < 0) ? -value : value; + char* wstr; + JSUINT64 uvalue = (value < 0) ? -value : value; - wstr = enc->offset; - // Conversion. Number is reversed. - - do *wstr++ = (char)(48 + (uvalue % 10ULL)); while(uvalue /= 10ULL); - if (value < 0) *wstr++ = '-'; + wstr = enc->offset; + // Conversion. Number is reversed. - // Reverse string - strreverse(enc->offset,wstr - 1); - enc->offset += (wstr - (enc->offset)); + do *wstr++ = (char)(48 + (uvalue % 10ULL)); while(uvalue /= 10ULL); + if (value < 0) *wstr++ = '-'; + + // Reverse string + strreverse(enc->offset,wstr - 1); + enc->offset += (wstr - (enc->offset)); } int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc, double value) { - /* if input is larger than thres_max, revert to exponential */ - const double thres_max = (double) 1e16 - 1; - int count; - double diff = 0.0; - char* str = enc->offset; - char* wstr = str; - unsigned long long whole; - double tmp; - unsigned long long frac; - int neg; - double pow10; - - if (value == HUGE_VAL || value == -HUGE_VAL) - { - SetError (obj, enc, "Invalid Inf value when encoding double"); - return FALSE; - } - if (! 
(value == value)) - { - SetError (obj, enc, "Invalid Nan value when encoding double"); - return FALSE; - } - + /* if input is larger than thres_max, revert to exponential */ + const double thres_max = (double) 1e16 - 1; + int count; + double diff = 0.0; + char* str = enc->offset; + char* wstr = str; + unsigned long long whole; + double tmp; + unsigned long long frac; + int neg; + double pow10; + + if (value == HUGE_VAL || value == -HUGE_VAL) + { + SetError (obj, enc, "Invalid Inf value when encoding double"); + return FALSE; + } - /* we'll work in positive values and deal with the - negative sign issue later */ - neg = 0; - if (value < 0) + if (!(value == value)) + { + SetError (obj, enc, "Invalid Nan value when encoding double"); + return FALSE; + } + + /* we'll work in positive values and deal with the + negative sign issue later */ + neg = 0; + if (value < 0) + { + neg = 1; + value = -value; + } + + pow10 = g_pow10[enc->doublePrecision]; + + whole = (unsigned long long) value; + tmp = (value - whole) * pow10; + frac = (unsigned long long)(tmp); + diff = tmp - frac; + + if (diff > 0.5) + { + ++frac; + /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */ + if (frac >= pow10) { - neg = 1; - value = -value; + frac = 0; + ++whole; } + } + else + if (diff == 0.5 && ((frac == 0) || (frac & 1))) + { + /* if halfway, round up if odd, OR + if last digit is 0. That last part is strange */ + ++frac; + } + + /* for very large numbers switch back to native sprintf for exponentials. + anyone want to write code to replace this? */ + /* + normal printf behavior is to print EVERY whole number digit + which can be 100s of characters overflowing your buffers == bad + */ + if (value > thres_max) + { +#ifdef _WIN32 + enc->offset += sprintf_s(str, enc->end - enc->offset, "%.15e", neg ? -value : value); +#else + enc->offset += snprintf(str, enc->end - enc->offset, "%.15e", neg ? 
-value : value); +#endif + return TRUE; + } - pow10 = g_pow10[enc->doublePrecision]; - - whole = (unsigned long long) value; - tmp = (value - whole) * pow10; - frac = (unsigned long long)(tmp); - diff = tmp - frac; + if (enc->doublePrecision == 0) + { + diff = value - whole; - if (diff > 0.5) + if (diff > 0.5) { - ++frac; - /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */ - if (frac >= pow10) - { - frac = 0; - ++whole; - } - } - else - if (diff == 0.5 && ((frac == 0) || (frac & 1))) - { - /* if halfway, round up if odd, OR - if last digit is 0. That last part is strange */ - ++frac; + /* greater than 0.5, round up, e.g. 1.6 -> 2 */ + ++whole; } - - /* for very large numbers switch back to native sprintf for exponentials. - anyone want to write code to replace this? */ - /* - normal printf behavior is to print EVERY whole number digit - which can be 100s of characters overflowing your buffers == bad - */ - if (value > thres_max) + else + if (diff == 0.5 && (whole & 1)) { - enc->offset += sprintf(str, "%.15e", neg ? -value : value); - return TRUE; + /* exactly 0.5 and ODD, then round up */ + /* 1.5 -> 2, but 2.5 -> 2 */ + ++whole; } - if (enc->doublePrecision == 0) + //vvvvvvvvvvvvvvvvvvv Diff from modp_dto2 + } + else + if (frac) { - diff = value - whole; - - if (diff > 0.5) - { - /* greater than 0.5, round up, e.g. 
1.6 -> 2 */ - ++whole; - } - else - if (diff == 0.5 && (whole & 1)) - { - /* exactly 0.5 and ODD, then round up */ - /* 1.5 -> 2, but 2.5 -> 2 */ - ++whole; - } - - //vvvvvvvvvvvvvvvvvvv Diff from modp_dto2 - } - else - if (frac) - { - count = enc->doublePrecision; - // now do fractional part, as an unsigned number - // we know it is not 0 but we can have leading zeros, these - // should be removed - while (!(frac % 10)) - { + count = enc->doublePrecision; + // now do fractional part, as an unsigned number + // we know it is not 0 but we can have leading zeros, these + // should be removed + while (!(frac % 10)) + { --count; frac /= 10; - } - //^^^^^^^^^^^^^^^^^^^ Diff from modp_dto2 + } + //^^^^^^^^^^^^^^^^^^^ Diff from modp_dto2 - // now do fractional part, as an unsigned number - do - { - --count; - *wstr++ = (char)(48 + (frac % 10)); - } while (frac /= 10); - // add extra 0s - while (count-- > 0) - { - *wstr++ = '0'; - } - // add decimal - *wstr++ = '.'; + // now do fractional part, as an unsigned number + do + { + --count; + *wstr++ = (char)(48 + (frac % 10)); + } while (frac /= 10); + // add extra 0s + while (count-- > 0) + { + *wstr++ = '0'; + } + // add decimal + *wstr++ = '.'; } else { - *wstr++ = '0'; - *wstr++ = '.'; + *wstr++ = '0'; + *wstr++ = '.'; } // do whole part // Take care of sign // Conversion. Number is reversed. 
do *wstr++ = (char)(48 + (whole % 10)); while (whole /= 10); - - if (neg) + + if (neg) { - *wstr++ = '-'; + *wstr++ = '-'; } strreverse(str, wstr-1); enc->offset += (wstr - (enc->offset)); @@ -604,11 +649,6 @@ int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc, double value return TRUE; } - - - - - /* FIXME: Handle integration functions returning NULL here */ @@ -619,62 +659,57 @@ Perhaps implement recursion detection */ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, size_t cbName) { - const char *value; - char *objName; - int count; - JSOBJ iterObj; - size_t szlen; - JSONTypeContext tc; - tc.encoder = enc; - - if (enc->level > enc->recursionMax) + const char *value; + char *objName; + int count; + JSOBJ iterObj; + size_t szlen; + JSONTypeContext tc; + tc.encoder = enc; + + if (enc->level > enc->recursionMax) + { + SetError (obj, enc, "Maximum recursion level reached"); + return; + } + + /* + This reservation must hold + + length of _name as encoded worst case + + maxLength of double to string OR maxLength of JSLONG to string + */ + + Buffer_Reserve(enc, 256 + RESERVE_STRING(cbName)); + if (enc->errorMsg) + { + return; + } + + if (name) + { + Buffer_AppendCharUnchecked(enc, '\"'); + + if (enc->forceASCII) { - SetError (obj, enc, "Maximum recursion level reached"); + if (!Buffer_EscapeStringValidated(obj, enc, name, name + cbName)) + { return; + } } - - /* - This reservation must hold - - length of _name as encoded worst case + - maxLength of double to string OR maxLength of JSLONG to string - - Since input is assumed to be UTF-8 the worst character length is: - - 4 bytes (of UTF-8) => "\uXXXX\uXXXX" (12 bytes) - */ - - Buffer_Reserve(enc, 256 + (((cbName / 4) + 1) * 12)); - if (enc->errorMsg) + else { + if (!Buffer_EscapeStringUnvalidated(enc, name, name + cbName)) + { return; + } } - if (name) - { - Buffer_AppendCharUnchecked(enc, '\"'); + Buffer_AppendCharUnchecked(enc, '\"'); - if (enc->forceASCII) - { - if 
(!Buffer_EscapeStringValidated(obj, enc, name, name + cbName)) - { - return; - } - } - else - { - if (!Buffer_EscapeStringUnvalidated(obj, enc, name, name + cbName)) - { - return; - } - } - - - Buffer_AppendCharUnchecked(enc, '\"'); - - Buffer_AppendCharUnchecked (enc, ':'); + Buffer_AppendCharUnchecked (enc, ':'); #ifndef JSON_NO_EXTRA_WHITESPACE - Buffer_AppendCharUnchecked (enc, ' '); + Buffer_AppendCharUnchecked (enc, ' '); #endif } @@ -682,210 +717,209 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, size_t cbName) switch (tc.type) { - case JT_INVALID: - return; + case JT_INVALID: + { + return; + } - case JT_ARRAY: - { - count = 0; - enc->iterBegin(obj, &tc); + case JT_ARRAY: + { + count = 0; + enc->iterBegin(obj, &tc); - Buffer_AppendCharUnchecked (enc, '['); + Buffer_AppendCharUnchecked (enc, '['); - while (enc->iterNext(obj, &tc)) - { - if (count > 0) - { - Buffer_AppendCharUnchecked (enc, ','); + while (enc->iterNext(obj, &tc)) + { + if (count > 0) + { + Buffer_AppendCharUnchecked (enc, ','); #ifndef JSON_NO_EXTRA_WHITESPACE - Buffer_AppendCharUnchecked (buffer, ' '); + Buffer_AppendCharUnchecked (buffer, ' '); #endif - } + } - iterObj = enc->iterGetValue(obj, &tc); + iterObj = enc->iterGetValue(obj, &tc); - enc->level ++; - encode (iterObj, enc, NULL, 0); - count ++; - } + enc->level ++; + encode (iterObj, enc, NULL, 0); + count ++; + } - enc->iterEnd(obj, &tc); - Buffer_AppendCharUnchecked (enc, ']'); - break; - } + enc->iterEnd(obj, &tc); + Buffer_AppendCharUnchecked (enc, ']'); + break; + } - case JT_OBJECT: - { - count = 0; - enc->iterBegin(obj, &tc); + case JT_OBJECT: + { + count = 0; + enc->iterBegin(obj, &tc); - Buffer_AppendCharUnchecked (enc, '{'); + Buffer_AppendCharUnchecked (enc, '{'); - while (enc->iterNext(obj, &tc)) - { - if (count > 0) - { - Buffer_AppendCharUnchecked (enc, ','); + while (enc->iterNext(obj, &tc)) + { + if (count > 0) + { + Buffer_AppendCharUnchecked (enc, ','); #ifndef JSON_NO_EXTRA_WHITESPACE - 
Buffer_AppendCharUnchecked (enc, ' '); + Buffer_AppendCharUnchecked (enc, ' '); #endif - } + } - iterObj = enc->iterGetValue(obj, &tc); - objName = enc->iterGetName(obj, &tc, &szlen); - - enc->level ++; - encode (iterObj, enc, objName, szlen); - count ++; - } - - enc->iterEnd(obj, &tc); - Buffer_AppendCharUnchecked (enc, '}'); - break; - } - - case JT_LONG: - { - Buffer_AppendLongUnchecked (enc, enc->getLongValue(obj, &tc)); - break; - } + iterObj = enc->iterGetValue(obj, &tc); + objName = enc->iterGetName(obj, &tc, &szlen); - case JT_INT: - { - Buffer_AppendIntUnchecked (enc, enc->getIntValue(obj, &tc)); - break; - } - - case JT_TRUE: - { - Buffer_AppendCharUnchecked (enc, 't'); - Buffer_AppendCharUnchecked (enc, 'r'); - Buffer_AppendCharUnchecked (enc, 'u'); - Buffer_AppendCharUnchecked (enc, 'e'); - break; - } - - case JT_FALSE: - { - Buffer_AppendCharUnchecked (enc, 'f'); - Buffer_AppendCharUnchecked (enc, 'a'); - Buffer_AppendCharUnchecked (enc, 'l'); - Buffer_AppendCharUnchecked (enc, 's'); - Buffer_AppendCharUnchecked (enc, 'e'); - break; - } + enc->level ++; + encode (iterObj, enc, objName, szlen); + count ++; + } + enc->iterEnd(obj, &tc); + Buffer_AppendCharUnchecked (enc, '}'); + break; + } + + case JT_LONG: + { + Buffer_AppendLongUnchecked (enc, enc->getLongValue(obj, &tc)); + break; + } + + case JT_INT: + { + Buffer_AppendIntUnchecked (enc, enc->getIntValue(obj, &tc)); + break; + } + + case JT_TRUE: + { + Buffer_AppendCharUnchecked (enc, 't'); + Buffer_AppendCharUnchecked (enc, 'r'); + Buffer_AppendCharUnchecked (enc, 'u'); + Buffer_AppendCharUnchecked (enc, 'e'); + break; + } + + case JT_FALSE: + { + Buffer_AppendCharUnchecked (enc, 'f'); + Buffer_AppendCharUnchecked (enc, 'a'); + Buffer_AppendCharUnchecked (enc, 'l'); + Buffer_AppendCharUnchecked (enc, 's'); + Buffer_AppendCharUnchecked (enc, 'e'); + break; + } + + + case JT_NULL: + { + Buffer_AppendCharUnchecked (enc, 'n'); + Buffer_AppendCharUnchecked (enc, 'u'); + Buffer_AppendCharUnchecked (enc, 
'l'); + Buffer_AppendCharUnchecked (enc, 'l'); + break; + } + + case JT_DOUBLE: + { + if (!Buffer_AppendDoubleUnchecked (obj, enc, enc->getDoubleValue(obj, &tc))) + { + enc->endTypeContext(obj, &tc); + enc->level --; + return; + } + break; + } + + case JT_UTF8: + { + value = enc->getStringValue(obj, &tc, &szlen); + Buffer_Reserve(enc, RESERVE_STRING(szlen)); + if (enc->errorMsg) + { + enc->endTypeContext(obj, &tc); + return; + } + Buffer_AppendCharUnchecked (enc, '\"'); - case JT_NULL: + if (enc->forceASCII) + { + if (!Buffer_EscapeStringValidated(obj, enc, value, value + szlen)) { - Buffer_AppendCharUnchecked (enc, 'n'); - Buffer_AppendCharUnchecked (enc, 'u'); - Buffer_AppendCharUnchecked (enc, 'l'); - Buffer_AppendCharUnchecked (enc, 'l'); - break; + enc->endTypeContext(obj, &tc); + enc->level --; + return; } - - case JT_DOUBLE: + } + else + { + if (!Buffer_EscapeStringUnvalidated(enc, value, value + szlen)) { - if (!Buffer_AppendDoubleUnchecked (obj, enc, enc->getDoubleValue(obj, &tc))) - { - enc->endTypeContext(obj, &tc); - enc->level --; - return; - } - break; + enc->endTypeContext(obj, &tc); + enc->level --; + return; } + } - case JT_UTF8: - { - value = enc->getStringValue(obj, &tc, &szlen); - Buffer_Reserve(enc, ((szlen / 4) + 1) * 12); - if (enc->errorMsg) - { - enc->endTypeContext(obj, &tc); - return; - } - Buffer_AppendCharUnchecked (enc, '\"'); - - - if (enc->forceASCII) - { - if (!Buffer_EscapeStringValidated(obj, enc, value, value + szlen)) - { - enc->endTypeContext(obj, &tc); - enc->level --; - return; - } - } - else - { - if (!Buffer_EscapeStringUnvalidated(obj, enc, value, value + szlen)) - { - enc->endTypeContext(obj, &tc); - enc->level --; - return; - } - } - - Buffer_AppendCharUnchecked (enc, '\"'); - break; - } + Buffer_AppendCharUnchecked (enc, '\"'); + break; } + } - enc->endTypeContext(obj, &tc); - enc->level --; - + enc->endTypeContext(obj, &tc); + enc->level --; } char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *_buffer, 
size_t _cbBuffer) { - enc->malloc = enc->malloc ? enc->malloc : malloc; - enc->free = enc->free ? enc->free : free; - enc->realloc = enc->realloc ? enc->realloc : realloc; - enc->errorMsg = NULL; - enc->errorObj = NULL; - enc->level = 0; - - if (enc->recursionMax < 1) - { - enc->recursionMax = JSON_MAX_RECURSION_DEPTH; - } - - if (enc->doublePrecision < 0 || - enc->doublePrecision > JSON_DOUBLE_MAX_DECIMALS) - { - enc->doublePrecision = JSON_DOUBLE_MAX_DECIMALS; - } - - if (_buffer == NULL) + enc->malloc = enc->malloc ? enc->malloc : malloc; + enc->free = enc->free ? enc->free : free; + enc->realloc = enc->realloc ? enc->realloc : realloc; + enc->errorMsg = NULL; + enc->errorObj = NULL; + enc->level = 0; + + if (enc->recursionMax < 1) + { + enc->recursionMax = JSON_MAX_RECURSION_DEPTH; + } + + if (enc->doublePrecision < 0 || + enc->doublePrecision > JSON_DOUBLE_MAX_DECIMALS) + { + enc->doublePrecision = JSON_DOUBLE_MAX_DECIMALS; + } + + if (_buffer == NULL) + { + _cbBuffer = 32768; + enc->start = (char *) enc->malloc (_cbBuffer); + if (!enc->start) { - _cbBuffer = 32768; - enc->start = (char *) enc->malloc (_cbBuffer); - if (!enc->start) - { - SetError(obj, enc, "Could not reserve memory block"); - return NULL; - } - enc->heap = 1; - } - else - { - enc->start = _buffer; - enc->heap = 0; + SetError(obj, enc, "Could not reserve memory block"); + return NULL; } - - enc->end = enc->start + _cbBuffer; - enc->offset = enc->start; - - - encode (obj, enc, NULL, 0); - - Buffer_Reserve(enc, 1); - if (enc->errorMsg) - { - return NULL; - } - Buffer_AppendCharUnchecked(enc, '\0'); - - return enc->start; + enc->heap = 1; + } + else + { + enc->start = _buffer; + enc->heap = 0; + } + + enc->end = enc->start + _cbBuffer; + enc->offset = enc->start; + + encode (obj, enc, NULL, 0); + + Buffer_Reserve(enc, 1); + if (enc->errorMsg) + { + return NULL; + } + Buffer_AppendCharUnchecked(enc, '\0'); + + return enc->start; } diff --git a/pandas/src/ujson/python/JSONtoObj.c 
b/pandas/src/ujson/python/JSONtoObj.c index bc42269d9698b..9c1b4febd9895 100644 --- a/pandas/src/ujson/python/JSONtoObj.c +++ b/pandas/src/ujson/python/JSONtoObj.c @@ -1,3 +1,40 @@ +/* +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ESN Social Software AB nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +http://code.google.com/p/stringencoders/ +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. 
+ +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. +*/ + #include "py_defines.h" #define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY #define NO_IMPORT_ARRAY @@ -5,33 +42,33 @@ #include <ultrajson.h> +//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__) +#define PRINTMARK() + typedef struct __PyObjectDecoder { - JSONObjectDecoder dec; + JSONObjectDecoder dec; - void* npyarr; // Numpy context buffer - void* npyarr_addr; // Ref to npyarr ptr to track DECREF calls - npy_intp curdim; // Current array dimension + void* npyarr; // Numpy context buffer + void* npyarr_addr; // Ref to npyarr ptr to track DECREF calls + npy_intp curdim; // Current array dimension - PyArray_Descr* dtype; + PyArray_Descr* dtype; } PyObjectDecoder; typedef struct __NpyArrContext { - PyObject* ret; - PyObject* labels[2]; - PyArray_Dims shape; + PyObject* ret; + PyObject* labels[2]; + PyArray_Dims shape; - PyObjectDecoder* dec; + PyObjectDecoder* dec; - npy_intp i; - npy_intp elsize; - npy_intp elcount; + npy_intp i; + npy_intp elsize; + npy_intp elcount; } NpyArrContext; -//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__) -#define PRINTMARK() - // Numpy handling based on numpy internal code, specifically the function // PyArray_FromIter. @@ -39,638 +76,661 @@ typedef struct __NpyArrContext // to ensure the compiler catches any errors // standard numpy array handling -JSOBJ Object_npyNewArray(void* decoder); -JSOBJ Object_npyEndArray(JSOBJ obj); -int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value); +JSOBJ Object_npyNewArray(void *prv, void* decoder); +JSOBJ Object_npyEndArray(void *prv, JSOBJ obj); +int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value); // for more complex dtypes (object and string) fill a standard Python list // and convert to a numpy array when done. 
-JSOBJ Object_npyNewArrayList(void* decoder); -JSOBJ Object_npyEndArrayList(JSOBJ obj); -int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value); +JSOBJ Object_npyNewArrayList(void *prv, void* decoder); +JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj); +int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value); // labelled support, encode keys and values of JS object into separate numpy // arrays -JSOBJ Object_npyNewObject(void* decoder); -JSOBJ Object_npyEndObject(JSOBJ obj); -int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value); - +JSOBJ Object_npyNewObject(void *prv, void* decoder); +JSOBJ Object_npyEndObject(void *prv, JSOBJ obj); +int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value); // free the numpy context buffer void Npy_releaseContext(NpyArrContext* npyarr) { - PRINTMARK(); - if (npyarr) + PRINTMARK(); + if (npyarr) + { + if (npyarr->shape.ptr) { - if (npyarr->shape.ptr) - { - PyObject_Free(npyarr->shape.ptr); - } - if (npyarr->dec) - { - npyarr->dec->npyarr = NULL; - npyarr->dec->curdim = 0; - } - Py_XDECREF(npyarr->labels[0]); - Py_XDECREF(npyarr->labels[1]); - Py_XDECREF(npyarr->ret); - PyObject_Free(npyarr); + PyObject_Free(npyarr->shape.ptr); } + if (npyarr->dec) + { + npyarr->dec->npyarr = NULL; + npyarr->dec->curdim = 0; + } + Py_XDECREF(npyarr->labels[0]); + Py_XDECREF(npyarr->labels[1]); + Py_XDECREF(npyarr->ret); + PyObject_Free(npyarr); + } } -JSOBJ Object_npyNewArray(void* _decoder) +JSOBJ Object_npyNewArray(void *prv, void* _decoder) { - NpyArrContext* npyarr; - PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; - PRINTMARK(); - if (decoder->curdim <= 0) + NpyArrContext* npyarr; + PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; + PRINTMARK(); + if (decoder->curdim <= 0) + { + // start of array - initialise the context buffer + npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext)); + decoder->npyarr_addr = npyarr; + + if (!npyarr) { - // start of array - initialise the context 
buffer - npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext)); - decoder->npyarr_addr = npyarr; - - if (!npyarr) - { - PyErr_NoMemory(); - return NULL; - } - - npyarr->dec = decoder; - npyarr->labels[0] = npyarr->labels[1] = NULL; - - npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp)*NPY_MAXDIMS); - npyarr->shape.len = 1; - npyarr->ret = NULL; - - npyarr->elsize = 0; - npyarr->elcount = 4; - npyarr->i = 0; - } - else + PyErr_NoMemory(); + return NULL; + } + + npyarr->dec = decoder; + npyarr->labels[0] = npyarr->labels[1] = NULL; + + npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp)*NPY_MAXDIMS); + npyarr->shape.len = 1; + npyarr->ret = NULL; + + npyarr->elsize = 0; + npyarr->elcount = 4; + npyarr->i = 0; + } + else + { + // starting a new dimension continue the current array (and reshape after) + npyarr = (NpyArrContext*) decoder->npyarr; + if (decoder->curdim >= npyarr->shape.len) { - // starting a new dimension continue the current array (and reshape after) - npyarr = (NpyArrContext*) decoder->npyarr; - if (decoder->curdim >= npyarr->shape.len) - { - npyarr->shape.len++; - } + npyarr->shape.len++; } + } - npyarr->shape.ptr[decoder->curdim] = 0; - decoder->curdim++; - return npyarr; + npyarr->shape.ptr[decoder->curdim] = 0; + decoder->curdim++; + return npyarr; } PyObject* Npy_returnLabelled(NpyArrContext* npyarr) { - PyObject* ret = npyarr->ret; - npy_intp i; - - if (npyarr->labels[0] || npyarr->labels[1]) + PyObject* ret = npyarr->ret; + npy_intp i; + + if (npyarr->labels[0] || npyarr->labels[1]) + { + // finished decoding, build tuple with values and labels + ret = PyTuple_New(npyarr->shape.len+1); + for (i = 0; i < npyarr->shape.len; i++) { - // finished decoding, build tuple with values and labels - ret = PyTuple_New(npyarr->shape.len+1); - for (i = 0; i < npyarr->shape.len; i++) - { - if (npyarr->labels[i]) - { - PyTuple_SET_ITEM(ret, i+1, npyarr->labels[i]); - npyarr->labels[i] = NULL; - } - else - { - Py_INCREF(Py_None); - 
PyTuple_SET_ITEM(ret, i+1, Py_None); - } - } - PyTuple_SET_ITEM(ret, 0, npyarr->ret); - } - - return ret; + if (npyarr->labels[i]) + { + PyTuple_SET_ITEM(ret, i+1, npyarr->labels[i]); + npyarr->labels[i] = NULL; + } + else + { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(ret, i+1, Py_None); + } + } + PyTuple_SET_ITEM(ret, 0, npyarr->ret); + } + + return ret; } -JSOBJ Object_npyEndArray(JSOBJ obj) +JSOBJ Object_npyEndArray(void *prv, JSOBJ obj) { - PyObject *ret; - char* new_data; - NpyArrContext* npyarr = (NpyArrContext*) obj; - int emptyType = NPY_DEFAULT_TYPE; - npy_intp i; - PRINTMARK(); - if (!npyarr) - { - return NULL; - } + PyObject *ret; + char* new_data; + NpyArrContext* npyarr = (NpyArrContext*) obj; + int emptyType = NPY_DEFAULT_TYPE; + npy_intp i; + PRINTMARK(); + if (!npyarr) + { + return NULL; + } - ret = npyarr->ret; - i = npyarr->i; + ret = npyarr->ret; + i = npyarr->i; - npyarr->dec->curdim--; + npyarr->dec->curdim--; - if (i == 0 || !npyarr->ret) { - // empty array would not have been initialised so do it now. - if (npyarr->dec->dtype) - { - emptyType = npyarr->dec->dtype->type_num; - } - npyarr->ret = ret = PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0); - } - else if (npyarr->dec->curdim <= 0) + if (i == 0 || !npyarr->ret) { + // empty array would not have been initialised so do it now. 
+ if (npyarr->dec->dtype) { - // realloc to final size - new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize); - if (new_data == NULL) { - PyErr_NoMemory(); - Npy_releaseContext(npyarr); - return NULL; - } - ((PyArrayObject*) ret)->data = (void*) new_data; - // PyArray_BYTES(ret) = new_data; - } - - if (npyarr->dec->curdim <= 0) + emptyType = npyarr->dec->dtype->type_num; + } + npyarr->ret = ret = PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0); + } + else if (npyarr->dec->curdim <= 0) + { + // realloc to final size + new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize); + if (new_data == NULL) { + PyErr_NoMemory(); + Npy_releaseContext(npyarr); + return NULL; + } + ((PyArrayObject*) ret)->data = (void*) new_data; + // PyArray_BYTES(ret) = new_data; + } + + if (npyarr->dec->curdim <= 0) + { + // finished decoding array, reshape if necessary + if (npyarr->shape.len > 1) { - // finished decoding array, reshape if necessary - if (npyarr->shape.len > 1) - { - npyarr->ret = PyArray_Newshape((PyArrayObject*) ret, &npyarr->shape, NPY_ANYORDER); - Py_DECREF(ret); - } + npyarr->ret = PyArray_Newshape((PyArrayObject*) ret, &npyarr->shape, NPY_ANYORDER); + Py_DECREF(ret); + } - ret = Npy_returnLabelled(npyarr); + ret = Npy_returnLabelled(npyarr); - npyarr->ret = NULL; - Npy_releaseContext(npyarr); - } + npyarr->ret = NULL; + Npy_releaseContext(npyarr); + } - return ret; + return ret; } -int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value) +int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value) { - PyObject* type; - PyArray_Descr* dtype; - npy_intp i; - char *new_data, *item; - NpyArrContext* npyarr = (NpyArrContext*) obj; - PRINTMARK(); - if (!npyarr) + PyObject* type; + PyArray_Descr* dtype; + npy_intp i; + char *new_data, *item; + NpyArrContext* npyarr = (NpyArrContext*) obj; + PRINTMARK(); + if (!npyarr) + { + return 0; + } + + i = npyarr->i; + + npyarr->shape.ptr[npyarr->dec->curdim-1]++; + + if 
(PyArray_Check((PyObject*)value)) + { + // multidimensional array, keep decoding values. + return 1; + } + + if (!npyarr->ret) + { + // Array not initialised yet. + // We do it here so we can 'sniff' the data type if none was provided + if (!npyarr->dec->dtype) + { + type = PyObject_Type(value); + if(!PyArray_DescrConverter(type, &dtype)) + { + Py_DECREF(type); + goto fail; + } + Py_INCREF(dtype); + Py_DECREF(type); + } + else { - return 0; + dtype = PyArray_DescrNew(npyarr->dec->dtype); } - i = npyarr->i; + // If it's an object or string then fill a Python list and subsequently + // convert. Otherwise we would need to somehow mess about with + // reference counts when renewing memory. + npyarr->elsize = dtype->elsize; + if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0) + { + Py_XDECREF(dtype); - npyarr->shape.ptr[npyarr->dec->curdim-1]++; + if (npyarr->dec->curdim > 1) + { + PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy"); + goto fail; + } + npyarr->elcount = 0; + npyarr->ret = PyList_New(0); + if (!npyarr->ret) + { + goto fail; + } + ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArrayList; + ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayListAddItem; + ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArrayList; + return Object_npyArrayListAddItem(prv, obj, value); + } + + npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1, + &npyarr->elcount, NULL,NULL, 0, NULL); - if (PyArray_Check((PyObject*)value)) + if (!npyarr->ret) { - // multidimensional array, keep decoding values. - return 1; + goto fail; } + } - if (!npyarr->ret) + if (i >= npyarr->elcount) { + // Grow PyArray_DATA(ret): + // this is similar for the strategy for PyListObject, but we use + // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... + if (npyarr->elsize == 0) { - // Array not initialised yet. 
- // We do it here so we can 'sniff' the data type if none was provided - if (!npyarr->dec->dtype) - { - type = PyObject_Type(value); - if(!PyArray_DescrConverter(type, &dtype)) - { - Py_DECREF(type); - goto fail; - } - Py_INCREF(dtype); - Py_DECREF(type); - } - else - { - dtype = PyArray_DescrNew(npyarr->dec->dtype); - } - - // If it's an object or string then fill a Python list and subsequently - // convert. Otherwise we would need to somehow mess about with - // reference counts when renewing memory. - npyarr->elsize = dtype->elsize; - if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0) - { - Py_XDECREF(dtype); - - if (npyarr->dec->curdim > 1) - { - PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy"); - goto fail; - } - npyarr->elcount = 0; - npyarr->ret = PyList_New(0); - if (!npyarr->ret) - { - goto fail; - } - ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArrayList; - ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayListAddItem; - ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArrayList; - return Object_npyArrayListAddItem(obj, value); - } - - npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1, - &npyarr->elcount, NULL,NULL, 0, NULL); - - if (!npyarr->ret) - { - goto fail; - } + PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy"); + goto fail; } - if (i >= npyarr->elcount) { - // Grow PyArray_DATA(ret): - // this is similar for the strategy for PyListObject, but we use - // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... - if (npyarr->elsize == 0) - { - PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy"); - goto fail; - } - - npyarr->elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (npyarr->elcount <= NPY_MAX_INTP/npyarr->elsize) { - new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret), npyarr->elcount * npyarr->elsize); - } - else { - PyErr_NoMemory(); - goto fail; - } - ((PyArrayObject*) npyarr->ret)->data = (void*) new_data; - - // PyArray_BYTES(npyarr->ret) = new_data; + npyarr->elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; + if (npyarr->elcount <= NPY_MAX_INTP/npyarr->elsize) { + new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret), npyarr->elcount * npyarr->elsize); + } + else { + PyErr_NoMemory(); + goto fail; } + ((PyArrayObject*) npyarr->ret)->data = (void*) new_data; - PyArray_DIMS(npyarr->ret)[0] = i + 1; + // PyArray_BYTES(npyarr->ret) = new_data; + } - if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL - || PyArray_SETITEM(npyarr->ret, item, value) == -1) { - goto fail; - } + PyArray_DIMS(npyarr->ret)[0] = i + 1; - Py_DECREF( (PyObject *) value); - npyarr->i++; - return 1; + if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL + || PyArray_SETITEM(npyarr->ret, item, value) == -1) { + goto fail; + } + + Py_DECREF( (PyObject *) value); + npyarr->i++; + return 1; fail: - Npy_releaseContext(npyarr); - return 0; + Npy_releaseContext(npyarr); + return 0; } -JSOBJ Object_npyNewArrayList(void* _decoder) +JSOBJ Object_npyNewArrayList(void *prv, void* _decoder) { - PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; - PRINTMARK(); - PyErr_SetString(PyExc_ValueError, "nesting not supported for object or variable length dtypes"); - Npy_releaseContext(decoder->npyarr); - return NULL; + PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; + PRINTMARK(); + PyErr_SetString(PyExc_ValueError, "nesting not supported for object or variable length dtypes"); + Npy_releaseContext(decoder->npyarr); + return NULL; } -JSOBJ Object_npyEndArrayList(JSOBJ obj) +JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj) { - PyObject *list, *ret; - NpyArrContext* npyarr = (NpyArrContext*) obj; - PRINTMARK(); - if (!npyarr) - { - return NULL; - } 
+ PyObject *list, *ret; + NpyArrContext* npyarr = (NpyArrContext*) obj; + PRINTMARK(); + if (!npyarr) + { + return NULL; + } - // convert decoded list to numpy array - list = (PyObject *) npyarr->ret; - npyarr->ret = PyArray_FROM_O(list); + // convert decoded list to numpy array + list = (PyObject *) npyarr->ret; + npyarr->ret = PyArray_FROM_O(list); - ret = Npy_returnLabelled(npyarr); - npyarr->ret = list; + ret = Npy_returnLabelled(npyarr); + npyarr->ret = list; - ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArray; - ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem; - ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArray; - Npy_releaseContext(npyarr); - return ret; + ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArray; + ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem; + ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArray; + Npy_releaseContext(npyarr); + return ret; } -int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value) +int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value) { - NpyArrContext* npyarr = (NpyArrContext*) obj; - PRINTMARK(); - if (!npyarr) - { - return 0; - } - PyList_Append((PyObject*) npyarr->ret, value); - Py_DECREF( (PyObject *) value); - npyarr->elcount++; - return 1; + NpyArrContext* npyarr = (NpyArrContext*) obj; + PRINTMARK(); + if (!npyarr) + { + return 0; + } + PyList_Append((PyObject*) npyarr->ret, value); + Py_DECREF( (PyObject *) value); + npyarr->elcount++; + return 1; } -JSOBJ Object_npyNewObject(void* _decoder) +JSOBJ Object_npyNewObject(void *prv, void* _decoder) { - PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; - PRINTMARK(); - if (decoder->curdim > 1) - { - PyErr_SetString(PyExc_ValueError, "labels only supported up to 2 dimensions"); - return NULL; - } + PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; + PRINTMARK(); + if (decoder->curdim > 1) + { + PyErr_SetString(PyExc_ValueError, "labels only 
supported up to 2 dimensions"); + return NULL; + } - return ((JSONObjectDecoder*)decoder)->newArray(decoder); + return ((JSONObjectDecoder*)decoder)->newArray(prv, decoder); } -JSOBJ Object_npyEndObject(JSOBJ obj) +JSOBJ Object_npyEndObject(void *prv, JSOBJ obj) { - PyObject *list; - npy_intp labelidx; - NpyArrContext* npyarr = (NpyArrContext*) obj; - PRINTMARK(); - if (!npyarr) - { - return NULL; - } + PyObject *list; + npy_intp labelidx; + NpyArrContext* npyarr = (NpyArrContext*) obj; + PRINTMARK(); + if (!npyarr) + { + return NULL; + } - labelidx = npyarr->dec->curdim-1; + labelidx = npyarr->dec->curdim-1; - list = npyarr->labels[labelidx]; - if (list) - { - npyarr->labels[labelidx] = PyArray_FROM_O(list); - Py_DECREF(list); - } + list = npyarr->labels[labelidx]; + if (list) + { + npyarr->labels[labelidx] = PyArray_FROM_O(list); + Py_DECREF(list); + } - return (PyObject*) ((JSONObjectDecoder*)npyarr->dec)->endArray(obj); + return (PyObject*) ((JSONObjectDecoder*)npyarr->dec)->endArray(prv, obj); } -int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value) +int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) { - PyObject *label; - npy_intp labelidx; - // add key to label array, value to values array - NpyArrContext* npyarr = (NpyArrContext*) obj; - PRINTMARK(); - if (!npyarr) - { - return 0; - } - - label = (PyObject*) name; - labelidx = npyarr->dec->curdim-1; - - if (!npyarr->labels[labelidx]) - { - npyarr->labels[labelidx] = PyList_New(0); - } - - // only fill label array once, assumes all column labels are the same - // for 2-dimensional arrays. 
- if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) - { - PyList_Append(npyarr->labels[labelidx], label); - } - - if(((JSONObjectDecoder*)npyarr->dec)->arrayAddItem(obj, value)) - { - Py_DECREF(label); - return 1; - } + PyObject *label; + npy_intp labelidx; + // add key to label array, value to values array + NpyArrContext* npyarr = (NpyArrContext*) obj; + PRINTMARK(); + if (!npyarr) + { return 0; + } + + label = (PyObject*) name; + labelidx = npyarr->dec->curdim-1; + + if (!npyarr->labels[labelidx]) + { + npyarr->labels[labelidx] = PyList_New(0); + } + + // only fill label array once, assumes all column labels are the same + // for 2-dimensional arrays. + if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) + { + PyList_Append(npyarr->labels[labelidx], label); + } + + if(((JSONObjectDecoder*)npyarr->dec)->arrayAddItem(prv, obj, value)) + { + Py_DECREF(label); + return 1; + } + return 0; } -int Object_objectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value) +int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) { - PyDict_SetItem (obj, name, value); - Py_DECREF( (PyObject *) name); - Py_DECREF( (PyObject *) value); - return 1; + PyDict_SetItem (obj, name, value); + Py_DECREF( (PyObject *) name); + Py_DECREF( (PyObject *) value); + return 1; } -int Object_arrayAddItem(JSOBJ obj, JSOBJ value) +int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value) { - PyList_Append(obj, value); - Py_DECREF( (PyObject *) value); - return 1; + PyList_Append(obj, value); + Py_DECREF( (PyObject *) value); + return 1; } -JSOBJ Object_newString(wchar_t *start, wchar_t *end) +JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end) { - return PyUnicode_FromWideChar (start, (end - start)); + return PyUnicode_FromWideChar (start, (end - start)); } -JSOBJ Object_newTrue(void) +JSOBJ Object_newTrue(void *prv) { - Py_RETURN_TRUE; + Py_RETURN_TRUE; } -JSOBJ Object_newFalse(void) +JSOBJ Object_newFalse(void *prv) { - Py_RETURN_FALSE; + 
Py_RETURN_FALSE; } -JSOBJ Object_newNull(void) +JSOBJ Object_newNull(void *prv) { - Py_RETURN_NONE; + Py_RETURN_NONE; } -JSOBJ Object_newObject(void* decoder) +JSOBJ Object_newObject(void *prv, void* decoder) { - return PyDict_New(); + return PyDict_New(); } -JSOBJ Object_endObject(JSOBJ obj) +JSOBJ Object_endObject(void *prv, JSOBJ obj) { - return obj; + return obj; } -JSOBJ Object_newArray(void* decoder) +JSOBJ Object_newArray(void *prv, void* decoder) { - return PyList_New(0); + return PyList_New(0); } -JSOBJ Object_endArray(JSOBJ obj) +JSOBJ Object_endArray(void *prv, JSOBJ obj) { - return obj; + return obj; } -JSOBJ Object_newInteger(JSINT32 value) +JSOBJ Object_newInteger(void *prv, JSINT32 value) { - return PyInt_FromLong( (long) value); + return PyInt_FromLong( (long) value); } -JSOBJ Object_newLong(JSINT64 value) +JSOBJ Object_newLong(void *prv, JSINT64 value) { - return PyLong_FromLongLong (value); + return PyLong_FromLongLong (value); } -JSOBJ Object_newDouble(double value) +JSOBJ Object_newDouble(void *prv, double value) { - return PyFloat_FromDouble(value); + return PyFloat_FromDouble(value); } -static void Object_releaseObject(JSOBJ obj, void* _decoder) +static void Object_releaseObject(void *prv, JSOBJ obj, void* _decoder) { - PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; - if (obj != decoder->npyarr_addr) - { - Py_XDECREF( ((PyObject *)obj)); - } + PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder; + if (obj != decoder->npyarr_addr) + { + Py_XDECREF( ((PyObject *)obj)); + } } +static char *g_kwlist[] = {"obj", "precise_float", "numpy", "labelled", "dtype", NULL}; PyObject* JSONToObj(PyObject* self, PyObject *args, PyObject *kwargs) { - PyObject *ret; - PyObject *sarg; - JSONObjectDecoder *decoder; - PyObjectDecoder pyDecoder; - PyArray_Descr *dtype = NULL; - static char *kwlist[] = { "obj", "numpy", "labelled", "dtype", NULL}; - int numpy = 0, labelled = 0, decref = 0; - // PRINTMARK(); - - JSONObjectDecoder dec = { - 
Object_newString, - Object_objectAddKey, - Object_arrayAddItem, - Object_newTrue, - Object_newFalse, - Object_newNull, - Object_newObject, - Object_endObject, - Object_newArray, - Object_endArray, - Object_newInteger, - Object_newLong, - Object_newDouble, - Object_releaseObject, - PyObject_Malloc, - PyObject_Free, - PyObject_Realloc, - }; - pyDecoder.dec = dec; - pyDecoder.curdim = 0; - pyDecoder.npyarr = NULL; - pyDecoder.npyarr_addr = NULL; - - decoder = (JSONObjectDecoder*) &pyDecoder; - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|iiO&", kwlist, &sarg, &numpy, &labelled, PyArray_DescrConverter2, &dtype)) + PyObject *ret; + PyObject *sarg; + PyObject *arg; + PyObject *opreciseFloat = NULL; + JSONObjectDecoder *decoder; + PyObjectDecoder pyDecoder; + PyArray_Descr *dtype = NULL; + int numpy = 0, labelled = 0; + + JSONObjectDecoder dec = + { + Object_newString, + Object_objectAddKey, + Object_arrayAddItem, + Object_newTrue, + Object_newFalse, + Object_newNull, + Object_newObject, + Object_endObject, + Object_newArray, + Object_endArray, + Object_newInteger, + Object_newLong, + Object_newDouble, + Object_releaseObject, + PyObject_Malloc, + PyObject_Free, + PyObject_Realloc + }; + + dec.preciseFloat = 0; + dec.prv = NULL; + + pyDecoder.dec = dec; + pyDecoder.curdim = 0; + pyDecoder.npyarr = NULL; + pyDecoder.npyarr_addr = NULL; + + decoder = (JSONObjectDecoder*) &pyDecoder; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiiO&", g_kwlist, &arg, &opreciseFloat, &numpy, &labelled, PyArray_DescrConverter2, &dtype)) + { + Npy_releaseContext(pyDecoder.npyarr); + return NULL; + } + + if (opreciseFloat && PyObject_IsTrue(opreciseFloat)) + { + decoder->preciseFloat = 1; + } + + if (PyString_Check(arg)) + { + sarg = arg; + } + else + if (PyUnicode_Check(arg)) + { + sarg = PyUnicode_AsUTF8String(arg); + if (sarg == NULL) { - Npy_releaseContext(pyDecoder.npyarr); - return NULL; + //Exception raised above us by codec according to docs + return NULL; } + } + else + 
{ + PyErr_Format(PyExc_TypeError, "Expected String or Unicode"); + return NULL; + } - if (PyUnicode_Check(sarg)) - { - sarg = PyUnicode_AsUTF8String(sarg); - if (sarg == NULL) - { - //Exception raised above us by codec according to docs - return NULL; - } - decref = 1; - } - else - if (!PyString_Check(sarg)) - { - PyErr_Format(PyExc_TypeError, "Expected String or Unicode"); - return NULL; - } + decoder->errorStr = NULL; + decoder->errorOffset = NULL; - if (numpy) + if (numpy) + { + pyDecoder.dtype = dtype; + decoder->newArray = Object_npyNewArray; + decoder->endArray = Object_npyEndArray; + decoder->arrayAddItem = Object_npyArrayAddItem; + + if (labelled) { - pyDecoder.dtype = dtype; - decoder->newArray = Object_npyNewArray; - decoder->endArray = Object_npyEndArray; - decoder->arrayAddItem = Object_npyArrayAddItem; - - if (labelled) - { - decoder->newObject = Object_npyNewObject; - decoder->endObject = Object_npyEndObject; - decoder->objectAddKey = Object_npyObjectAddKey; - } + decoder->newObject = Object_npyNewObject; + decoder->endObject = Object_npyEndObject; + decoder->objectAddKey = Object_npyObjectAddKey; } + } - decoder->errorStr = NULL; - decoder->errorOffset = NULL; + ret = JSON_DecodeObject(decoder, PyString_AS_STRING(sarg), PyString_GET_SIZE(sarg)); - PRINTMARK(); - ret = JSON_DecodeObject(decoder, PyString_AS_STRING(sarg), PyString_GET_SIZE(sarg)); - PRINTMARK(); + if (sarg != arg) + { + Py_DECREF(sarg); + } - if (decref) + if (PyErr_Occurred()) + { + if (ret) { - Py_DECREF(sarg); + Py_DECREF( (PyObject *) ret); } + Npy_releaseContext(pyDecoder.npyarr); + return NULL; + } - if (PyErr_Occurred()) - { - return NULL; - } + if (decoder->errorStr) + { + /* + FIXME: It's possible to give a much nicer error message here with actual failing element in input etc*/ - if (decoder->errorStr) - { - /*FIXME: It's possible to give a much nicer error message here with actual failing element in input etc*/ - PyErr_Format (PyExc_ValueError, "%s", decoder->errorStr); - 
Py_XDECREF( (PyObject *) ret); - Npy_releaseContext(pyDecoder.npyarr); + PyErr_Format (PyExc_ValueError, "%s", decoder->errorStr); - return NULL; + if (ret) + { + Py_DECREF( (PyObject *) ret); } + Npy_releaseContext(pyDecoder.npyarr); - return ret; + return NULL; + } + + return ret; } PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs) { - PyObject *file; - PyObject *read; - PyObject *string; - PyObject *result; - PyObject *argtuple; - - if (!PyArg_ParseTuple (args, "O", &file)) { - return NULL; - } - - if (!PyObject_HasAttrString (file, "read")) - { - PyErr_Format (PyExc_TypeError, "expected file"); - return NULL; - } + PyObject *read; + PyObject *string; + PyObject *result; + PyObject *file = NULL; + PyObject *argtuple; + + if (!PyArg_ParseTuple (args, "O", &file)) + { + return NULL; + } - read = PyObject_GetAttrString (file, "read"); + if (!PyObject_HasAttrString (file, "read")) + { + PyErr_Format (PyExc_TypeError, "expected file"); + return NULL; + } - if (!PyCallable_Check (read)) { - Py_XDECREF(read); - PyErr_Format (PyExc_TypeError, "expected file"); - return NULL; - } + read = PyObject_GetAttrString (file, "read"); - string = PyObject_CallObject (read, NULL); + if (!PyCallable_Check (read)) { Py_XDECREF(read); + PyErr_Format (PyExc_TypeError, "expected file"); + return NULL; + } - if (string == NULL) - { - return NULL; - } + string = PyObject_CallObject (read, NULL); + Py_XDECREF(read); - argtuple = PyTuple_Pack(1, string); + if (string == NULL) + { + return NULL; + } - result = JSONToObj (self, argtuple, kwargs); - Py_XDECREF(string); - Py_DECREF(argtuple); + argtuple = PyTuple_Pack(1, string); - if (result == NULL) { - return NULL; - } + result = JSONToObj (self, argtuple, kwargs); - return result; -} + Py_XDECREF(argtuple); + Py_XDECREF(string); + if (result == NULL) { + return NULL; + } + + return result; +} diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c index 4fdd8dc91ab04..89d3c203fbb7d 
100644 --- a/pandas/src/ujson/python/objToJSON.c +++ b/pandas/src/ujson/python/objToJSON.c @@ -1,3 +1,39 @@ +/* +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the ESN Social Software AB nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +http://code.google.com/p/stringencoders/ +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. 
+ +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +* Copyright (c) 1988-1993 The Regents of the University of California. +* Copyright (c) 1994 Sun Microsystems, Inc. +*/ #define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY #include "py_defines.h" @@ -8,6 +44,9 @@ #include <datetime.h> #include <ultrajson.h> +#define EPOCH_ORD 719163 +static PyObject* type_decimal; + #define NPY_JSON_BUFSIZE 32768 static PyObject* cls_dataframe; @@ -16,55 +55,54 @@ static PyObject* cls_index; typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, void *outValue, size_t *_outLen); - #if (PY_VERSION_HEX < 0x02050000) typedef ssize_t Py_ssize_t; #endif typedef struct __NpyArrContext { - PyObject *array; - char* dataptr; - int was_datetime64; - int curdim; // current dimension in array's order - int stridedim; // dimension we are striding over - int inc; // stride dimension increment (+/- 1) - npy_intp dim; - npy_intp stride; - npy_intp ndim; - npy_intp index[NPY_MAXDIMS]; - PyArray_GetItemFunc* getitem; - - char** rowLabels; - char** columnLabels; + PyObject *array; + char* dataptr; + int was_datetime64; + int curdim; // current dimension in array's order + int stridedim; // dimension we are striding over + int inc; // stride dimension increment (+/- 1) + npy_intp dim; + npy_intp stride; + npy_intp ndim; + npy_intp index[NPY_MAXDIMS]; + PyArray_GetItemFunc* getitem; + + char** rowLabels; + char** columnLabels; } NpyArrContext; typedef struct __TypeContext { - JSPFN_ITERBEGIN iterBegin; - JSPFN_ITEREND iterEnd; - JSPFN_ITERNEXT iterNext; - JSPFN_ITERGETNAME iterGetName; - JSPFN_ITERGETVALUE iterGetValue; - PFN_PyTypeToJSON PyTypeToJSON; - PyObject *newObj; - PyObject *dictObj; - Py_ssize_t index; - Py_ssize_t size; - PyObject *itemValue; - PyObject *itemName; - PyObject *attrList; - char *citemName; - - JSINT64 longValue; - - NpyArrContext *npyarr; - int transpose; - char** rowLabels; - char** columnLabels; - npy_intp 
rowLabelsLen; - npy_intp columnLabelsLen; - + JSPFN_ITERBEGIN iterBegin; + JSPFN_ITEREND iterEnd; + JSPFN_ITERNEXT iterNext; + JSPFN_ITERGETNAME iterGetName; + JSPFN_ITERGETVALUE iterGetValue; + PFN_PyTypeToJSON PyTypeToJSON; + PyObject *newObj; + PyObject *dictObj; + Py_ssize_t index; + Py_ssize_t size; + PyObject *itemValue; + PyObject *itemName; + PyObject *attrList; + PyObject *iterator; + + JSINT64 longValue; + + char *citemName; + NpyArrContext *npyarr; + int transpose; + char** rowLabels; + char** columnLabels; + npy_intp rowLabelsLen; + npy_intp columnLabelsLen; } TypeContext; typedef struct __PyObjectEncoder @@ -83,18 +121,18 @@ typedef struct __PyObjectEncoder struct PyDictIterState { - PyObject *keys; - size_t i; - size_t sz; + PyObject *keys; + size_t i; + size_t sz; }; enum PANDAS_FORMAT { - SPLIT, - RECORDS, - INDEX, - COLUMNS, - VALUES + SPLIT, + RECORDS, + INDEX, + COLUMNS, + VALUES }; //#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__) @@ -106,40 +144,45 @@ void initObjToJSON(void) int initObjToJSON(void) #endif { - PyObject *mod_frame; - PyDateTime_IMPORT; + PyObject *mod_frame; + PyObject* mod_decimal = PyImport_ImportModule("decimal"); + type_decimal = PyObject_GetAttrString(mod_decimal, "Decimal"); + Py_INCREF(type_decimal); + Py_DECREF(mod_decimal); - mod_frame = PyImport_ImportModule("pandas.core.frame"); - if (mod_frame) - { - cls_dataframe = PyObject_GetAttrString(mod_frame, "DataFrame"); - cls_index = PyObject_GetAttrString(mod_frame, "Index"); - cls_series = PyObject_GetAttrString(mod_frame, "Series"); - Py_DECREF(mod_frame); - } + PyDateTime_IMPORT; + + mod_frame = PyImport_ImportModule("pandas.core.frame"); + if (mod_frame) + { + cls_dataframe = PyObject_GetAttrString(mod_frame, "DataFrame"); + cls_index = PyObject_GetAttrString(mod_frame, "Index"); + cls_series = PyObject_GetAttrString(mod_frame, "Series"); + Py_DECREF(mod_frame); + } - /* Initialise numpy API */ - import_array(); + /* Initialise numpy API */ + 
import_array(); } static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PyObject *obj = (PyObject *) _obj; - *((JSINT32 *) outValue) = PyInt_AS_LONG (obj); - return NULL; + PyObject *obj = (PyObject *) _obj; + *((JSINT32 *) outValue) = PyInt_AS_LONG (obj); + return NULL; } static void *PyIntToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PyObject *obj = (PyObject *) _obj; - *((JSINT64 *) outValue) = PyInt_AS_LONG (obj); - return NULL; + PyObject *obj = (PyObject *) _obj; + *((JSINT64 *) outValue) = PyInt_AS_LONG (obj); + return NULL; } static void *PyLongToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - *((JSINT64 *) outValue) = GET_TC(tc)->longValue; - return NULL; + *((JSINT64 *) outValue) = GET_TC(tc)->longValue; + return NULL; } static void *NpyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) @@ -151,27 +194,27 @@ static void *NpyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, s static void *PyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PyObject *obj = (PyObject *) _obj; - *((double *) outValue) = PyFloat_AS_DOUBLE (obj); - return NULL; + PyObject *obj = (PyObject *) _obj; + *((double *) outValue) = PyFloat_AsDouble (obj); + return NULL; } static void *PyStringToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PyObject *obj = (PyObject *) _obj; - *_outLen = PyString_GET_SIZE(obj); - return PyString_AS_STRING(obj); + PyObject *obj = (PyObject *) _obj; + *_outLen = PyString_GET_SIZE(obj); + return PyString_AS_STRING(obj); } static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PyObject *obj = (PyObject *) _obj; - PyObject *newObj = PyUnicode_AsUTF8String (obj); + PyObject *obj = (PyObject *) _obj; + PyObject *newObj = PyUnicode_EncodeUTF8 (PyUnicode_AS_UNICODE(obj), PyUnicode_GET_SIZE(obj), NULL); - 
GET_TC(tc)->newObj = newObj; + GET_TC(tc)->newObj = newObj; - *_outLen = PyString_GET_SIZE(newObj); - return PyString_AS_STRING(newObj); + *_outLen = PyString_GET_SIZE(newObj); + return PyString_AS_STRING(newObj); } static void *NpyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) @@ -183,32 +226,32 @@ static void *NpyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, static void *PyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; - PyObject *obj = (PyObject *) _obj; + pandas_datetimestruct dts; + PyObject *obj = (PyObject *) _obj; - dts.year = PyDateTime_GET_YEAR(obj); - dts.month = PyDateTime_GET_MONTH(obj); - dts.day = PyDateTime_GET_DAY(obj); - dts.hour = PyDateTime_DATE_GET_HOUR(obj); - dts.min = PyDateTime_DATE_GET_MINUTE(obj); - dts.sec = PyDateTime_DATE_GET_SECOND(obj); - dts.us = PyDateTime_DATE_GET_MICROSECOND(obj); - dts.ps = dts.as = 0; - *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts); - return NULL; + dts.year = PyDateTime_GET_YEAR(obj); + dts.month = PyDateTime_GET_MONTH(obj); + dts.day = PyDateTime_GET_DAY(obj); + dts.hour = PyDateTime_DATE_GET_HOUR(obj); + dts.min = PyDateTime_DATE_GET_MINUTE(obj); + dts.sec = PyDateTime_DATE_GET_SECOND(obj); + dts.us = PyDateTime_DATE_GET_MICROSECOND(obj); + dts.ps = dts.as = 0; + *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts); + return NULL; } static void *PyDateToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; - PyObject *obj = (PyObject *) _obj; + pandas_datetimestruct dts; + PyObject *obj = (PyObject *) _obj; - dts.year = PyDateTime_GET_YEAR(obj); - dts.month = PyDateTime_GET_MONTH(obj); - dts.day = PyDateTime_GET_DAY(obj); - dts.hour = dts.min = dts.sec = dts.ps = dts.as = 0; - *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts); - 
return NULL; + dts.year = PyDateTime_GET_YEAR(obj); + dts.month = PyDateTime_GET_MONTH(obj); + dts.day = PyDateTime_GET_DAY(obj); + dts.hour = dts.min = dts.sec = dts.ps = dts.as = 0; + *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts); + return NULL; } //============================================================================= @@ -216,200 +259,200 @@ static void *PyDateToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size //============================================================================= int NpyArr_iterNextNone(JSOBJ _obj, JSONTypeContext *tc) { - return 0; + return 0; } void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { - PyArrayObject *obj; - PyArray_Descr *dtype; - NpyArrContext *npyarr; + PyArrayObject *obj; + PyArray_Descr *dtype; + NpyArrContext *npyarr; + + if (GET_TC(tc)->newObj) + { + obj = (PyArrayObject *) GET_TC(tc)->newObj; + } + else + { + obj = (PyArrayObject *) _obj; + } + + if (PyArray_SIZE(obj) > 0) + { + PRINTMARK(); + npyarr = PyObject_Malloc(sizeof(NpyArrContext)); + GET_TC(tc)->npyarr = npyarr; - if (GET_TC(tc)->newObj) + if (!npyarr) { - obj = (PyArrayObject *) GET_TC(tc)->newObj; - } - else - { - obj = (PyArrayObject *) _obj; + PyErr_NoMemory(); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + return; } - if (PyArray_SIZE(obj) > 0) - { - PRINTMARK(); - npyarr = PyObject_Malloc(sizeof(NpyArrContext)); - GET_TC(tc)->npyarr = npyarr; - - if (!npyarr) - { - PyErr_NoMemory(); - GET_TC(tc)->iterNext = NpyArr_iterNextNone; - return; - } - - // uber hack to support datetime64[ns] arrays - if (PyArray_DESCR(obj)->type_num == NPY_DATETIME) { - npyarr->was_datetime64 = 1; - dtype = PyArray_DescrFromType(NPY_INT64); - obj = (PyArrayObject *) PyArray_CastToType(obj, dtype, 0); - } else { - npyarr->was_datetime64 = 0; - } + // uber hack to support datetime64[ns] arrays + if (PyArray_DESCR(obj)->type_num == NPY_DATETIME) { + npyarr->was_datetime64 = 1; + dtype = 
PyArray_DescrFromType(NPY_INT64); + obj = (PyArrayObject *) PyArray_CastToType(obj, dtype, 0); + } else { + npyarr->was_datetime64 = 0; + } - npyarr->array = (PyObject*) obj; - npyarr->getitem = (PyArray_GetItemFunc*) PyArray_DESCR(obj)->f->getitem; - npyarr->dataptr = PyArray_DATA(obj); - npyarr->ndim = PyArray_NDIM(obj) - 1; - npyarr->curdim = 0; + npyarr->array = (PyObject*) obj; + npyarr->getitem = (PyArray_GetItemFunc*) PyArray_DESCR(obj)->f->getitem; + npyarr->dataptr = PyArray_DATA(obj); + npyarr->ndim = PyArray_NDIM(obj) - 1; + npyarr->curdim = 0; - if (GET_TC(tc)->transpose) - { - npyarr->dim = PyArray_DIM(obj, npyarr->ndim); - npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim); - npyarr->stridedim = npyarr->ndim; - npyarr->index[npyarr->ndim] = 0; - npyarr->inc = -1; - } - else - { - npyarr->dim = PyArray_DIM(obj, 0); - npyarr->stride = PyArray_STRIDE(obj, 0); - npyarr->stridedim = 0; - npyarr->index[0] = 0; - npyarr->inc = 1; - } - - npyarr->columnLabels = GET_TC(tc)->columnLabels; - npyarr->rowLabels = GET_TC(tc)->rowLabels; + if (GET_TC(tc)->transpose) + { + npyarr->dim = PyArray_DIM(obj, npyarr->ndim); + npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim); + npyarr->stridedim = npyarr->ndim; + npyarr->index[npyarr->ndim] = 0; + npyarr->inc = -1; } else { - GET_TC(tc)->iterNext = NpyArr_iterNextNone; + npyarr->dim = PyArray_DIM(obj, 0); + npyarr->stride = PyArray_STRIDE(obj, 0); + npyarr->stridedim = 0; + npyarr->index[0] = 0; + npyarr->inc = 1; } - PRINTMARK(); + + npyarr->columnLabels = GET_TC(tc)->columnLabels; + npyarr->rowLabels = GET_TC(tc)->rowLabels; + } + else + { + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + } + PRINTMARK(); } void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - NpyArrContext *npyarr = GET_TC(tc)->npyarr; - - if (npyarr) - { - if (npyarr->was_datetime64) { - Py_XDECREF(npyarr->array); - } + NpyArrContext *npyarr = GET_TC(tc)->npyarr; - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - 
} - GET_TC(tc)->itemValue = NULL; + if (npyarr) + { + if (npyarr->was_datetime64) { + Py_XDECREF(npyarr->array); + } - PyObject_Free(npyarr); + if (GET_TC(tc)->itemValue != npyarr->array) + { + Py_XDECREF(GET_TC(tc)->itemValue); } - PRINTMARK(); + GET_TC(tc)->itemValue = NULL; + + PyObject_Free(npyarr); + } + PRINTMARK(); } void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - PRINTMARK(); + PRINTMARK(); } void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - NpyArrContext* npyarr; - PRINTMARK(); - // finished this dimension, reset the data pointer - npyarr = GET_TC(tc)->npyarr; - npyarr->curdim--; - npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim]; - npyarr->stridedim -= npyarr->inc; - npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim); - npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim); - npyarr->dataptr += npyarr->stride; - - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = NULL; - } + NpyArrContext* npyarr; + PRINTMARK(); + // finished this dimension, reset the data pointer + npyarr = GET_TC(tc)->npyarr; + npyarr->curdim--; + npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim]; + npyarr->stridedim -= npyarr->inc; + npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim); + npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim); + npyarr->dataptr += npyarr->stride; + + if (GET_TC(tc)->itemValue != npyarr->array) + { + Py_XDECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = NULL; + } } int NpyArr_iterNextItem(JSOBJ _obj, JSONTypeContext *tc) { - NpyArrContext* npyarr; - PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; + NpyArrContext* npyarr; + PRINTMARK(); + npyarr = GET_TC(tc)->npyarr; - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = NULL; - } + if (GET_TC(tc)->itemValue != npyarr->array) + { + Py_XDECREF(GET_TC(tc)->itemValue); + 
GET_TC(tc)->itemValue = NULL; + } - if (npyarr->index[npyarr->stridedim] >= npyarr->dim) - { - return 0; - } + if (npyarr->index[npyarr->stridedim] >= npyarr->dim) + { + return 0; + } - GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); + GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); - npyarr->dataptr += npyarr->stride; - npyarr->index[npyarr->stridedim]++; - return 1; + npyarr->dataptr += npyarr->stride; + npyarr->index[npyarr->stridedim]++; + return 1; } int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) { - NpyArrContext* npyarr; - PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; + NpyArrContext* npyarr; + PRINTMARK(); + npyarr = GET_TC(tc)->npyarr; - if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim) - { - // innermost dimension, start retrieving item values - GET_TC(tc)->iterNext = NpyArr_iterNextItem; - return NpyArr_iterNextItem(_obj, tc); - } + if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim) + { + // innermost dimension, start retrieving item values + GET_TC(tc)->iterNext = NpyArr_iterNextItem; + return NpyArr_iterNextItem(_obj, tc); + } - // dig a dimension deeper - npyarr->index[npyarr->stridedim]++; + // dig a dimension deeper + npyarr->index[npyarr->stridedim]++; - npyarr->curdim++; - npyarr->stridedim += npyarr->inc; - npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim); - npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim); - npyarr->index[npyarr->stridedim] = 0; + npyarr->curdim++; + npyarr->stridedim += npyarr->inc; + npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim); + npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim); + npyarr->index[npyarr->stridedim] = 0; - ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = npyarr; - GET_TC(tc)->itemValue = npyarr->array; - return 1; + ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = npyarr; + GET_TC(tc)->itemValue = npyarr->array; + return 
1; } JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - PRINTMARK(); - return GET_TC(tc)->itemValue; + PRINTMARK(); + return GET_TC(tc)->itemValue; } char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - NpyArrContext* npyarr; - npy_intp idx; - PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; - if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) - { - idx = npyarr->index[npyarr->stridedim] - 1; - *outLen = strlen(npyarr->columnLabels[idx]); - return npyarr->columnLabels[idx]; - } - else - { - idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; - *outLen = strlen(npyarr->rowLabels[idx]); - return npyarr->rowLabels[idx]; - } + NpyArrContext* npyarr; + npy_intp idx; + PRINTMARK(); + npyarr = GET_TC(tc)->npyarr; + if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) + { + idx = npyarr->index[npyarr->stridedim] - 1; + *outLen = strlen(npyarr->columnLabels[idx]); + return npyarr->columnLabels[idx]; + } + else + { + idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; + *outLen = strlen(npyarr->rowLabels[idx]); + return npyarr->rowLabels[idx]; + } } //============================================================================= @@ -418,25 +461,25 @@ char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void Tuple_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->index = 0; - GET_TC(tc)->size = PyTuple_GET_SIZE( (PyObject *) obj); - GET_TC(tc)->itemValue = NULL; + GET_TC(tc)->index = 0; + GET_TC(tc)->size = PyTuple_GET_SIZE( (PyObject *) obj); + GET_TC(tc)->itemValue = NULL; } int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc) { - PyObject *item; + PyObject *item; - if (GET_TC(tc)->index >= GET_TC(tc)->size) - { - return 0; - } + if (GET_TC(tc)->index >= GET_TC(tc)->size) + { + return 0; + } - item = PyTuple_GET_ITEM (obj, GET_TC(tc)->index); + item = PyTuple_GET_ITEM (obj, GET_TC(tc)->index); - GET_TC(tc)->itemValue = item; - 
GET_TC(tc)->index ++; - return 1; + GET_TC(tc)->itemValue = item; + GET_TC(tc)->index ++; + return 1; } void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc) @@ -445,12 +488,68 @@ void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc) JSOBJ Tuple_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - return NULL; + return NULL; +} + +//============================================================================= +// Iterator iteration functions +// itemValue is borrowed reference, no ref counting +//============================================================================= +void Iter_iterBegin(JSOBJ obj, JSONTypeContext *tc) +{ + GET_TC(tc)->itemValue = NULL; + GET_TC(tc)->iterator = PyObject_GetIter(obj); +} + +int Iter_iterNext(JSOBJ obj, JSONTypeContext *tc) +{ + PyObject *item; + + if (GET_TC(tc)->itemValue) + { + Py_DECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = NULL; + } + + item = PyIter_Next(GET_TC(tc)->iterator); + + if (item == NULL) + { + return 0; + } + + GET_TC(tc)->itemValue = item; + return 1; +} + +void Iter_iterEnd(JSOBJ obj, JSONTypeContext *tc) +{ + if (GET_TC(tc)->itemValue) + { + Py_DECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = NULL; + } + + if (GET_TC(tc)->iterator) + { + Py_DECREF(GET_TC(tc)->iterator); + GET_TC(tc)->iterator = NULL; + } +} + +JSOBJ Iter_iterGetValue(JSOBJ obj, JSONTypeContext *tc) +{ + return GET_TC(tc)->itemValue; +} + +char *Iter_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) +{ + return NULL; } //============================================================================= @@ -460,97 +559,84 @@ char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->attrList = PyObject_Dir(obj); - GET_TC(tc)->index = 
0; - GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList); - PRINTMARK(); + GET_TC(tc)->attrList = PyObject_Dir(obj); + GET_TC(tc)->index = 0; + GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList); + PRINTMARK(); } void Dir_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - if (GET_TC(tc)->itemValue) - { - Py_DECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = NULL; - } + if (GET_TC(tc)->itemValue) + { + Py_DECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = NULL; + } - if (GET_TC(tc)->itemName) - { - Py_DECREF(GET_TC(tc)->itemName); - GET_TC(tc)->itemName = NULL; - } + if (GET_TC(tc)->itemName) + { + Py_DECREF(GET_TC(tc)->itemName); + GET_TC(tc)->itemName = NULL; + } - Py_DECREF( (PyObject *) GET_TC(tc)->attrList); - PRINTMARK(); + Py_DECREF( (PyObject *) GET_TC(tc)->attrList); + PRINTMARK(); } int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { - PyObject *obj = (PyObject *) _obj; - PyObject *itemValue = GET_TC(tc)->itemValue; - PyObject *itemName = GET_TC(tc)->itemName; - PyObject* attr; - PyObject* attrName; - char* attrStr; - + PyObject *obj = (PyObject *) _obj; + PyObject *itemValue = GET_TC(tc)->itemValue; + PyObject *itemName = GET_TC(tc)->itemName; + PyObject* attr; + PyObject* attrName; + char* attrStr; + + if (itemValue) + { + Py_DECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = itemValue = NULL; + } + + if (itemName) + { + Py_DECREF(GET_TC(tc)->itemName); + GET_TC(tc)->itemName = itemName = NULL; + } + + for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index ++) + { + attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index); +#if PY_MAJOR_VERSION >= 3 + attr = PyUnicode_AsUTF8String(attrName); +#else + attr = attrName; + Py_INCREF(attr); +#endif + attrStr = PyString_AS_STRING(attr); - if (itemValue) + if (attrStr[0] == '_') { - Py_DECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = itemValue = NULL; + PRINTMARK(); + Py_DECREF(attr); + continue; } - if (itemName) + itemValue = PyObject_GetAttr(obj, attrName); + 
if (itemValue == NULL) { - Py_DECREF(GET_TC(tc)->itemName); - GET_TC(tc)->itemName = itemName = NULL; + PyErr_Clear(); + Py_DECREF(attr); + PRINTMARK(); + continue; } - for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index ++) + if (PyCallable_Check(itemValue)) { - attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index); -#if PY_MAJOR_VERSION >= 3 - attr = PyUnicode_AsUTF8String(attrName); -#else - attr = attrName; - Py_INCREF(attr); -#endif - attrStr = PyString_AS_STRING(attr); - - if (attrStr[0] == '_') - { - PRINTMARK(); - Py_DECREF(attr); - continue; - } - - itemValue = PyObject_GetAttr(obj, attrName); - if (itemValue == NULL) - { - PyErr_Clear(); - Py_DECREF(attr); - PRINTMARK(); - continue; - } - - if (PyCallable_Check(itemValue)) - { - Py_DECREF(itemValue); - Py_DECREF(attr); - PRINTMARK(); - continue; - } - - PRINTMARK(); - itemName = attr; - break; - } - - if (itemName == NULL) - { - GET_TC(tc)->index = GET_TC(tc)->size; - GET_TC(tc)->itemValue = NULL; - return 0; + Py_DECREF(itemValue); + Py_DECREF(attr); + PRINTMARK(); + continue; } GET_TC(tc)->itemName = itemName; @@ -558,48 +644,60 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) GET_TC(tc)->index ++; PRINTMARK(); - return 1; -} + itemName = attr; + break; + } + if (itemName == NULL) + { + GET_TC(tc)->index = GET_TC(tc)->size; + GET_TC(tc)->itemValue = NULL; + return 0; + } + + GET_TC(tc)->itemName = itemName; + GET_TC(tc)->itemValue = itemValue; + GET_TC(tc)->index ++; + PRINTMARK(); + return 1; +} JSOBJ Dir_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - PRINTMARK(); - return GET_TC(tc)->itemValue; + PRINTMARK(); + return GET_TC(tc)->itemValue; } char *Dir_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - PRINTMARK(); - *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName); - return PyString_AS_STRING(GET_TC(tc)->itemName); + PRINTMARK(); + *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName); + return PyString_AS_STRING(GET_TC(tc)->itemName); } - - 
//============================================================================= // List iteration functions // itemValue is borrowed from object (which is list). No refcounting //============================================================================= void List_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->index = 0; - GET_TC(tc)->size = PyList_GET_SIZE( (PyObject *) obj); + GET_TC(tc)->index = 0; + GET_TC(tc)->size = PyList_GET_SIZE( (PyObject *) obj); } int List_iterNext(JSOBJ obj, JSONTypeContext *tc) { - if (GET_TC(tc)->index >= GET_TC(tc)->size) - { - PRINTMARK(); - return 0; - } + if (GET_TC(tc)->index >= GET_TC(tc)->size) + { + PRINTMARK(); + return 0; + } - GET_TC(tc)->itemValue = PyList_GET_ITEM (obj, GET_TC(tc)->index); - GET_TC(tc)->index ++; - return 1; + GET_TC(tc)->itemValue = PyList_GET_ITEM (obj, GET_TC(tc)->index); + GET_TC(tc)->index ++; + return 1; } void List_iterEnd(JSOBJ obj, JSONTypeContext *tc) @@ -608,12 +706,12 @@ void List_iterEnd(JSOBJ obj, JSONTypeContext *tc) JSOBJ List_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - return NULL; + return NULL; } //============================================================================= @@ -621,65 +719,65 @@ char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void Index_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->index = 0; - GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); - if (!GET_TC(tc)->citemName) - { - PyErr_NoMemory(); - } - PRINTMARK(); + GET_TC(tc)->index = 0; + GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); + if (!GET_TC(tc)->citemName) + { + PyErr_NoMemory(); + } + PRINTMARK(); } int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) { - Py_ssize_t index; - if (!GET_TC(tc)->citemName) - { - return 0; 
- } - - index = GET_TC(tc)->index; - Py_XDECREF(GET_TC(tc)->itemValue); - if (index == 0) - { - memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name"); - } - else + Py_ssize_t index; + if (!GET_TC(tc)->citemName) + { + return 0; + } + + index = GET_TC(tc)->index; + Py_XDECREF(GET_TC(tc)->itemValue); + if (index == 0) + { + memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name"); + } + else if (index == 1) { - memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); + memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); } else { - PRINTMARK(); - return 0; + PRINTMARK(); + return 0; } - GET_TC(tc)->index++; - PRINTMARK(); - return 1; + GET_TC(tc)->index++; + PRINTMARK(); + return 1; } void Index_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - if (GET_TC(tc)->citemName) - { - PyObject_Free(GET_TC(tc)->citemName); - } - PRINTMARK(); + if (GET_TC(tc)->citemName) + { + PyObject_Free(GET_TC(tc)->citemName); + } + PRINTMARK(); } JSOBJ Index_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - *outLen = strlen(GET_TC(tc)->citemName); - return GET_TC(tc)->citemName; + *outLen = strlen(GET_TC(tc)->citemName); + return GET_TC(tc)->citemName; } //============================================================================= @@ -687,75 +785,75 @@ char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; - GET_TC(tc)->index = 0; - GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); - 
enc->outputFormat = VALUES; // for contained series - if (!GET_TC(tc)->citemName) - { - PyErr_NoMemory(); - } - PRINTMARK(); + PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; + GET_TC(tc)->index = 0; + GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); + enc->outputFormat = VALUES; // for contained series + if (!GET_TC(tc)->citemName) + { + PyErr_NoMemory(); + } + PRINTMARK(); } int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) { - Py_ssize_t index; - if (!GET_TC(tc)->citemName) - { - return 0; - } - - index = GET_TC(tc)->index; - Py_XDECREF(GET_TC(tc)->itemValue); - if (index == 0) - { - memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name"); - } - else - if (index == 1) - { - memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); - } - else - if (index == 2) - { - memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); - } - else - { - PRINTMARK(); - return 0; - } - - GET_TC(tc)->index++; + Py_ssize_t index; + if (!GET_TC(tc)->citemName) + { + return 0; + } + + index = GET_TC(tc)->index; + Py_XDECREF(GET_TC(tc)->itemValue); + if (index == 0) + { + memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name"); + } + else + if (index == 1) + { + memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); + } + else + if (index == 2) + { + memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); + } + else + { PRINTMARK(); - return 1; + return 0; + } + + GET_TC(tc)->index++; + PRINTMARK(); + return 1; } void Series_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; - enc->outputFormat = enc->originalOutputFormat; - if 
(GET_TC(tc)->citemName) - { - PyObject_Free(GET_TC(tc)->citemName); - } - PRINTMARK(); + PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; + enc->outputFormat = enc->originalOutputFormat; + if (GET_TC(tc)->citemName) + { + PyObject_Free(GET_TC(tc)->citemName); + } + PRINTMARK(); } JSOBJ Series_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - *outLen = strlen(GET_TC(tc)->citemName); - return GET_TC(tc)->citemName; + *outLen = strlen(GET_TC(tc)->citemName); + return GET_TC(tc)->citemName; } //============================================================================= @@ -763,75 +861,75 @@ char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; - GET_TC(tc)->index = 0; - GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); - enc->outputFormat = VALUES; // for contained series & index - if (!GET_TC(tc)->citemName) - { - PyErr_NoMemory(); - } - PRINTMARK(); + PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; + GET_TC(tc)->index = 0; + GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char)); + enc->outputFormat = VALUES; // for contained series & index + if (!GET_TC(tc)->citemName) + { + PyErr_NoMemory(); + } + PRINTMARK(); } int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) { - Py_ssize_t index; - if (!GET_TC(tc)->citemName) - { - return 0; - } - - index = GET_TC(tc)->index; - Py_XDECREF(GET_TC(tc)->itemValue); - if (index == 0) - { - memcpy(GET_TC(tc)->citemName, "columns", sizeof(char)*8); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns"); - } - else + Py_ssize_t index; + if (!GET_TC(tc)->citemName) + { + return 0; + } + + index = GET_TC(tc)->index; + 
Py_XDECREF(GET_TC(tc)->itemValue); + if (index == 0) + { + memcpy(GET_TC(tc)->citemName, "columns", sizeof(char)*8); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns"); + } + else if (index == 1) { - memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); + memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); } else - if (index == 2) - { + if (index == 2) + { memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5); GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); - } - else - { + } + else + { PRINTMARK(); return 0; - } + } - GET_TC(tc)->index++; - PRINTMARK(); - return 1; + GET_TC(tc)->index++; + PRINTMARK(); + return 1; } void DataFrame_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; - enc->outputFormat = enc->originalOutputFormat; - if (GET_TC(tc)->citemName) - { - PyObject_Free(GET_TC(tc)->citemName); - } - PRINTMARK(); + PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder; + enc->outputFormat = enc->originalOutputFormat; + if (GET_TC(tc)->citemName) + { + PyObject_Free(GET_TC(tc)->citemName); + } + PRINTMARK(); } JSOBJ DataFrame_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - *outLen = strlen(GET_TC(tc)->citemName); - return GET_TC(tc)->citemName; + *outLen = strlen(GET_TC(tc)->citemName); + return GET_TC(tc)->citemName; } //============================================================================= @@ -841,46 +939,46 @@ char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) //============================================================================= void Dict_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->index = 0; - PRINTMARK(); + GET_TC(tc)->index = 0; + PRINTMARK(); } int 
Dict_iterNext(JSOBJ obj, JSONTypeContext *tc) { #if PY_MAJOR_VERSION >= 3 - PyObject* itemNameTmp; + PyObject* itemNameTmp; #endif - if (GET_TC(tc)->itemName) - { - Py_DECREF(GET_TC(tc)->itemName); - GET_TC(tc)->itemName = NULL; - } + if (GET_TC(tc)->itemName) + { + Py_DECREF(GET_TC(tc)->itemName); + GET_TC(tc)->itemName = NULL; + } - if (!PyDict_Next ( (PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue)) - { - PRINTMARK(); - return 0; - } + if (!PyDict_Next ( (PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue)) + { + PRINTMARK(); + return 0; + } - if (PyUnicode_Check(GET_TC(tc)->itemName)) - { - GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName); - } - else + if (PyUnicode_Check(GET_TC(tc)->itemName)) + { + GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName); + } + else if (!PyString_Check(GET_TC(tc)->itemName)) { - GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName); + GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName); #if PY_MAJOR_VERSION >= 3 - itemNameTmp = GET_TC(tc)->itemName; - GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName); - Py_DECREF(itemNameTmp); + itemNameTmp = GET_TC(tc)->itemName; + GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName); + Py_DECREF(itemNameTmp); #endif } else { - Py_INCREF(GET_TC(tc)->itemName); + Py_INCREF(GET_TC(tc)->itemName); } PRINTMARK(); return 1; @@ -888,24 +986,24 @@ int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc) void Dict_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - if (GET_TC(tc)->itemName) - { - Py_DECREF(GET_TC(tc)->itemName); - GET_TC(tc)->itemName = NULL; - } - Py_DECREF(GET_TC(tc)->dictObj); - PRINTMARK(); + if (GET_TC(tc)->itemName) + { + Py_DECREF(GET_TC(tc)->itemName); + GET_TC(tc)->itemName = NULL; + } + Py_DECREF(GET_TC(tc)->dictObj); + PRINTMARK(); } JSOBJ Dict_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return 
GET_TC(tc)->itemValue; + return GET_TC(tc)->itemValue; } char *Dict_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName); - return PyString_AS_STRING(GET_TC(tc)->itemName); + *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName); + return PyString_AS_STRING(GET_TC(tc)->itemName); } void NpyArr_freeLabels(char** labels, npy_intp len) @@ -1023,433 +1121,456 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) { - PyObject *obj, *exc, *toDictFunc; - TypeContext *pc; - PyObjectEncoder *enc; - double val; - PRINTMARK(); - if (!_obj) { - tc->type = JT_INVALID; - return; - } + PyObject *obj, *exc, *toDictFunc; + TypeContext *pc; + PyObjectEncoder *enc; + double val; + PRINTMARK(); + if (!_obj) { + tc->type = JT_INVALID; + return; + } - obj = (PyObject*) _obj; - enc = (PyObjectEncoder*) tc->encoder; + obj = (PyObject*) _obj; + enc = (PyObjectEncoder*) tc->encoder; - tc->prv = PyObject_Malloc(sizeof(TypeContext)); - pc = (TypeContext *) tc->prv; - if (!pc) - { - tc->type = JT_INVALID; - PyErr_NoMemory(); - return; - } - pc->newObj = NULL; - pc->dictObj = NULL; - pc->itemValue = NULL; - pc->itemName = NULL; - pc->attrList = NULL; - pc->citemName = NULL; - pc->npyarr = NULL; - pc->rowLabels = NULL; - pc->columnLabels = NULL; - pc->index = 0; - pc->size = 0; - pc->longValue = 0; - pc->transpose = 0; - pc->rowLabelsLen = 0; - pc->columnLabelsLen = 0; - - if (PyIter_Check(obj) || PyArray_Check(obj)) - { - goto ISITERABLE; - } + tc->prv = PyObject_Malloc(sizeof(TypeContext)); + pc = (TypeContext *) tc->prv; + if (!pc) + { + tc->type = JT_INVALID; + PyErr_NoMemory(); + return; + } + pc->newObj = NULL; + pc->dictObj = NULL; + pc->itemValue = NULL; + pc->itemName = NULL; + pc->attrList = NULL; + pc->index = 0; + pc->size = 0; + pc->longValue = 0; + pc->citemName = NULL; + pc->npyarr = NULL; + pc->rowLabels = NULL; + pc->columnLabels = 
NULL; + pc->transpose = 0; + pc->rowLabelsLen = 0; + pc->columnLabelsLen = 0; + + if (PyIter_Check(obj)) + { + PRINTMARK(); + goto ISITERABLE; + } - if (PyBool_Check(obj)) - { - PRINTMARK(); - tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE; - return; - } - else - if (PyLong_Check(obj)) - { - PRINTMARK(); - pc->PyTypeToJSON = PyLongToINT64; - tc->type = JT_LONG; - GET_TC(tc)->longValue = PyLong_AsLongLong(obj); + if (PyIter_Check(obj) || PyArray_Check(obj)) + { + goto ISITERABLE; + } - exc = PyErr_Occurred(); + if (PyBool_Check(obj)) + { + PRINTMARK(); + tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE; + return; + } + else + if (PyLong_Check(obj)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyLongToINT64; + tc->type = JT_LONG; + GET_TC(tc)->longValue = PyLong_AsLongLong(obj); - if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) - { - PRINTMARK(); - goto INVALID; - } + exc = PyErr_Occurred(); - return; - } - else - if (PyInt_Check(obj)) + if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) { - PRINTMARK(); + PRINTMARK(); + goto INVALID; + } + + return; + } + else + if (PyInt_Check(obj)) + { + PRINTMARK(); #ifdef _LP64 - pc->PyTypeToJSON = PyIntToINT64; tc->type = JT_LONG; + pc->PyTypeToJSON = PyIntToINT64; tc->type = JT_LONG; #else - pc->PyTypeToJSON = PyIntToINT32; tc->type = JT_INT; + pc->PyTypeToJSON = PyIntToINT32; tc->type = JT_INT; #endif - return; - } - else - if (PyArray_IsScalar(obj, Integer)) - { - PRINTMARK(); - pc->PyTypeToJSON = PyLongToINT64; - tc->type = JT_LONG; - PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64)); - - exc = PyErr_Occurred(); + return; + } + else + if (PyArray_IsScalar(obj, Integer)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyLongToINT64; + tc->type = JT_LONG; + PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64)); - if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) - { - PRINTMARK(); - goto INVALID; - } + exc = PyErr_Occurred(); - return; - } - 
else - if (PyString_Check(obj)) - { - PRINTMARK(); - pc->PyTypeToJSON = PyStringToUTF8; tc->type = JT_UTF8; - return; - } - else - if (PyUnicode_Check(obj)) - { - PRINTMARK(); - pc->PyTypeToJSON = PyUnicodeToUTF8; tc->type = JT_UTF8; - return; - } - else - if (PyFloat_Check(obj)) - { - PRINTMARK(); - val = PyFloat_AS_DOUBLE (obj); - if (npy_isnan(val) || npy_isinf(val)) - { - tc->type = JT_NULL; - } - else - { - pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE; - } - return; - } - else - if (PyArray_IsScalar(obj, Float)) - { - PRINTMARK(); - pc->PyTypeToJSON = NpyFloatToDOUBLE; tc->type = JT_DOUBLE; - return; - } - else - if (PyArray_IsScalar(obj, Datetime)) + if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) { - PRINTMARK(); - pc->PyTypeToJSON = NpyDateTimeToINT64; tc->type = JT_LONG; - return; + PRINTMARK(); + goto INVALID; } - else - if (PyDateTime_Check(obj)) - { - PRINTMARK(); - pc->PyTypeToJSON = PyDateTimeToINT64; tc->type = JT_LONG; - return; - } - else - if (PyDate_Check(obj)) + + return; + } + else + if (PyString_Check(obj)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyStringToUTF8; tc->type = JT_UTF8; + return; + } + else + if (PyUnicode_Check(obj)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyUnicodeToUTF8; tc->type = JT_UTF8; + return; + } + else + if (PyFloat_Check(obj)) + { + PRINTMARK(); + val = PyFloat_AS_DOUBLE (obj); + if (npy_isnan(val) || npy_isinf(val)) { - PRINTMARK(); - pc->PyTypeToJSON = PyDateToINT64; tc->type = JT_LONG; - return; + tc->type = JT_NULL; } else - if (obj == Py_None) { - PRINTMARK(); - tc->type = JT_NULL; - return; + pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE; } + return; + } + else + if (PyObject_IsInstance(obj, type_decimal)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE; + return; + } + else + if (PyArray_IsScalar(obj, Float)) + { + PRINTMARK(); + pc->PyTypeToJSON = NpyFloatToDOUBLE; tc->type = JT_DOUBLE; + return; + } + else + if (PyArray_IsScalar(obj, Datetime)) + { + 
PRINTMARK(); + pc->PyTypeToJSON = NpyDateTimeToINT64; tc->type = JT_LONG; + return; + } + else + if (PyDateTime_Check(obj)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyDateTimeToINT64; tc->type = JT_LONG; + return; + } + else + if (PyDate_Check(obj)) + { + PRINTMARK(); + pc->PyTypeToJSON = PyDateToINT64; tc->type = JT_LONG; + return; + } + else + if (obj == Py_None) + { + PRINTMARK(); + tc->type = JT_NULL; + return; + } ISITERABLE: - if (PyDict_Check(obj)) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->iterBegin = Dict_iterBegin; - pc->iterEnd = Dict_iterEnd; - pc->iterNext = Dict_iterNext; - pc->iterGetValue = Dict_iterGetValue; - pc->iterGetName = Dict_iterGetName; - pc->dictObj = obj; - Py_INCREF(obj); - - return; + if (PyDict_Check(obj)) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->iterBegin = Dict_iterBegin; + pc->iterEnd = Dict_iterEnd; + pc->iterNext = Dict_iterNext; + pc->iterGetValue = Dict_iterGetValue; + pc->iterGetName = Dict_iterGetName; + pc->dictObj = obj; + Py_INCREF(obj); + + return; + } + else + if (PyList_Check(obj)) + { + PRINTMARK(); + tc->type = JT_ARRAY; + pc->iterBegin = List_iterBegin; + pc->iterEnd = List_iterEnd; + pc->iterNext = List_iterNext; + pc->iterGetValue = List_iterGetValue; + pc->iterGetName = List_iterGetName; + return; + } + else + if (PyTuple_Check(obj)) + { + PRINTMARK(); + tc->type = JT_ARRAY; + pc->iterBegin = Tuple_iterBegin; + pc->iterEnd = Tuple_iterEnd; + pc->iterNext = Tuple_iterNext; + pc->iterGetValue = Tuple_iterGetValue; + pc->iterGetName = Tuple_iterGetName; + return; + } + else + if (PyAnySet_Check(obj)) + { + PRINTMARK(); + tc->type = JT_ARRAY; + pc->iterBegin = Iter_iterBegin; + pc->iterEnd = Iter_iterEnd; + pc->iterNext = Iter_iterNext; + pc->iterGetValue = Iter_iterGetValue; + pc->iterGetName = Iter_iterGetName; + return; + } + else + if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_index)) + { + if (enc->outputFormat == SPLIT) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->iterBegin = Index_iterBegin; 
+ pc->iterEnd = Index_iterEnd; + pc->iterNext = Index_iterNext; + pc->iterGetValue = Index_iterGetValue; + pc->iterGetName = Index_iterGetName; + return; } - else - if (PyList_Check(obj)) - { - PRINTMARK(); - tc->type = JT_ARRAY; - pc->iterBegin = List_iterBegin; - pc->iterEnd = List_iterEnd; - pc->iterNext = List_iterNext; - pc->iterGetValue = List_iterGetValue; - pc->iterGetName = List_iterGetName; - return; + + PRINTMARK(); + tc->type = JT_ARRAY; + pc->newObj = PyObject_GetAttrString(obj, "values"); + pc->iterBegin = NpyArr_iterBegin; + pc->iterEnd = NpyArr_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetValue = NpyArr_iterGetValue; + pc->iterGetName = NpyArr_iterGetName; + return; + } + else + if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_series)) + { + if (enc->outputFormat == SPLIT) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->iterBegin = Series_iterBegin; + pc->iterEnd = Series_iterEnd; + pc->iterNext = Series_iterNext; + pc->iterGetValue = Series_iterGetValue; + pc->iterGetName = Series_iterGetName; + return; + } + + if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->columnLabelsLen = PyArray_SIZE(obj); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + if (!pc->columnLabels) + { + goto INVALID; + } } else - if (PyTuple_Check(obj)) { - PRINTMARK(); - tc->type = JT_ARRAY; - pc->iterBegin = Tuple_iterBegin; - pc->iterEnd = Tuple_iterEnd; - pc->iterNext = Tuple_iterNext; - pc->iterGetValue = Tuple_iterGetValue; - pc->iterGetName = Tuple_iterGetName; - return; + PRINTMARK(); + tc->type = JT_ARRAY; + } + pc->newObj = PyObject_GetAttrString(obj, "values"); + pc->iterBegin = NpyArr_iterBegin; + pc->iterEnd = NpyArr_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetValue = NpyArr_iterGetValue; + pc->iterGetName = NpyArr_iterGetName; + return; + } + else + if (PyArray_Check(obj)) + { + if 
(enc->npyCtxtPassthru) + { + PRINTMARK(); + pc->npyarr = enc->npyCtxtPassthru; + tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY); + pc->iterBegin = NpyArrPassThru_iterBegin; + pc->iterEnd = NpyArrPassThru_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetValue = NpyArr_iterGetValue; + pc->iterGetName = NpyArr_iterGetName; + enc->npyCtxtPassthru = NULL; + return; } - else - if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_index)) - { - if (enc->outputFormat == SPLIT) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->iterBegin = Index_iterBegin; - pc->iterEnd = Index_iterEnd; - pc->iterNext = Index_iterNext; - pc->iterGetValue = Index_iterGetValue; - pc->iterGetName = Index_iterGetName; - return; - } - PRINTMARK(); - tc->type = JT_ARRAY; - pc->newObj = PyObject_GetAttrString(obj, "values"); - pc->iterBegin = NpyArr_iterBegin; - pc->iterEnd = NpyArr_iterEnd; - pc->iterNext = NpyArr_iterNext; - pc->iterGetValue = NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; - return; + PRINTMARK(); + tc->type = JT_ARRAY; + pc->iterBegin = NpyArr_iterBegin; + pc->iterEnd = NpyArr_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetValue = NpyArr_iterGetValue; + pc->iterGetName = NpyArr_iterGetName; + return; + } + else + if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_dataframe)) + { + if (enc->outputFormat == SPLIT) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->iterBegin = DataFrame_iterBegin; + pc->iterEnd = DataFrame_iterEnd; + pc->iterNext = DataFrame_iterNext; + pc->iterGetValue = DataFrame_iterGetValue; + pc->iterGetName = DataFrame_iterGetName; + return; } - else - if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_series)) - { - if (enc->outputFormat == SPLIT) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->iterBegin = Series_iterBegin; - pc->iterEnd = Series_iterEnd; - pc->iterNext = Series_iterNext; - pc->iterGetValue = Series_iterGetValue; - pc->iterGetName = Series_iterGetName; - return; - } - if (enc->outputFormat == INDEX || 
enc->outputFormat == COLUMNS) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->columnLabelsLen = PyArray_SIZE(obj); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); - if (!pc->columnLabels) - { - goto INVALID; - } - } - else - { - PRINTMARK(); - tc->type = JT_ARRAY; - } - pc->newObj = PyObject_GetAttrString(obj, "values"); - pc->iterBegin = NpyArr_iterBegin; - pc->iterEnd = NpyArr_iterEnd; - pc->iterNext = NpyArr_iterNext; - pc->iterGetValue = NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; - return; + PRINTMARK(); + pc->newObj = PyObject_GetAttrString(obj, "values"); + pc->iterBegin = NpyArr_iterBegin; + pc->iterEnd = NpyArr_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetValue = NpyArr_iterGetValue; + pc->iterGetName = NpyArr_iterGetName; + if (enc->outputFormat == VALUES) + { + PRINTMARK(); + tc->type = JT_ARRAY; } else - if (PyArray_Check(obj)) + if (enc->outputFormat == RECORDS) { - if (enc->npyCtxtPassthru) - { - PRINTMARK(); - pc->npyarr = enc->npyCtxtPassthru; - tc->type = (pc->npyarr->columnLabels ? 
JT_OBJECT : JT_ARRAY); - pc->iterBegin = NpyArrPassThru_iterBegin; - pc->iterEnd = NpyArrPassThru_iterEnd; - pc->iterNext = NpyArr_iterNext; - pc->iterGetValue = NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; - enc->npyCtxtPassthru = NULL; - return; - } - - PRINTMARK(); - tc->type = JT_ARRAY; - pc->iterBegin = NpyArr_iterBegin; - pc->iterEnd = NpyArr_iterEnd; - pc->iterNext = NpyArr_iterNext; - pc->iterGetValue = NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; - return; + PRINTMARK(); + tc->type = JT_ARRAY; + pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + if (!pc->columnLabels) + { + goto INVALID; + } + } + else + if (enc->outputFormat == INDEX) + { + PRINTMARK(); + tc->type = JT_OBJECT; + pc->rowLabelsLen = PyArray_DIM(pc->newObj, 0); + pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); + if (!pc->rowLabels) + { + goto INVALID; + } + pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + if (!pc->columnLabels) + { + NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); + pc->rowLabels = NULL; + goto INVALID; + } } else - if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_dataframe)) { - if (enc->outputFormat == SPLIT) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->iterBegin = DataFrame_iterBegin; - pc->iterEnd = DataFrame_iterEnd; - pc->iterNext = DataFrame_iterNext; - pc->iterGetValue = DataFrame_iterGetValue; - pc->iterGetName = DataFrame_iterGetName; - return; - } - - PRINTMARK(); - pc->newObj = PyObject_GetAttrString(obj, "values"); - pc->iterBegin = NpyArr_iterBegin; - pc->iterEnd = NpyArr_iterEnd; - pc->iterNext = NpyArr_iterNext; - pc->iterGetValue = 
NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; - if (enc->outputFormat == VALUES) - { - PRINTMARK(); - tc->type = JT_ARRAY; - } - else - if (enc->outputFormat == RECORDS) - { - PRINTMARK(); - tc->type = JT_ARRAY; - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); - if (!pc->columnLabels) - { - goto INVALID; - } - } - else - if (enc->outputFormat == INDEX) - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->rowLabelsLen = PyArray_DIM(pc->newObj, 0); - pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); - if (!pc->rowLabels) - { - goto INVALID; - } - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); - if (!pc->columnLabels) - { - NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); - pc->rowLabels = NULL; - goto INVALID; - } - } - else - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); - if (!pc->rowLabels) - { - goto INVALID; - } - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); - if (!pc->columnLabels) - { - NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); - pc->rowLabels = NULL; - goto INVALID; - } - pc->transpose = 1; - } - return; + PRINTMARK(); + tc->type = JT_OBJECT; + pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1); + pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); + if 
(!pc->rowLabels) + { + goto INVALID; + } + pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + if (!pc->columnLabels) + { + NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); + pc->rowLabels = NULL; + goto INVALID; + } + pc->transpose = 1; } + return; + } + toDictFunc = PyObject_GetAttrString(obj, "toDict"); - toDictFunc = PyObject_GetAttrString(obj, "toDict"); + if (toDictFunc) + { + PyObject* tuple = PyTuple_New(0); + PyObject* toDictResult = PyObject_Call(toDictFunc, tuple, NULL); + Py_DECREF(tuple); + Py_DECREF(toDictFunc); - if (toDictFunc) + if (toDictResult == NULL) { - PyObject* tuple = PyTuple_New(0); - PyObject* toDictResult = PyObject_Call(toDictFunc, tuple, NULL); - Py_DECREF(tuple); - Py_DECREF(toDictFunc); - - if (toDictResult == NULL) - { - PyErr_Clear(); - tc->type = JT_NULL; - return; - } - - if (!PyDict_Check(toDictResult)) - { - Py_DECREF(toDictResult); - tc->type = JT_NULL; - return; - } - - PRINTMARK(); - tc->type = JT_OBJECT; - pc->iterBegin = Dict_iterBegin; - pc->iterEnd = Dict_iterEnd; - pc->iterNext = Dict_iterNext; - pc->iterGetValue = Dict_iterGetValue; - pc->iterGetName = Dict_iterGetName; - pc->dictObj = toDictResult; - return; + PyErr_Clear(); + tc->type = JT_NULL; + return; } - PyErr_Clear(); + if (!PyDict_Check(toDictResult)) + { + Py_DECREF(toDictResult); + tc->type = JT_NULL; + return; + } + PRINTMARK(); tc->type = JT_OBJECT; - pc->iterBegin = Dir_iterBegin; - pc->iterEnd = Dir_iterEnd; - pc->iterNext = Dir_iterNext; - pc->iterGetValue = Dir_iterGetValue; - pc->iterGetName = Dir_iterGetName; - + pc->iterBegin = Dict_iterBegin; + pc->iterEnd = Dict_iterEnd; + pc->iterNext = Dict_iterNext; + pc->iterGetValue = Dict_iterGetValue; + pc->iterGetName = Dict_iterGetName; + pc->dictObj = toDictResult; return; + } + + PyErr_Clear(); + + PRINTMARK(); + tc->type = JT_OBJECT; + pc->iterBegin = 
Dir_iterBegin; + pc->iterEnd = Dir_iterEnd; + pc->iterNext = Dir_iterNext; + pc->iterGetValue = Dir_iterGetValue; + pc->iterGetName = Dir_iterGetName; + return; INVALID: - tc->type = JT_INVALID; - PyObject_Free(tc->prv); - tc->prv = NULL; - return; + tc->type = JT_INVALID; + PyObject_Free(tc->prv); + tc->prv = NULL; + return; } - void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) { Py_XDECREF(GET_TC(tc)->newObj); @@ -1462,244 +1583,244 @@ void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen) { - return GET_TC(tc)->PyTypeToJSON (obj, tc, NULL, _outLen); + return GET_TC(tc)->PyTypeToJSON (obj, tc, NULL, _outLen); } JSINT64 Object_getLongValue(JSOBJ obj, JSONTypeContext *tc) { - JSINT64 ret; - GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); - - return ret; + JSINT64 ret; + GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); + return ret; } JSINT32 Object_getIntValue(JSOBJ obj, JSONTypeContext *tc) { - JSINT32 ret; - GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); - return ret; + JSINT32 ret; + GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); + return ret; } - double Object_getDoubleValue(JSOBJ obj, JSONTypeContext *tc) { - double ret; - GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); - return ret; + double ret; + GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL); + return ret; } static void Object_releaseObject(JSOBJ _obj) { - Py_DECREF( (PyObject *) _obj); + Py_DECREF( (PyObject *) _obj); } - - void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->iterBegin(obj, tc); + GET_TC(tc)->iterBegin(obj, tc); } int Object_iterNext(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->iterNext(obj, tc); + return GET_TC(tc)->iterNext(obj, tc); } void Object_iterEnd(JSOBJ obj, JSONTypeContext *tc) { - GET_TC(tc)->iterEnd(obj, tc); + GET_TC(tc)->iterEnd(obj, tc); } JSOBJ Object_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { - return GET_TC(tc)->iterGetValue(obj, tc); + return 
GET_TC(tc)->iterGetValue(obj, tc); } char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { - return GET_TC(tc)->iterGetName(obj, tc, outLen); + return GET_TC(tc)->iterGetName(obj, tc, outLen); } - PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs) { - static char *kwlist[] = { "obj", "ensure_ascii", "double_precision", "orient", NULL}; + static char *kwlist[] = { "obj", "ensure_ascii", "double_precision", "encode_html_chars", "orient", NULL}; + + char buffer[65536]; + char *ret; + PyObject *newobj; + PyObject *oinput = NULL; + PyObject *oensureAscii = NULL; + int idoublePrecision = 10; // default double precision setting + PyObject *oencodeHTMLChars = NULL; + char *sOrient = NULL; + + PyObjectEncoder pyEncoder = + { + { + Object_beginTypeContext, + Object_endTypeContext, + Object_getStringValue, + Object_getLongValue, + Object_getIntValue, + Object_getDoubleValue, + Object_iterBegin, + Object_iterNext, + Object_iterEnd, + Object_iterGetValue, + Object_iterGetName, + Object_releaseObject, + PyObject_Malloc, + PyObject_Realloc, + PyObject_Free, + -1, //recursionMax + idoublePrecision, + 1, //forceAscii + 0, //encodeHTMLChars + } + }; + JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder; + + pyEncoder.npyCtxtPassthru = NULL; + pyEncoder.outputFormat = COLUMNS; + + PRINTMARK(); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiOs", kwlist, &oinput, &oensureAscii, &idoublePrecision, &oencodeHTMLChars, &sOrient)) + { + return NULL; + } - char buffer[65536]; - char *ret; - PyObject *newobj; - PyObject *oinput = NULL; - PyObject *oensureAscii = NULL; - char *sOrient = NULL; - int idoublePrecision = 5; // default double precision setting + if (oensureAscii != NULL && !PyObject_IsTrue(oensureAscii)) + { + encoder->forceASCII = 0; + } - PyObjectEncoder pyEncoder = - { - { - Object_beginTypeContext, //void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc); - Object_endTypeContext, //void (*endTypeContext)(JSOBJ obj, 
JSONTypeContext *tc); - Object_getStringValue, //const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen); - Object_getLongValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc); - Object_getIntValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc); - Object_getDoubleValue, //double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc); - Object_iterBegin, //JSPFN_ITERBEGIN iterBegin; - Object_iterNext, //JSPFN_ITERNEXT iterNext; - Object_iterEnd, //JSPFN_ITEREND iterEnd; - Object_iterGetValue, //JSPFN_ITERGETVALUE iterGetValue; - Object_iterGetName, //JSPFN_ITERGETNAME iterGetName; - Object_releaseObject, //void (*releaseValue)(JSONTypeContext *ti); - PyObject_Malloc, //JSPFN_MALLOC malloc; - PyObject_Realloc, //JSPFN_REALLOC realloc; - PyObject_Free, //JSPFN_FREE free; - -1, //recursionMax - idoublePrecision, - 1, //forceAscii - } - }; - JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder; + if (oencodeHTMLChars != NULL && PyObject_IsTrue(oencodeHTMLChars)) + { + encoder->encodeHTMLChars = 1; + } - pyEncoder.npyCtxtPassthru = NULL; - pyEncoder.outputFormat = COLUMNS; + encoder->doublePrecision = idoublePrecision; - PRINTMARK(); - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Ois", kwlist, &oinput, &oensureAscii, &idoublePrecision, &sOrient)) + if (sOrient != NULL) + { + if (strcmp(sOrient, "records") == 0) { - return NULL; + pyEncoder.outputFormat = RECORDS; } - - if (sOrient != NULL) + else + if (strcmp(sOrient, "index") == 0) { - if (strcmp(sOrient, "records") == 0) - { - pyEncoder.outputFormat = RECORDS; - } - else - if (strcmp(sOrient, "index") == 0) - { - pyEncoder.outputFormat = INDEX; - } - else - if (strcmp(sOrient, "split") == 0) - { - pyEncoder.outputFormat = SPLIT; - } - else - if (strcmp(sOrient, "values") == 0) - { - pyEncoder.outputFormat = VALUES; - } - else - if (strcmp(sOrient, "columns") != 0) - { - PyErr_Format (PyExc_ValueError, "Invalid value '%s' for option 'orient'", sOrient); - return 
NULL; - } + pyEncoder.outputFormat = INDEX; } - - pyEncoder.originalOutputFormat = pyEncoder.outputFormat; - - if (oensureAscii != NULL && !PyObject_IsTrue(oensureAscii)) + else + if (strcmp(sOrient, "split") == 0) { - encoder->forceASCII = 0; + pyEncoder.outputFormat = SPLIT; } - - encoder->doublePrecision = idoublePrecision; - - PRINTMARK(); - ret = JSON_EncodeObject (oinput, encoder, buffer, sizeof (buffer)); - PRINTMARK(); - - if (PyErr_Occurred()) + else + if (strcmp(sOrient, "values") == 0) { - return NULL; + pyEncoder.outputFormat = VALUES; } - - if (encoder->errorMsg) + else + if (strcmp(sOrient, "columns") != 0) { - if (ret != buffer) - { - encoder->free (ret); - } - - PyErr_Format (PyExc_OverflowError, "%s", encoder->errorMsg); - return NULL; + PyErr_Format (PyExc_ValueError, "Invalid value '%s' for option 'orient'", sOrient); + return NULL; } + } + + pyEncoder.originalOutputFormat = pyEncoder.outputFormat; + PRINTMARK(); + ret = JSON_EncodeObject (oinput, encoder, buffer, sizeof (buffer)); + PRINTMARK(); - newobj = PyString_FromString (ret); + if (PyErr_Occurred()) + { + return NULL; + } + if (encoder->errorMsg) + { if (ret != buffer) { - encoder->free (ret); + encoder->free (ret); } - PRINTMARK(); + PyErr_Format (PyExc_OverflowError, "%s", encoder->errorMsg); + return NULL; + } + + newobj = PyString_FromString (ret); + + if (ret != buffer) + { + encoder->free (ret); + } - return newobj; + PRINTMARK(); + + return newobj; } PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs) { - PyObject *data; - PyObject *file; - PyObject *string; - PyObject *write; - PyObject *argtuple; - - PRINTMARK(); + PyObject *data; + PyObject *file; + PyObject *string; + PyObject *write; + PyObject *argtuple; - if (!PyArg_ParseTuple (args, "OO", &data, &file)) { - return NULL; - } + PRINTMARK(); - if (!PyObject_HasAttrString (file, "write")) - { - PyErr_Format (PyExc_TypeError, "expected file"); - return NULL; - } + if (!PyArg_ParseTuple (args, "OO", &data, 
&file)) + { + return NULL; + } - write = PyObject_GetAttrString (file, "write"); + if (!PyObject_HasAttrString (file, "write")) + { + PyErr_Format (PyExc_TypeError, "expected file"); + return NULL; + } - if (!PyCallable_Check (write)) { - Py_XDECREF(write); - PyErr_Format (PyExc_TypeError, "expected file"); - return NULL; - } + write = PyObject_GetAttrString (file, "write"); - argtuple = PyTuple_Pack(1, data); + if (!PyCallable_Check (write)) + { + Py_XDECREF(write); + PyErr_Format (PyExc_TypeError, "expected file"); + return NULL; + } - string = objToJSON (self, argtuple, kwargs); + argtuple = PyTuple_Pack(1, data); - if (string == NULL) - { - Py_XDECREF(write); - Py_XDECREF(argtuple); - return NULL; - } + string = objToJSON (self, argtuple, kwargs); + if (string == NULL) + { + Py_XDECREF(write); Py_XDECREF(argtuple); + return NULL; + } - argtuple = PyTuple_Pack (1, string); - if (argtuple == NULL) - { - Py_XDECREF(write); - return NULL; - } - if (PyObject_CallObject (write, argtuple) == NULL) - { - Py_XDECREF(write); - Py_XDECREF(argtuple); - return NULL; - } + Py_XDECREF(argtuple); + argtuple = PyTuple_Pack (1, string); + if (argtuple == NULL) + { Py_XDECREF(write); - Py_DECREF(argtuple); - Py_XDECREF(string); - - PRINTMARK(); + return NULL; + } + if (PyObject_CallObject (write, argtuple) == NULL) + { + Py_XDECREF(write); + Py_XDECREF(argtuple); + return NULL; + } - Py_RETURN_NONE; + Py_XDECREF(write); + Py_DECREF(argtuple); + Py_XDECREF(string); + PRINTMARK(); + Py_RETURN_NONE; } - diff --git a/pandas/src/ujson/python/py_defines.h b/pandas/src/ujson/python/py_defines.h index 1544c2e3cf34d..312914217d8e3 100644 --- a/pandas/src/ujson/python/py_defines.h +++ b/pandas/src/ujson/python/py_defines.h @@ -1,3 +1,40 @@ +/* +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ESN Social Software AB nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +http://code.google.com/p/stringencoders/ +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. + +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. 
+*/ + #include <Python.h> #if PY_MAJOR_VERSION >= 3 diff --git a/pandas/src/ujson/python/ujson.c b/pandas/src/ujson/python/ujson.c index e04309e620a1d..33b01b341c20a 100644 --- a/pandas/src/ujson/python/ujson.c +++ b/pandas/src/ujson/python/ujson.c @@ -1,3 +1,40 @@ +/* +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the ESN Social Software AB nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +http://code.google.com/p/stringencoders/ +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. + +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +* Copyright (c) 1988-1993 The Regents of the University of California. +* Copyright (c) 1994 Sun Microsystems, Inc. +*/ + #include "py_defines.h" #include "version.h" @@ -15,28 +52,30 @@ PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs); PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs); +#define ENCODER_HELP_TEXT "Use ensure_ascii=false to output UTF-8. Pass in double_precision to alter the maximum digit precision of doubles. Set encode_html_chars=True to encode < > & as unicode escape sequences." + static PyMethodDef ujsonMethods[] = { - {"encode", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8. Pass in double_precision to alter the maximum digit precision with doubles"}, - {"decode", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"}, - {"dumps", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8"}, - {"loads", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"}, - {"dump", (PyCFunction) objToJSONFile, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursively into JSON file. Use ensure_ascii=false to output UTF-8"}, - {"load", (PyCFunction) JSONFileToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as file to dict object structure"}, - {NULL, NULL, 0, NULL} /* Sentinel */ + {"encode", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. 
" ENCODER_HELP_TEXT}, + {"decode", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True to use high precision float decoder."}, + {"dumps", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. " ENCODER_HELP_TEXT}, + {"loads", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True to use high precision float decoder."}, + {"dump", (PyCFunction) objToJSONFile, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursively into JSON file. " ENCODER_HELP_TEXT}, + {"load", (PyCFunction) JSONFileToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as file to dict object structure. Use precise_float=True to use high precision float decoder."}, + {NULL, NULL, 0, NULL} /* Sentinel */ }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_pandasujson", - 0, /* m_doc */ - -1, /* m_size */ - ujsonMethods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ + PyModuleDef_HEAD_INIT, + "_pandasujson", + 0, /* m_doc */ + -1, /* m_size */ + ujsonMethods, /* m_methods */ + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ }; #define PYMODINITFUNC PyObject *PyInit_json(void) @@ -53,21 +92,21 @@ static struct PyModuleDef moduledef = { PYMODINITFUNC { - PyObject *module; - PyObject *version_string; + PyObject *module; + PyObject *version_string; - initObjToJSON(); - module = PYMODULE_CREATE(); + initObjToJSON(); + module = PYMODULE_CREATE(); - if (module == NULL) - { - MODINITERROR; - } + if (module == NULL) + { + MODINITERROR; + } - version_string = PyString_FromString (UJSON_VERSION); - PyModule_AddObject (module, "__version__", version_string); + version_string = PyString_FromString (UJSON_VERSION); + PyModule_AddObject (module, "__version__", 
version_string); #if PY_MAJOR_VERSION >= 3 - return module; + return module; #endif } diff --git a/pandas/src/ujson/python/version.h b/pandas/src/ujson/python/version.h index 9449441411192..0ccfbfe74521c 100644 --- a/pandas/src/ujson/python/version.h +++ b/pandas/src/ujson/python/version.h @@ -1 +1,38 @@ -#define UJSON_VERSION "1.18" +/* +Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ESN Social Software AB nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +http://code.google.com/p/stringencoders/ +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. + +Numeric decoder derived from from TCL library +http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. +*/ + +#define UJSON_VERSION "1.33"
This updates pandas JSON to use the latest ujson version. A fair few fixes and enhancements are included. Unfortunately however there were a lot of whitespace changes so it looks like a lot more than it actually is. All tests pass on py27 and py33. Valgrind run of JSON tests with Python 2.7 is clean. Also included are two fixes encountered during merging and pushed upstream to ultrajson: https://github.com/esnme/ultrajson/pull/93 https://github.com/esnme/ultrajson/pull/94
https://api.github.com/repos/pandas-dev/pandas/pulls/3946
2013-06-18T14:12:54Z
2013-06-19T01:46:00Z
2013-06-19T01:46:00Z
2014-07-04T07:35:19Z
DOC: partial string indexing docs in timeseries.rst (GH3938)
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 3dee843e75d3e..f8d1e8323b9f5 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -175,7 +175,7 @@ dates outside of those dates if specified. .. _timeseries.datetimeindex: DatetimeIndex -~~~~~~~~~~~~~ +------------- One of the main uses for ``DatetimeIndex`` is as an index for pandas objects. The ``DatetimeIndex`` class contains many timeseries related optimizations: @@ -189,6 +189,19 @@ The ``DatetimeIndex`` class contains many timeseries related optimizations: - Quick access to date fields via properties such as ``year``, ``month``, etc. - Regularization functions like ``snap`` and very fast ``asof`` logic +DatetimeIndex objects has all the basic functionality of regular Index objects +and a smorgasbord of advanced timeseries-specific methods for easy frequency +processing. + +.. seealso:: + :ref:`Reindexing methods <basics.reindexing>` + +.. note:: + + While pandas does not force you to have a sorted date index, some of these + methods may have unexpected or incorrect behavior if the dates are + unsorted. So please be careful. + ``DatetimeIndex`` can be used like a regular index and offers all of its intelligent functionality like selection, slicing, etc. @@ -200,7 +213,10 @@ intelligent functionality like selection, slicing, etc. ts[:5].index ts[::2].index -You can pass in dates and strings that parses to dates as indexing parameters: +Partial String Indexing +~~~~~~~~~~~~~~~~~~~~~~~ + +You can pass in dates and strings that parse to dates as indexing parameters: .. ipython:: python @@ -210,12 +226,6 @@ You can pass in dates and strings that parses to dates as indexing parameters: ts['10/31/2011':'12/31/2011'] -A ``truncate`` convenience function is provided that is equivalent to slicing: - -.. 
ipython:: python - - ts.truncate(before='10/31/2011', after='12/31/2011') - To provide convenience for accessing longer time series, you can also pass in the year or year and month as strings: @@ -225,26 +235,72 @@ the year or year and month as strings: ts['2011-6'] -Even complicated fancy indexing that breaks the DatetimeIndex's frequency -regularity will result in a ``DatetimeIndex`` (but frequency is lost): +This type of slicing will work on a DataFrame with a ``DateTimeIndex`` as well. Since the +partial string selection is a form of label slicing, the endpoints **will be** included. This +would include matching times on an included date. Here's an example: .. ipython:: python - ts[[0, 2, 6]].index + dft = DataFrame(randn(100000,1),columns=['A'],index=date_range('20130101',periods=100000,freq='T')) + dft + dft['2013'] -DatetimeIndex objects has all the basic functionality of regular Index objects -and a smorgasbord of advanced timeseries-specific methods for easy frequency -processing. +This starts on the very first time in the month, and includes the last date & time for the month -.. seealso:: - :ref:`Reindexing methods <basics.reindexing>` +.. ipython:: python -.. note:: + dft['2013-1':'2013-2'] - While pandas does not force you to have a sorted date index, some of these - methods may have unexpected or incorrect behavior if the dates are - unsorted. So please be careful. +This specifies a stop time **that includes all of the times on the last day** +.. ipython:: python + + dft['2013-1':'2013-2-28'] + +This specifies an **exact** stop time (and is not the same as the above) + +.. ipython:: python + + dft['2013-1':'2013-2-28 00:00:00'] + +We are stopping on the included end-point as its part of the index + +.. ipython:: python + + dft['2013-1-15':'2013-1-15 12:30:00'] + +.. 
warning:: + + The following selection will raises a ``KeyError``; otherwise this selection methodology + would be inconsistent with other selection methods in pandas (as this is not a *slice*, nor does it + resolve to one) + + .. code-block:: python + + dft['2013-1-15 12:30:00'] + + To select a single row, use ``.loc`` + + .. ipython:: python + + dft.loc['2013-1-15 12:30:00'] + + +Truncating & Fancy Indexing +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A ``truncate`` convenience function is provided that is equivalent to slicing: + +.. ipython:: python + + ts.truncate(before='10/31/2011', after='12/31/2011') + +Even complicated fancy indexing that breaks the DatetimeIndex's frequency +regularity will result in a ``DatetimeIndex`` (but frequency is lost): + +.. ipython:: python + + ts[[0, 2, 6]].index .. _timeseries.offsets:
closes #3938
https://api.github.com/repos/pandas-dev/pandas/pulls/3939
2013-06-17T23:41:32Z
2013-06-18T00:42:28Z
2013-06-18T00:42:28Z
2014-06-14T06:04:28Z
BUG: fix python3/2 numpy import_array macro build issue with clang
diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c index 534d60970dd81..4fdd8dc91ab04 100644 --- a/pandas/src/ujson/python/objToJSON.c +++ b/pandas/src/ujson/python/objToJSON.c @@ -100,7 +100,7 @@ enum PANDAS_FORMAT //#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__) #define PRINTMARK() -#if (PY_VERSION_HEX >= 0x03000000) +#if (PY_VERSION_HEX < 0x03000000) void initObjToJSON(void) #else int initObjToJSON(void)
closes #3872. no really, it does.
https://api.github.com/repos/pandas-dev/pandas/pulls/3936
2013-06-17T19:28:06Z
2013-06-17T20:22:52Z
2013-06-17T20:22:51Z
2014-06-21T19:34:31Z
FIX: StataReader
diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ddc9db0b76539..632e97c24721f 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -407,7 +407,7 @@ def _null_terminate(self, s): def _next(self): typlist = self.typlist - if self._has_string_data: + if self.has_string_data: data = [None] * self.nvar for i in range(len(data)): if type(typlist[i]) is int: @@ -523,7 +523,8 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None): for i in cols_: if self.dtyplist[i] is not None: col = data.columns[i] - data[col] = Series(data[col], data[col].index, self.dtyplist[i]) + if data[col].dtype is not np.dtype(object): + data[col] = Series(data[col], data[col].index, self.dtyplist[i]) if convert_dates: cols = np.where(map(lambda x: x in _date_formats, self.fmtlist))[0] @@ -856,7 +857,7 @@ def _write_data_nodates(self): typ = ord(typlist[i]) if typ <= 244: # we've got a string if len(var) < typ: - var = _pad_bytes(self._decode_bytes(var), len(var) + 1) + var = _pad_bytes(var, typ) self._write(var) else: try: @@ -884,15 +885,13 @@ def _write_data_dates(self): if i in convert_dates: var = _datetime_to_stata_elapsed(var, self.fmtlist[i]) if typ <= 244: # we've got a string - if isnull(var): - var = "" # missing string if len(var) < typ: - var = _pad_bytes(var, len(var) + 1) + var = _pad_bytes(var, typ) self._write(var) else: if isnull(var): # this only matters for floats var = MISSING_VALUES[typ] - self._write(struct.pack(byteorder+TYPE_MAP[typ], var)) + self._file.write(struct.pack(byteorder+TYPE_MAP[typ], var)) def _null_terminate(self, s, as_string=False): null_byte = '\x00' diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 4584976c41383..0e32fb91fc743 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -3,19 +3,19 @@ from datetime import datetime import os import unittest -import sys import warnings import nose import numpy as np -from pandas.core.frame import DataFrame +from 
pandas.core.frame import DataFrame, Series from pandas.io.parsers import read_csv from pandas.io.stata import read_stata, StataReader, StataWriter import pandas.util.testing as tm from pandas.util.testing import ensure_clean from pandas.util.misc import is_little_endian + class StataTests(unittest.TestCase): def setUp(self): @@ -35,6 +35,7 @@ def setUp(self): self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv') self.dta9 = os.path.join(self.dirpath, 'lbw.dta') self.csv9 = os.path.join(self.dirpath, 'lbw.csv') + self.dta10 = os.path.join(self.dirpath, 'stata10.dta') def read_dta(self, file): return read_stata(file, convert_dates=True) @@ -189,9 +190,24 @@ def test_read_dta9(self): decimal=3 ) + def test_read_dta10(self): + original = DataFrame( + data= + [ + ["string", "object", 1, 1.1, np.datetime64('2003-12-25')] + ], + columns=['string', 'object', 'integer', 'float', 'datetime']) + original["object"] = Series(original["object"], dtype=object) + original.index.name = 'index' + + with ensure_clean(self.dta10) as path: + original.to_stata(path, {'datetime': 'tc'}, False) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal(written_and_read_again.set_index('index'), original) + def test_stata_doc_examples(self): with ensure_clean(self.dta5) as path: - df = DataFrame(np.random.randn(10,2),columns=list('AB')) + df = DataFrame(np.random.randn(10, 2), columns=list('AB')) df.to_stata(path) if __name__ == '__main__':
Fix for a bug in StataReader resulting in errors when reading Stata file with string columns
https://api.github.com/repos/pandas-dev/pandas/pulls/3935
2013-06-17T14:05:37Z
2013-06-21T00:11:15Z
2013-06-21T00:11:15Z
2014-07-16T08:14:39Z
CLN: cleaned up _try_cast in core/groupby.py to eliminate cruft (GH3920)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 168615c060c2b..d15dcc1510577 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -432,23 +432,13 @@ def picker(arr): def _try_cast(self, result, obj): """ try to cast the result to our obj original type, we may have roundtripped thru object in the mean-time """ - try: - if obj.ndim > 1: - dtype = obj.values.dtype - else: - dtype = obj.dtype - - if _is_numeric_dtype(dtype): - - # need to respect a non-number here (e.g. Decimal) - if len(result) and issubclass(type(result[0]),(np.number,float,int)): - result = _possibly_downcast_to_dtype(result, dtype) + if obj.ndim > 1: + dtype = obj.values.dtype + else: + dtype = obj.dtype - elif issubclass(dtype.type, np.datetime64): - if is_datetime64_dtype(obj.dtype): - result = result.astype(obj.dtype) - except: - pass + if not np.isscalar(result): + result = _possibly_downcast_to_dtype(result, dtype) return result
raised on #3920
https://api.github.com/repos/pandas-dev/pandas/pulls/3934
2013-06-17T12:20:16Z
2013-06-17T12:47:20Z
2013-06-17T12:47:20Z
2014-07-16T08:14:37Z
BUG: (GH3925) partial string selection with seconds resolution
diff --git a/RELEASE.rst b/RELEASE.rst index 500ba2df1ed47..4e5d340c9ab1d 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -254,6 +254,7 @@ pandas 0.11.1 in the ``to_replace`` argument wasn't working (GH3907_) - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing two integer arrays with at least 10000 cells total (GH3764_) + - Indexing with a string with seconds resolution not selecting from a time index (GH3925_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -355,9 +356,13 @@ pandas 0.11.1 .. _GH3907: https://github.com/pydata/pandas/issues/3907 .. _GH3911: https://github.com/pydata/pandas/issues/3911 .. _GH3912: https://github.com/pydata/pandas/issues/3912 +<<<<<<< HEAD .. _GH3764: https://github.com/pydata/pandas/issues/3764 .. _GH3888: https://github.com/pydata/pandas/issues/3888 +======= +.. _GH3925: https://github.com/pydata/pandas/issues/3925 +>>>>>>> BUG: (GH3925) Indexing with a string with seconds resolution not selecting from a time index pandas 0.11.0 ============= diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7a7210c479c67..33f72a0d15415 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,5 +1,6 @@ # pylint: disable=W0223 +from datetime import datetime from pandas.core.common import _asarray_tuplesafe from pandas.core.index import Index, MultiIndex, _ensure_index import pandas.core.common as com diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 1cb986ee6cd7c..109ceced4fd9d 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1102,6 +1102,13 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): t1 = Timestamp(st, tz=self.tz) t2 = Timestamp(Timestamp(st + offsets.Minute(), tz=self.tz).value - 1) + elif (reso == 'second' and ( + self._resolution == Resolution.RESO_SEC or not is_monotonic)): + st = datetime(parsed.year, parsed.month, 
parsed.day, + hour=parsed.hour, minute=parsed.minute, second=parsed.second) + t1 = Timestamp(st, tz=self.tz) + t2 = Timestamp(Timestamp(st + offsets.Second(), + tz=self.tz).value - 1) else: raise KeyError @@ -1110,9 +1117,16 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): if is_monotonic: + # we are out of range + if len(stamps) and ( + (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or ( + (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))): + raise KeyError + # a monotonic (sorted) series can be sliced left = stamps.searchsorted(t1.value, side='left') if use_lhs else None right = stamps.searchsorted(t2.value, side='right') if use_rhs else None + return slice(left, right) lhs_mask = (stamps>=t1.value) if use_lhs else True diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 51097cd157b99..08bcd9cfad8cc 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -251,6 +251,15 @@ def test_indexing(self): expected = ts['2013'] assert_series_equal(expected,ts) + # GH 3925, indexing with a seconds resolution string / datetime object + df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s')) + expected = df.loc[[df.index[2]]] + result = df['2012-01-02 18:01:02'] + self.assert_(result == expected) + + # this is a single date, so will raise + self.assertRaises(KeyError, df.__getitem__, df.index[2],) + def assert_range_equal(left, right): assert(left.equals(right)) assert(left.freq == right.freq)
this no longer has much to do with #3925, and is only fixing a bug Minor revision to select on second frequency ``` In [11]: df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s')) In [12]: df Out[12]: open high low close volume 2012-01-02 18:01:00-06:00 0.131243 0.301542 0.128027 0.804162 1.296658 2012-01-02 18:01:01-06:00 0.341487 1.548695 0.703234 0.904201 1.422337 2012-01-02 18:01:02-06:00 -1.050453 -1.884035 1.537788 -0.821058 0.558631 2012-01-02 18:01:03-06:00 0.846885 1.045378 -0.722903 -0.613625 -0.476531 2012-01-02 18:01:04-06:00 1.186823 -0.018299 -0.513886 -1.103269 -0.311907 In [14]: df['2012-01-02 18:01:02'] Out[14]: open high low close volume 2012-01-02 18:01:02-06:00 -1.050453 -1.884035 1.537788 -0.821058 0.558631 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3931
2013-06-17T01:37:13Z
2013-06-19T00:59:39Z
2013-06-19T00:59:39Z
2014-06-19T03:37:21Z
DOC: fix to_json docstring nesting issue
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bae85aa84a96e..16b3176521e28 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -539,25 +539,27 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', ---------- path_or_buf : the path or buffer to write the result string if this is None, return a StringIO of the converted string - orient : + orient : string - Series : - default is 'index' - allowed values are: {'split','records','index'} + * Series - DataFrame : - default is 'columns' - allowed values are: {'split','records','index','columns','values'} + - default is 'index' + - allowed values are: {'split','records','index'} - The format of the JSON string - split : dict like - {index -> [index], columns -> [columns], data -> [values]} - records : list like [{column -> value}, ... , {column -> value}] - index : dict like {index -> {column -> value}} - columns : dict like {column -> {index -> value}} - values : just the values array + * DataFrame - date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601), + - default is 'columns' + - allowed values are: {'split','records','index','columns','values'} + + * The format of the JSON string + + - split : dict like {index -> [index], columns -> [columns], data -> [values]} + - records : list like [{column -> value}, ... , {column -> value}] + - index : dict like {index -> {column -> value}} + - columns : dict like {column -> {index -> value}} + - values : just the values array + + date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601) default is epoch double_precision : The number of decimal places to use when encoding floating point values, default 10.
https://api.github.com/repos/pandas-dev/pandas/pulls/3930
2013-06-17T01:06:47Z
2013-06-17T01:42:50Z
2013-06-17T01:42:50Z
2014-07-16T08:14:34Z
PTF no more
diff --git a/.travis.yml b/.travis.yml index 8e2bb49d9df93..30f09deefd93a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,14 +5,6 @@ python: env: global: - - secure: "O04RU5QRKEDL/SrIWEsVe8O+1TxZqZQSa28Sd+Fz48NW/XddhefYyxzqcUXh\nk/NjWMqknJRQhApLolBianVpsE577OTllzlcyKn3nUL6hjOXcoszGaYray7S\niNGKGyO8xrtB/ZQDtmupz0ksK8sLoCTscdiGotFulczbx0zt+4g=" - - secure: "PUJ9nC1/v2vpFUtELSoSjI53OHCVXfFTb8+t5lIGIqHtjUBkhiJSNPfCv8Bx\ndsdrx30qP8KsSceYzaa/bog6p8YNU1iih23S0KbjucutvA0LNHBTNvnxmjBR\nSJfKd5FmwnXvizRyghYBzmQ3NmGO7ADw2DBwKOhgGMqCHZ8Tlc8=" - - secure: "IDcMrCCW+6pgJtsI3Q163OPc0iec1ogpitaqiRhHcrEBUCXZgVeclOeiZBlw\n/u+uGyW/O0NhHMaFXKB8BdDVwlQEEHv48syN6npS/A5+O6jriWKL4ozttOhE\npOlu+yLhHnEwx6wZVIHRTVn+t1GkOrjlBcjaQi+Z13G3XmDaSG8=" - - secure: "Zu9aj0dTGpvMqT/HqBGQgDYl/v5ubC7lFwfE8Fqb0N1UVXqbpjXnNH/7oal1\nUsIT7klO++LWm+LxsP/A1FWENTSgdYe99JQtNyauW+0x5YR1JTuDJ8atDgx9\nSq66CaVpS5t+ov7UVm2bKSUX+1S8+8zGbIDADrMxEzYEMF7WoGM=" - - secure: "AfIvLxvCxj22zrqg3ejGf/VePKT2AyGT9erYzlKpBS0H8yi5Pp1MfmJjhaR4\n51zBtzqHPHiIEY6ZdE06o9PioMWkXS+BqJNrxGSbt1ltxgOFrxW5zOpwiFGZ\nZOv1YeFkuPf8PEsWT7615mdydqTQT7B0pqUKK/d6aka4TQ/tg5Q=" - - secure: "EM4ySBUusReNu7H1QHXvjnP/J1QowvfpwEBmjysYxJuq7KcG8HhhlfpUF+Gh\nLBzLak9QBA67k4edhum3qtKuJR5cHuja3+zuV8xmx096B/m96liJFTrwZpea\n58op3W6ZULctEpQNgIkyae20bjxl4f99JhZRUlonoPfx/rBIMFc=" - - secure: "pgMYS/6MQqDGb58qdzTJesvAMmcJWTUEEM8gf9rVbfqfxceOL4Xpx8siR9B2\nC4U4MW1cHMPP3RFEb4Jy0uK49aHH10snwZY1S84YPPllpH5ZFXVdN68OayNj\nh4k5N/2hhaaQuJ6Uh8v8s783ye4oYTOW5RJUFqQu4QdG4IkTIMs=" - - NOSE_ARGS="not slow" UPLOAD=true matrix: @@ -41,7 +33,6 @@ before_install: # - export APT_ARGS=-qq # comment this to debug travis install issues # - set -x # enable this to see bash commands - export ZIP_FLAGS=-q # comment this to debug travis install issues - - source ci/envars.sh # we need to source this to bring in the envars - ci/before_install.sh - python -V diff --git a/ci/before_install.sh b/ci/before_install.sh index 677ddfa642f80..e4376e1bf21c2 100755 --- a/ci/before_install.sh +++ 
b/ci/before_install.sh @@ -10,27 +10,4 @@ echo "inside $0" # overview sudo apt-get update $APT_ARGS # run apt-get update for all versions -if $PLEASE_TRAVIS_FASTER ; then - echo "Faster? well... I'll try." - - if $CACHE_FILE_AVAILABLE ; then - echo retrieving "$CACHE_FILE_URL"; - - wget -q "$CACHE_FILE_URL" -O "/tmp/_$CYTHON_HASH.zip"; - unzip $ZIP_FLAGS /tmp/_"$CYTHON_HASH.zip" -d "$BUILD_CACHE_DIR"; - rm -f /tmp/_"$CYTHON_HASH.zip" - # copy cythonized c files over - cp -R "$BUILD_CACHE_DIR"/pandas/*.c pandas/ - cp -R "$BUILD_CACHE_DIR"/pandas/src/*.c pandas/src/ - fi; - echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE" - if $VENV_FILE_AVAILABLE ; then - echo "getting venv" - wget -q $VENV_FILE_URL -O "/tmp/venv.zip"; - sudo unzip $ZIP_FLAGS -o /tmp/venv.zip -d "/"; - sudo chown travis -R "$VIRTUAL_ENV" - rm -f /tmp/_"$CYTHON_HASH.zip" - fi; -fi - true # never fail because bad things happened here diff --git a/ci/envars.sh b/ci/envars.sh deleted file mode 100755 index 2b4cacfd96fe4..0000000000000 --- a/ci/envars.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash - -# This must be sourced by .travis.yml, so any envars exported here will -# be available to the rest of the build stages - -# - computes a hash based on the cython files in the codebade -# - retrieves the decrypted key if any for all whitelisted forks -# - checks whether the user optd int to use the cache -# - if so, check for availablity of cache files on the server, based on hash -# - set envars to control what the following scripts do - -# at most one of these will decrypt, so the end result is that $STORE_KEY -# either holds a single key or does not -export STORE_KEY="$STORE_KEY0""$STORE_KEY1""$STORE_KEY2""$STORE_KEY3""$STORE_KEY4" -export STORE_KEY="$STORE_KEY""$STORE_KEY5""$STORE_KEY6""$STORE_KEY7" - -export CYTHON_HASH=$(find pandas | grep -P '\.(pyx|pxd)$' | sort \ - | while read N; do echo $(tail -n+1 $N | md5sum ) ;done | md5sum| cut -d ' ' -f 1) - -export 
CYTHON_HASH=$CYTHON_HASH-$TRAVIS_PYTHON_VERSION - -# where the cache files live on the server -export CACHE_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/$CYTHON_HASH.zip" -export VENV_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/venv-$TRAVIS_PYTHON_VERSION.zip" -export CACHE_FILE_STORE_URL="https://cache27-pypandas.rhcloud.com/store/$STORE_KEY" - -echo "Hashing:" -find pandas | grep -P '\.(pyx|pxd)$' -echo "Key: $CYTHON_HASH" - -export CACHE_FILE_AVAILABLE=false -export VENV_FILE_AVAILABLE=false -export PLEASE_TRAVIS_FASTER=false - -# check whether the user opted in to use the cache via commit message -if [ x"$(git log --format='%B' -n 1 | grep PLEASE_TRAVIS_FASTER | wc -l)" != x"0" ]; then - export PLEASE_TRAVIS_FASTER=true -fi; -if [ x"$(git log --format='%B' -n 1 | grep PTF | wc -l)" != x"0" ]; then - export PLEASE_TRAVIS_FASTER=true -fi; - -if $PLEASE_TRAVIS_FASTER; then - - # check whether the files exists on the server - curl -s -f -I "$CACHE_FILE_URL" # silent, don;t expose key - if [ x"$?" == x"0" ] ; then - export CACHE_FILE_AVAILABLE=true; - fi - - - curl -s -f -I "$VENV_FILE_URL" # silent, don;t expose key - if [ x"$?" == x"0" ] ; then - export VENV_FILE_AVAILABLE=true; - fi - - # the pandas build cache machinery needs this set, and the directory created - export BUILD_CACHE_DIR="/tmp/build_cache" - mkdir "$BUILD_CACHE_DIR" -fi; - -# debug -echo "PLEASE_TRAVIS_FASTER=$PLEASE_TRAVIS_FASTER" -echo "CACHE_FILE_AVAILABLE=$CACHE_FILE_AVAILABLE" -echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE" - -true diff --git a/ci/install.sh b/ci/install.sh index 294db286a1001..60ea5643c6ad2 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -45,102 +45,60 @@ if [ x"$FULL_DEPS" == x"true" ] ; then fi fi -# Everything installed inside this clause into site-packages -# will get included in the cached venv downloaded from the net -# in PTF mode -if ( ! 
$VENV_FILE_AVAILABLE ); then - echo "Running full monty" - # Hard Deps - pip install $PIP_ARGS nose python-dateutil pytz +# Hard Deps +pip install $PIP_ARGS nose python-dateutil pytz +pip install $PIP_ARGS cython + +if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 + pip install $PIP_ARGS numpy==1.7.0 +elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then + # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise + pip install $PIP_ARGS numpy==1.6.1 +else + pip install $PIP_ARGS numpy==1.6.1 +fi + +# Optional Deps +if [ x"$FULL_DEPS" == x"true" ]; then + echo "Installing FULL_DEPS" pip install $PIP_ARGS cython - if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 - pip install $PIP_ARGS numpy==1.7.0 - elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise - pip install $PIP_ARGS numpy==1.6.1 + if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then + pip install $PIP_ARGS xlwt + pip install $PIP_ARGS bottleneck + pip install $PIP_ARGS numexpr==2.0.1 + pip install $PIP_ARGS tables==2.3.1 else - pip install $PIP_ARGS numpy==1.6.1 + pip install $PIP_ARGS numexpr + pip install $PIP_ARGS tables fi - # Optional Deps - if [ x"$FULL_DEPS" == x"true" ]; then - echo "Installing FULL_DEPS" - pip install $PIP_ARGS cython - - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - pip install $PIP_ARGS xlwt - pip install $PIP_ARGS bottleneck - pip install $PIP_ARGS numexpr==2.0.1 - pip install $PIP_ARGS tables==2.3.1 - else - pip install $PIP_ARGS numexpr - pip install $PIP_ARGS tables - fi - - pip install $PIP_ARGS matplotlib - pip install $PIP_ARGS openpyxl - pip install $PIP_ARGS xlrd>=0.9.0 - pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' - pip install $PIP_ARGS patsy - pip install $PIP_ARGS html5lib - - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then - sudo apt-get $APT_ARGS remove python3-lxml - elif 
[ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - sudo apt-get $APT_ARGS remove python-lxml - fi - - pip install $PIP_ARGS lxml - # fool statsmodels into thinking pandas was already installed - # so it won't refuse to install itself. We want it in the zipped venv - - mkdir $SITE_PKG_DIR/pandas - touch $SITE_PKG_DIR/pandas/__init__.py - echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py - pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels - - rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas + pip install $PIP_ARGS matplotlib + pip install $PIP_ARGS openpyxl + pip install $PIP_ARGS xlrd>=0.9.0 + pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' + pip install $PIP_ARGS patsy + pip install $PIP_ARGS html5lib + + if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then + sudo apt-get $APT_ARGS remove python3-lxml + elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then + sudo apt-get $APT_ARGS remove python-lxml fi - # pack up the venv and cache it - if [ x"$STORE_KEY" != x"" ] && $UPLOAD && $PLEASE_TRAVIS_FASTER ; then - VENV_FNAME="venv-$TRAVIS_PYTHON_VERSION.zip" - - zip $ZIP_FLAGS -r "$HOME/$VENV_FNAME" $SITE_PKG_DIR/ - ls -l "$HOME/$VENV_FNAME" - echo "posting venv" - # silent, don't expose key - curl -s --form upload=@"$HOME/$VENV_FNAME" "$CACHE_FILE_STORE_URL/$VENV_FNAME" - fi + pip install $PIP_ARGS lxml + # fool statsmodels into thinking pandas was already installed + # so it won't refuse to install itself. 
-fi; + mkdir $SITE_PKG_DIR/pandas + touch $SITE_PKG_DIR/pandas/__init__.py + echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py + pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels -#build and install pandas -if [ x"$BUILD_CACHE_DIR" != x"" ]; then - scripts/use_build_cache.py -d - python setup.py install; -else - python setup.py build_ext install + rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas fi -# package pandas build artifacts and send them home -# that's everything the build cache (scripts/use_build_cache.py) -# stored during the build (.so, pyx->.c and 2to3) -if (! $CACHE_FILE_AVAILABLE) ; then - if [ x"$STORE_KEY" != x"" ] && $UPLOAD && $PLEASE_TRAVIS_FASTER ; then - echo "Posting artifacts" - strip "$BUILD_CACHE_DIR/*" &> /dev/null - echo "$BUILD_CACHE_DIR" - cd "$BUILD_CACHE_DIR"/ - zip -r $ZIP_FLAGS "$HOME/$CYTHON_HASH".zip * - cd "$TRAVIS_BUILD_DIR" - pwd - zip "$HOME/$CYTHON_HASH".zip $(find pandas | grep -P '\.(pyx|pxd)$' | sed -r 's/.(pyx|pxd)$/.c/') - - # silent, don't expose key - curl --connect-timeout 5 -s --form upload=@"$HOME/$CYTHON_HASH".zip "$CACHE_FILE_STORE_URL/$CYTHON_HASH.zip" - fi -fi +# build pandas +python setup.py build_ext install true diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py index 2a2a5c9643c75..d019af3370ba9 100755 --- a/vb_suite/test_perf.py +++ b/vb_suite/test_perf.py @@ -37,18 +37,10 @@ import random import numpy as np -import pandas as pd from pandas import DataFrame, Series -try: - import git # gitpython -except Exception: - print("Error: Please install the `gitpython` package\n") - sys.exit(1) - from suite import REPO_PATH -VB_DIR = os.path.dirname(os.path.abspath(__file__)) DEFAULT_MIN_DURATION = 0.01 HEAD_COL="head[ms]" BASE_COL="base[ms]" @@ -65,14 +57,6 @@ parser.add_argument('-t', '--target-commit', help='The commit to compare against the baseline (default: HEAD).', type=str) -parser.add_argument('--base-pickle', - help='name of 
pickle file with timings data generated by a former `-H -d FILE` run. '\ - 'filename must be of the form <hash>-*.* or specify --base-commit seperately', - type=str) -parser.add_argument('--target-pickle', - help='name of pickle file with timings data generated by a former `-H -d FILE` run '\ - 'filename must be of the form <hash>-*.* or specify --target-commit seperately', - type=str) parser.add_argument('-m', '--min-duration', help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION, type=float, @@ -85,7 +69,7 @@ metavar="FNAME", dest='outdf', default=None, - help='Name of file to df.to_pickle() the result table into. Will overwrite') + help='Name of file to df.save() the result table into. Will overwrite') parser.add_argument('-r', '--regex', metavar="REGEX", dest='regex', @@ -120,7 +104,8 @@ parser.add_argument('-a', '--affinity', metavar="a", dest='affinity', - default=None, + default=1, + type=int, help='set processor affinity of processm by default bind to cpu/core #1 only' 'requires the "affinity" python module , will raise Warning otherwise' ) @@ -221,74 +206,30 @@ def profile_comparative(benchmarks): head_res = get_results_df(db, h_head) baseline_res = get_results_df(db, h_baseline) - - report_comparative(head_res,baseline_res) - + ratio = head_res['timing'] / baseline_res['timing'] + totals = DataFrame({HEAD_COL:head_res['timing'], + BASE_COL:baseline_res['timing'], + 'ratio':ratio, + 'name':baseline_res.name}, + columns=[HEAD_COL, BASE_COL, "ratio", "name"]) + totals = totals.ix[totals[HEAD_COL] > args.min_duration] + # ignore below threshold + totals = totals.dropna( + ).sort("ratio").set_index('name') # sort in ascending order + + h_msg = repo.messages.get(h_head, "") + b_msg = repo.messages.get(h_baseline, "") + + print_report(totals,h_head=h_head,h_msg=h_msg, + h_baseline=h_baseline,b_msg=b_msg) + + if args.outdf: + prprint("The results DataFrame was written to '%s'\n" % args.outdf) + 
totals.save(args.outdf) finally: # print("Disposing of TMP_DIR: %s" % TMP_DIR) shutil.rmtree(TMP_DIR) -def prep_pickle_for_total(df, agg_name='median'): - """ - accepts a datafram resulting from invocation with -H -d o.pickle - If multiple data columns are present (-N was used), the - `agg_name` attr of the datafram will be used to reduce - them to a single value per vbench, df.median is used by defa - ult. - - Returns a datadrame of the form expected by prep_totals - """ - def prep(df): - agg = getattr(df,agg_name) - df = DataFrame(agg(1)) - cols = list(df.columns) - cols[0]='timing' - df.columns=cols - df['name'] = list(df.index) - return df - - return prep(df) - -def prep_totals(head_res, baseline_res): - """ - Each argument should be a dataframe with 'timing' and 'name' columns - where name is the name of the vbench. - - returns a 'totals' dataframe, suitable as input for print_report. - """ - head_res, baseline_res = head_res.align(baseline_res) - ratio = head_res['timing'] / baseline_res['timing'] - totals = DataFrame({HEAD_COL:head_res['timing'], - BASE_COL:baseline_res['timing'], - 'ratio':ratio, - 'name':baseline_res.name}, - columns=[HEAD_COL, BASE_COL, "ratio", "name"]) - totals = totals.ix[totals[HEAD_COL] > args.min_duration] - # ignore below threshold - totals = totals.dropna( - ).sort("ratio").set_index('name') # sort in ascending order - return totals - -def report_comparative(head_res,baseline_res): - try: - r=git.Repo(VB_DIR) - except: - import pdb - pdb.set_trace() - - totals = prep_totals(head_res,baseline_res) - - h_head = args.target_commit - h_baseline = args.base_commit - h_msg = r.commit(h_head).message.strip() - b_msg = r.commit(h_baseline).message.strip() - - print_report(totals,h_head=h_head,h_msg=h_msg, - h_baseline=h_baseline,b_msg=b_msg) - - if args.outdf: - prprint("The results DataFrame was written to '%s'\n" % args.outdf) - totals.to_pickle(args.outdf) def profile_head_single(benchmark): import gc @@ -364,7 +305,7 @@ def 
profile_head(benchmarks): if args.outdf: prprint("The results DataFrame was written to '%s'\n" % args.outdf) - DataFrame(results).to_pickle(args.outdf) + DataFrame(results).save(args.outdf) def print_report(df,h_head=None,h_msg="",h_baseline=None,b_msg=""): @@ -447,23 +388,18 @@ def main(): random.seed(args.seed) np.random.seed(args.seed) - if args.base_pickle and args.target_pickle: - baseline_res = prep_pickle_for_total(pd.read_pickle(args.base_pickle)) - target_res = prep_pickle_for_total(pd.read_pickle(args.target_pickle)) - - report_comparative(target_res, baseline_res) - sys.exit(0) - - if args.affinity is not None: - try: - import affinity - - affinity.set_process_affinity_mask(0,args.affinity) - assert affinity.get_process_affinity_mask(0) == args.affinity - print("CPU affinity set to %d" % args.affinity) - except ImportError: - print("-a/--afinity specified, but the 'affinity' module is not available, aborting.\n") - sys.exit(1) + try: + import affinity + affinity.set_process_affinity_mask(0,args.affinity) + assert affinity.get_process_affinity_mask(0) == args.affinity + print("CPU affinity set to %d" % args.affinity) + except ImportError: + import warnings + print("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"+ + "The 'affinity' module is not available, results may be unreliable\n" + + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n" + ) + time.sleep(2) print("\n") prprint("LOG_FILE = %s" % args.log_file) @@ -543,39 +479,10 @@ def inner(repo_path): if __name__ == '__main__': args = parser.parse_args() - if (not args.head - and not (args.base_commit and args.target_commit) - and not (args.base_pickle and args.target_pickle)): + if not args.head and (not args.base_commit and not args.target_commit): parser.print_help() - sys.exit(1) - elif ((args.base_pickle or args.target_pickle) and not - (args.base_pickle and args.target_pickle)): - print("Must specify Both --base-pickle and --target-pickle.") - 
sys.exit(1) - - if ((args.base_pickle or args.target_pickle) and not - (args.base_commit and args.target_commit)): - if not args.base_commit: - print("base_commit not specified, Assuming base_pickle is named <commit>-foo.*") - args.base_commit = args.base_pickle.split('-')[0] - if not args.target_commit: - print("target_commit not specified, Assuming target_pickle is named <commit>-foo.*") - args.target_commit = args.target_pickle.split('-')[0] - - import warnings - warnings.filterwarnings('ignore',category=FutureWarning) - warnings.filterwarnings('ignore',category=DeprecationWarning) - - if args.base_commit and args.target_commit: - print("Verifying specified commits exist in repo...") - r=git.Repo(VB_DIR) - for c in [ args.base_commit, args.target_commit ]: - try: - msg = r.commit(c).message.strip() - except git.BadObject: - print("The commit '%s' was not found, aborting" % c) - sys.exit(1) - else: - print("%s: %s" % (c,msg)) - - main() + else: + import warnings + warnings.filterwarnings('ignore',category=FutureWarning) + warnings.filterwarnings('ignore',category=DeprecationWarning) + main()
The travis network build cache (activated via commit message, for whitelisted commiters) was added a few months back. It worked well for a while, but ultimately it's seen little use in practive by commiters. While it was gratifying to see this actually worked, all the additional magic in ci/ doesn't pay for itself in practice, so I've decided to retire this unless there are strong objections and get a simpler ci/ in return. The build cache system got an upgrade as a side-effect of this work and `scripts/use_build_cache.py` remains a useful bit of kit. Not entirely wasted effort after all. xref https://github.com/pydata/pandas/pull/3383
https://api.github.com/repos/pandas-dev/pandas/pulls/3929
2013-06-17T00:41:30Z
2013-06-17T02:45:06Z
2013-06-17T02:45:06Z
2014-07-16T08:14:30Z
TST: Fix error in assert_produces_warning.
diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 20e59b6d3342a..c297cfa554fa5 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -790,5 +790,5 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always"): if expected_warning: assert saw_warning, ("Did not see expected warning of class %r." % expected_warning.__name__) - assert not extra_warnings, ("Caused unexpected warning(s): %r." - % extra_warnings) + assert not extra_warnings, ("Caused unexpected warning(s): %r." + % extra_warnings)
When I copied this over and sent to @cpcloud, I thought the indentation was wrong and changed it. Turned out I was wrong -- sorry about that! Now all the doctests pass...
https://api.github.com/repos/pandas-dev/pandas/pulls/3927
2013-06-16T20:25:20Z
2013-06-17T00:18:26Z
2013-06-17T00:18:26Z
2014-07-16T08:14:29Z
CLN: Remove unused Exceptions
diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 0a099661c58f1..92f69a7444aab 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -90,10 +90,6 @@ def panel_index(time, panels, names=['time', 'panel']): return MultiIndex(levels, labels, sortorder=None, names=names) -class PanelError(Exception): - pass - - def _arith_method(func, name): # work only for scalars diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 37141e37d965c..054363d8cda06 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -24,10 +24,6 @@ import pandas.parser as _parser from pandas.tseries.period import Period - -class DateConversionError(Exception): - pass - _parser_params = """Also supports optionally iterating or breaking of the file into chunks. @@ -1752,10 +1748,7 @@ def _try_convert_dates(parser, colspec, data_dict, columns): new_name = '_'.join([str(x) for x in colnames]) to_parse = [data_dict[c] for c in colnames if c in data_dict] - try: - new_col = parser(*to_parse) - except DateConversionError: - new_col = parser(_concat_date_cols(to_parse)) + new_col = parser(*to_parse) return new_name, new_col, colnames diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 1cb986ee6cd7c..46d8d0cc00795 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -87,10 +87,6 @@ def _ensure_datetime64(other): raise TypeError('%s type object %s' % (type(other), str(other))) -class TimeSeriesError(Exception): - pass - - _midnight = time(0, 0) class DatetimeIndex(Int64Index):
There are at least 3 `Exception`s that are _never_ raised anywhere, so they can be removed without issue. (There are other exceptions that probably should be removed or refactored, but this is just a basic commit to make sure that these don't get used going forward.) CLN: Remove 'DateConversionError' that is never raised anywhere CLN: Remove TimeSeriesError that was never used CLN: Remove unused 'PanelError'
https://api.github.com/repos/pandas-dev/pandas/pulls/3921
2013-06-16T04:52:12Z
2013-06-19T01:13:57Z
2013-06-19T01:13:57Z
2014-07-06T05:46:03Z
CLN: Fix CRLFs in repo + add .gitattributes
diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000..0ef16e42a0660 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,15 @@ +* text=auto +# enforce text on certain files +*.py text +*.pyx text +*.pyd text +*.c text +*.h text +*.html text +*.csv text +*.json text +*.pickle binary +*.h5 binary +*.dta binary +*.xls binary +*.xlsx binary diff --git a/doc/source/10min.rst b/doc/source/10min.rst index d1246dc223626..af84efb93bb5e 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -1,708 +1,708 @@ -.. _10min: - -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import random - import os - np.random.seed(123456) - from pandas import * - import pandas as pd - randn = np.random.randn - randint = np.random.randint - np.set_printoptions(precision=4, suppress=True) - options.display.mpl_style='default' - - #### portions of this were borrowed from the - #### Pandas cheatsheet - #### created during the PyData Workshop-Sprint 2012 - #### Hannah Chen, Henry Chow, Eric Cox, Robert Mauriello - - -******************** -10 Minutes to Pandas -******************** - -This is a short introduction to pandas, geared mainly for new users. -You can see more complex recipes in the :ref:`Cookbook<cookbook>` - -Customarily, we import as follows - -.. ipython:: python - - import pandas as pd - import numpy as np - -Object Creation ---------------- - -See the :ref:`Data Structure Intro section <dsintro>` - -Creating a ``Series`` by passing a list of values, letting pandas create a default -integer index - -.. ipython:: python - - s = pd.Series([1,3,5,np.nan,6,8]) - s - -Creating a ``DataFrame`` by passing a numpy array, with a datetime index and labeled columns. - -.. ipython:: python - - dates = pd.date_range('20130101',periods=6) - dates - df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD')) - df - -Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like. - -.. 
ipython:: python - - df2 = pd.DataFrame({ 'A' : 1., - 'B' : pd.Timestamp('20130102'), - 'C' : pd.Series(1,index=range(4),dtype='float32'), - 'D' : np.array([3] * 4,dtype='int32'), - 'E' : 'foo' }) - df2 - -Having specific :ref:`dtypes <basics.dtypes>` - -.. ipython:: python - - df2.dtypes - -Viewing Data ------------- - -See the :ref:`Basics section <basics>` - -See the top & bottom rows of the frame - -.. ipython:: python - - df.head() - df.tail(3) - -Display the index,columns, and the underlying numpy data - -.. ipython:: python - - df.index - df.columns - df.values - -Describe shows a quick statistic summary of your data - -.. ipython:: python - - df.describe() - -Transposing your data - -.. ipython:: python - - df.T - -Sorting by an axis - -.. ipython:: python - - df.sort_index(axis=1, ascending=False) - -Sorting by values - -.. ipython:: python - - df.sort(columns='B') - -Selection ---------- - -.. note:: - - While standard Python / Numpy expressions for selecting and setting are - intuitive and come in handy for interactive work, for production code, we - recommend the optimized pandas data access methods, ``.at``, ``.iat``, - ``.loc``, ``.iloc`` and ``.ix``. - -See the :ref:`Indexing section <indexing>` and below. - -Getting -~~~~~~~ - -Selecting a single column, which yields a ``Series``, -equivalent to ``df.A`` - -.. ipython:: python - - df['A'] - -Selecting via ``[]``, which slices the rows. - -.. ipython:: python - - df[0:3] - df['20130102':'20130104'] - -Selection by Label -~~~~~~~~~~~~~~~~~~ - -See more in :ref:`Selection by Label <indexing.label>` - -For getting a cross section using a label - -.. ipython:: python - - df.loc[dates[0]] - -Selecting on a multi-axis by label - -.. ipython:: python - - df.loc[:,['A','B']] - -Showing label slicing, both endpoints are *included* - -.. ipython:: python - - df.loc['20130102':'20130104',['A','B']] - -Reduction in the dimensions of the returned object - -.. 
ipython:: python - - df.loc['20130102',['A','B']] - -For getting a scalar value - -.. ipython:: python - - df.loc[dates[0],'A'] - -For getting fast access to a scalar (equiv to the prior method) - -.. ipython:: python - - df.at[dates[0],'A'] - -Selection by Position -~~~~~~~~~~~~~~~~~~~~~ - -See more in :ref:`Selection by Position <indexing.integer>` - -Select via the position of the passed integers - -.. ipython:: python - - df.iloc[3] - -By integer slices, acting similar to numpy/python - -.. ipython:: python - - df.iloc[3:5,0:2] - -By lists of integer position locations, similar to the numpy/python style - -.. ipython:: python - - df.iloc[[1,2,4],[0,2]] - -For slicing rows explicitly - -.. ipython:: python - - df.iloc[1:3,:] - -For slicing columns explicitly - -.. ipython:: python - - df.iloc[:,1:3] - -For getting a value explicity - -.. ipython:: python - - df.iloc[1,1] - -For getting fast access to a scalar (equiv to the prior method) - -.. ipython:: python - - df.iat[1,1] - -There is one signficant departure from standard python/numpy slicing semantics. -python/numpy allow slicing past the end of an array without an associated -error. - -.. ipython:: python - - # these are allowed in python/numpy. - x = list('abcdef') - x[4:10] - x[8:10] - -Pandas will detect this and raise ``IndexError``, rather than return an empty -structure. - -:: - - >>> df.iloc[:,8:10] - IndexError: out-of-bounds on slice (end) - -Boolean Indexing -~~~~~~~~~~~~~~~~ - -Using a single column's values to select data. - -.. ipython:: python - - df[df.A > 0] - -A ``where`` operation for getting. - -.. ipython:: python - - df[df > 0] - - -Setting -~~~~~~~ - -Setting a new column automatically aligns the data -by the indexes - -.. ipython:: python - - s1 = pd.Series([1,2,3,4,5,6],index=date_range('20130102',periods=6)) - s1 - df['F'] = s1 - -Setting values by label - -.. ipython:: python - - df.at[dates[0],'A'] = 0 - -Setting values by position - -.. 
ipython:: python - - df.iat[0,1] = 0 - -Setting by assigning with a numpy array - -.. ipython:: python - - df.loc[:,'D'] = np.array([5] * len(df)) - -The result of the prior setting operations - -.. ipython:: python - - df - -A ``where`` operation with setting. - -.. ipython:: python - - df2 = df.copy() - df2[df2 > 0] = -df2 - df2 - - -Missing Data ------------- - -Pandas primarily uses the value ``np.nan`` to represent missing data. It is by -default not included in computations. See the :ref:`Missing Data section -<missing_data>` - -Reindexing allows you to change/add/delete the index on a specified axis. This -returns a copy of the data. - -.. ipython:: python - - df1 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E']) - df1.loc[dates[0]:dates[1],'E'] = 1 - df1 - -To drop any rows that have missing data. - -.. ipython:: python - - df1.dropna(how='any') - -Filling missing data - -.. ipython:: python - - df1.fillna(value=5) - -To get the boolean mask where values are ``nan`` - -.. ipython:: python - - pd.isnull(df1) - - -Operations ----------- - -See the :ref:`Basic section on Binary Ops <basics.binop>` - -Stats -~~~~~ - -Operations in general *exclude* missing data. - -Performing a descriptive statistic - -.. ipython:: python - - df.mean() - -Same operation on the other axis - -.. ipython:: python - - df.mean(1) - -Operating with objects that have different dimensionality and need alignment. -In addition, pandas automatically broadcasts along the specified dimension. - -.. ipython:: python - - s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2) - s - df.sub(s,axis='index') - - -Apply -~~~~~ - -Applying functions to the data - -.. ipython:: python - - df.apply(np.cumsum) - df.apply(lambda x: x.max() - x.min()) - -Histogramming -~~~~~~~~~~~~~ - -See more at :ref:`Histogramming and Discretization <basics.discretization>` - -.. 
ipython:: python - - s = Series(np.random.randint(0,7,size=10)) - s - s.value_counts() - -String Methods -~~~~~~~~~~~~~~ - -See more at :ref:`Vectorized String Methods <basics.string_methods>` - -.. ipython:: python - - s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) - s.str.lower() - -Merge ------ - -Concat -~~~~~~ - -Pandas provides various facilities for easily combining together Series, -DataFrame, and Panel objects with various kinds of set logic for the indexes -and relational algebra functionality in the case of join / merge-type -operations. - -See the :ref:`Merging section <merging>` - -Concatenating pandas objects together - -.. ipython:: python - - df = pd.DataFrame(np.random.randn(10, 4)) - df - - # break it into pieces - pieces = [df[:3], df[3:7], df[7:]] - - concat(pieces) - -Join -~~~~ - -SQL style merges. See the :ref:`Database style joining <merging.join>` - -.. ipython:: python - - left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]}) - right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) - left - right - merge(left, right, on='key') - -Append -~~~~~~ - -Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>` - -.. ipython:: python - - df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D']) - df - s = df.iloc[3] - df.append(s, ignore_index=True) - - -Grouping --------- - -By "group by" we are referring to a process involving one or more of the -following steps - - - **Splitting** the data into groups based on some criteria - - **Applying** a function to each group independently - - **Combining** the results into a data structure - -See the :ref:`Grouping section <groupby>` - -.. ipython:: python - - df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B' : ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C' : randn(8), 'D' : randn(8)}) - df - -Grouping and then applying a function ``sum`` to the resulting groups. - -.. 
ipython:: python - - df.groupby('A').sum() - -Grouping by multiple columns forms a hierarchical index, which we then apply -the function. - -.. ipython:: python - - df.groupby(['A','B']).sum() - -Reshaping ---------- - -See the section on :ref:`Hierarchical Indexing <indexing.hierarchical>` and -see the section on :ref:`Reshaping <reshaping.stacking>`). - -Stack -~~~~~ - -.. ipython:: python - - tuples = zip(*[['bar', 'bar', 'baz', 'baz', - 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', - 'one', 'two', 'one', 'two']]) - index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) - df = pd.DataFrame(randn(8, 2), index=index, columns=['A', 'B']) - df2 = df[:4] - df2 - -The ``stack`` function "compresses" a level in the DataFrame's columns. - -.. ipython:: python - - stacked = df2.stack() - stacked - -With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the -``index``), the inverse operation of ``stack`` is ``unstack``, which by default -unstacks the **last level**: - -.. ipython:: python - - stacked.unstack() - stacked.unstack(1) - stacked.unstack(0) - -Pivot Tables -~~~~~~~~~~~~ -See the section on :ref:`Pivot Tables <reshaping.pivot>`. - -.. ipython:: python - - df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3, - 'B' : ['A', 'B', 'C'] * 4, - 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2, - 'D' : np.random.randn(12), - 'E' : np.random.randn(12)}) - df - -We can produce pivot tables from this data very easily: - -.. ipython:: python - - pivot_table(df, values='D', rows=['A', 'B'], cols=['C']) - - -Time Series ------------ - -Pandas has simple, powerful, and efficient functionality for performing -resampling operations during frequency conversion (e.g., converting secondly -data into 5-minutely data). This is extremely common in, but not limited to, -financial applications. See the :ref:`Time Series section <timeseries>` - -.. 
ipython:: python - - rng = pd.date_range('1/1/2012', periods=100, freq='S') - ts = pd.Series(randint(0, 500, len(rng)), index=rng) - ts.resample('5Min', how='sum') - -Time zone representation - -.. ipython:: python - - rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D') - ts = pd.Series(randn(len(rng)), rng) - ts_utc = ts.tz_localize('UTC') - ts_utc - -Convert to another time zone - -.. ipython:: python - - ts_utc.tz_convert('US/Eastern') - -Converting between time span representations - -.. ipython:: python - - rng = pd.date_range('1/1/2012', periods=5, freq='M') - ts = pd.Series(randn(len(rng)), index=rng) - ts - ps = ts.to_period() - ps - ps.to_timestamp() - -Converting between period and timestamp enables some convenient arithmetic -functions to be used. In the following example, we convert a quarterly -frequency with year ending in November to 9am of the end of the month following -the quarter end: - -.. ipython:: python - - prng = period_range('1990Q1', '2000Q4', freq='Q-NOV') - ts = Series(randn(len(prng)), prng) - ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 - ts.head() - - -Plotting --------- - -:ref:`Plotting <visualization>` docs. - -.. ipython:: python - :suppress: - - import matplotlib.pyplot as plt - plt.close('all') - options.display.mpl_style='default' - -.. ipython:: python - - ts = pd.Series(randn(1000), index=pd.date_range('1/1/2000', periods=1000)) - ts = ts.cumsum() - - @savefig series_plot_basic.png width=6in - ts.plot() - -On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: - -.. ipython:: python - - df = pd.DataFrame(randn(1000, 4), index=ts.index, - columns=['A', 'B', 'C', 'D']) - df = df.cumsum() - - @savefig frame_plot_basic.png width=6in - plt.figure(); df.plot(); plt.legend(loc='best') - -Getting Data In/Out -------------------- - -CSV -~~~ - -:ref:`Writing to a csv file <io.store_in_csv>` - -.. 
ipython:: python - - df.to_csv('foo.csv') - -:ref:`Reading from a csv file <io.read_csv_table>` - -.. ipython:: python - - pd.read_csv('foo.csv') - -.. ipython:: python - :suppress: - - os.remove('foo.csv') - -HDF5 -~~~~ - -Reading and writing to :ref:`HDFStores <io.hdf5>` - -Writing to a HDF5 Store - -.. ipython:: python - - df.to_hdf('foo.h5','df') - -Reading from a HDF5 Store - -.. ipython:: python - - read_hdf('foo.h5','df') - -.. ipython:: python - :suppress: - - os.remove('foo.h5') - -Excel -~~~~~ - -Reading and writing to :ref:`MS Excel <io.excel>` - -Writing to an excel file - -.. ipython:: python - - df.to_excel('foo.xlsx', sheet_name='sheet1') - -Reading from an excel file - -.. ipython:: python - - read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA']) - -.. ipython:: python - :suppress: - - os.remove('foo.xlsx') +.. _10min: + +.. currentmodule:: pandas + +.. ipython:: python + :suppress: + + import numpy as np + import random + import os + np.random.seed(123456) + from pandas import * + import pandas as pd + randn = np.random.randn + randint = np.random.randint + np.set_printoptions(precision=4, suppress=True) + options.display.mpl_style='default' + + #### portions of this were borrowed from the + #### Pandas cheatsheet + #### created during the PyData Workshop-Sprint 2012 + #### Hannah Chen, Henry Chow, Eric Cox, Robert Mauriello + + +******************** +10 Minutes to Pandas +******************** + +This is a short introduction to pandas, geared mainly for new users. +You can see more complex recipes in the :ref:`Cookbook<cookbook>` + +Customarily, we import as follows + +.. ipython:: python + + import pandas as pd + import numpy as np + +Object Creation +--------------- + +See the :ref:`Data Structure Intro section <dsintro>` + +Creating a ``Series`` by passing a list of values, letting pandas create a default +integer index + +.. 
ipython:: python + + s = pd.Series([1,3,5,np.nan,6,8]) + s + +Creating a ``DataFrame`` by passing a numpy array, with a datetime index and labeled columns. + +.. ipython:: python + + dates = pd.date_range('20130101',periods=6) + dates + df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD')) + df + +Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like. + +.. ipython:: python + + df2 = pd.DataFrame({ 'A' : 1., + 'B' : pd.Timestamp('20130102'), + 'C' : pd.Series(1,index=range(4),dtype='float32'), + 'D' : np.array([3] * 4,dtype='int32'), + 'E' : 'foo' }) + df2 + +Having specific :ref:`dtypes <basics.dtypes>` + +.. ipython:: python + + df2.dtypes + +Viewing Data +------------ + +See the :ref:`Basics section <basics>` + +See the top & bottom rows of the frame + +.. ipython:: python + + df.head() + df.tail(3) + +Display the index,columns, and the underlying numpy data + +.. ipython:: python + + df.index + df.columns + df.values + +Describe shows a quick statistic summary of your data + +.. ipython:: python + + df.describe() + +Transposing your data + +.. ipython:: python + + df.T + +Sorting by an axis + +.. ipython:: python + + df.sort_index(axis=1, ascending=False) + +Sorting by values + +.. ipython:: python + + df.sort(columns='B') + +Selection +--------- + +.. note:: + + While standard Python / Numpy expressions for selecting and setting are + intuitive and come in handy for interactive work, for production code, we + recommend the optimized pandas data access methods, ``.at``, ``.iat``, + ``.loc``, ``.iloc`` and ``.ix``. + +See the :ref:`Indexing section <indexing>` and below. + +Getting +~~~~~~~ + +Selecting a single column, which yields a ``Series``, +equivalent to ``df.A`` + +.. ipython:: python + + df['A'] + +Selecting via ``[]``, which slices the rows. + +.. 
ipython:: python + + df[0:3] + df['20130102':'20130104'] + +Selection by Label +~~~~~~~~~~~~~~~~~~ + +See more in :ref:`Selection by Label <indexing.label>` + +For getting a cross section using a label + +.. ipython:: python + + df.loc[dates[0]] + +Selecting on a multi-axis by label + +.. ipython:: python + + df.loc[:,['A','B']] + +Showing label slicing, both endpoints are *included* + +.. ipython:: python + + df.loc['20130102':'20130104',['A','B']] + +Reduction in the dimensions of the returned object + +.. ipython:: python + + df.loc['20130102',['A','B']] + +For getting a scalar value + +.. ipython:: python + + df.loc[dates[0],'A'] + +For getting fast access to a scalar (equiv to the prior method) + +.. ipython:: python + + df.at[dates[0],'A'] + +Selection by Position +~~~~~~~~~~~~~~~~~~~~~ + +See more in :ref:`Selection by Position <indexing.integer>` + +Select via the position of the passed integers + +.. ipython:: python + + df.iloc[3] + +By integer slices, acting similar to numpy/python + +.. ipython:: python + + df.iloc[3:5,0:2] + +By lists of integer position locations, similar to the numpy/python style + +.. ipython:: python + + df.iloc[[1,2,4],[0,2]] + +For slicing rows explicitly + +.. ipython:: python + + df.iloc[1:3,:] + +For slicing columns explicitly + +.. ipython:: python + + df.iloc[:,1:3] + +For getting a value explicity + +.. ipython:: python + + df.iloc[1,1] + +For getting fast access to a scalar (equiv to the prior method) + +.. ipython:: python + + df.iat[1,1] + +There is one signficant departure from standard python/numpy slicing semantics. +python/numpy allow slicing past the end of an array without an associated +error. + +.. ipython:: python + + # these are allowed in python/numpy. + x = list('abcdef') + x[4:10] + x[8:10] + +Pandas will detect this and raise ``IndexError``, rather than return an empty +structure. 
+ +:: + + >>> df.iloc[:,8:10] + IndexError: out-of-bounds on slice (end) + +Boolean Indexing +~~~~~~~~~~~~~~~~ + +Using a single column's values to select data. + +.. ipython:: python + + df[df.A > 0] + +A ``where`` operation for getting. + +.. ipython:: python + + df[df > 0] + + +Setting +~~~~~~~ + +Setting a new column automatically aligns the data +by the indexes + +.. ipython:: python + + s1 = pd.Series([1,2,3,4,5,6],index=date_range('20130102',periods=6)) + s1 + df['F'] = s1 + +Setting values by label + +.. ipython:: python + + df.at[dates[0],'A'] = 0 + +Setting values by position + +.. ipython:: python + + df.iat[0,1] = 0 + +Setting by assigning with a numpy array + +.. ipython:: python + + df.loc[:,'D'] = np.array([5] * len(df)) + +The result of the prior setting operations + +.. ipython:: python + + df + +A ``where`` operation with setting. + +.. ipython:: python + + df2 = df.copy() + df2[df2 > 0] = -df2 + df2 + + +Missing Data +------------ + +Pandas primarily uses the value ``np.nan`` to represent missing data. It is by +default not included in computations. See the :ref:`Missing Data section +<missing_data>` + +Reindexing allows you to change/add/delete the index on a specified axis. This +returns a copy of the data. + +.. ipython:: python + + df1 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E']) + df1.loc[dates[0]:dates[1],'E'] = 1 + df1 + +To drop any rows that have missing data. + +.. ipython:: python + + df1.dropna(how='any') + +Filling missing data + +.. ipython:: python + + df1.fillna(value=5) + +To get the boolean mask where values are ``nan`` + +.. ipython:: python + + pd.isnull(df1) + + +Operations +---------- + +See the :ref:`Basic section on Binary Ops <basics.binop>` + +Stats +~~~~~ + +Operations in general *exclude* missing data. + +Performing a descriptive statistic + +.. ipython:: python + + df.mean() + +Same operation on the other axis + +.. 
ipython:: python + + df.mean(1) + +Operating with objects that have different dimensionality and need alignment. +In addition, pandas automatically broadcasts along the specified dimension. + +.. ipython:: python + + s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2) + s + df.sub(s,axis='index') + + +Apply +~~~~~ + +Applying functions to the data + +.. ipython:: python + + df.apply(np.cumsum) + df.apply(lambda x: x.max() - x.min()) + +Histogramming +~~~~~~~~~~~~~ + +See more at :ref:`Histogramming and Discretization <basics.discretization>` + +.. ipython:: python + + s = Series(np.random.randint(0,7,size=10)) + s + s.value_counts() + +String Methods +~~~~~~~~~~~~~~ + +See more at :ref:`Vectorized String Methods <basics.string_methods>` + +.. ipython:: python + + s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s.str.lower() + +Merge +----- + +Concat +~~~~~~ + +Pandas provides various facilities for easily combining together Series, +DataFrame, and Panel objects with various kinds of set logic for the indexes +and relational algebra functionality in the case of join / merge-type +operations. + +See the :ref:`Merging section <merging>` + +Concatenating pandas objects together + +.. ipython:: python + + df = pd.DataFrame(np.random.randn(10, 4)) + df + + # break it into pieces + pieces = [df[:3], df[3:7], df[7:]] + + concat(pieces) + +Join +~~~~ + +SQL style merges. See the :ref:`Database style joining <merging.join>` + +.. ipython:: python + + left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]}) + right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) + left + right + merge(left, right, on='key') + +Append +~~~~~~ + +Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>` + +.. 
ipython:: python + + df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D']) + df + s = df.iloc[3] + df.append(s, ignore_index=True) + + +Grouping +-------- + +By "group by" we are referring to a process involving one or more of the +following steps + + - **Splitting** the data into groups based on some criteria + - **Applying** a function to each group independently + - **Combining** the results into a data structure + +See the :ref:`Grouping section <groupby>` + +.. ipython:: python + + df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'foo', 'foo'], + 'B' : ['one', 'one', 'two', 'three', + 'two', 'two', 'one', 'three'], + 'C' : randn(8), 'D' : randn(8)}) + df + +Grouping and then applying a function ``sum`` to the resulting groups. + +.. ipython:: python + + df.groupby('A').sum() + +Grouping by multiple columns forms a hierarchical index, which we then apply +the function. + +.. ipython:: python + + df.groupby(['A','B']).sum() + +Reshaping +--------- + +See the section on :ref:`Hierarchical Indexing <indexing.hierarchical>` and +see the section on :ref:`Reshaping <reshaping.stacking>`). + +Stack +~~~~~ + +.. ipython:: python + + tuples = zip(*[['bar', 'bar', 'baz', 'baz', + 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', + 'one', 'two', 'one', 'two']]) + index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) + df = pd.DataFrame(randn(8, 2), index=index, columns=['A', 'B']) + df2 = df[:4] + df2 + +The ``stack`` function "compresses" a level in the DataFrame's columns. + +.. ipython:: python + + stacked = df2.stack() + stacked + +With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the +``index``), the inverse operation of ``stack`` is ``unstack``, which by default +unstacks the **last level**: + +.. ipython:: python + + stacked.unstack() + stacked.unstack(1) + stacked.unstack(0) + +Pivot Tables +~~~~~~~~~~~~ +See the section on :ref:`Pivot Tables <reshaping.pivot>`. + +.. 
ipython:: python + + df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3, + 'B' : ['A', 'B', 'C'] * 4, + 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2, + 'D' : np.random.randn(12), + 'E' : np.random.randn(12)}) + df + +We can produce pivot tables from this data very easily: + +.. ipython:: python + + pivot_table(df, values='D', rows=['A', 'B'], cols=['C']) + + +Time Series +----------- + +Pandas has simple, powerful, and efficient functionality for performing +resampling operations during frequency conversion (e.g., converting secondly +data into 5-minutely data). This is extremely common in, but not limited to, +financial applications. See the :ref:`Time Series section <timeseries>` + +.. ipython:: python + + rng = pd.date_range('1/1/2012', periods=100, freq='S') + ts = pd.Series(randint(0, 500, len(rng)), index=rng) + ts.resample('5Min', how='sum') + +Time zone representation + +.. ipython:: python + + rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D') + ts = pd.Series(randn(len(rng)), rng) + ts_utc = ts.tz_localize('UTC') + ts_utc + +Convert to another time zone + +.. ipython:: python + + ts_utc.tz_convert('US/Eastern') + +Converting between time span representations + +.. ipython:: python + + rng = pd.date_range('1/1/2012', periods=5, freq='M') + ts = pd.Series(randn(len(rng)), index=rng) + ts + ps = ts.to_period() + ps + ps.to_timestamp() + +Converting between period and timestamp enables some convenient arithmetic +functions to be used. In the following example, we convert a quarterly +frequency with year ending in November to 9am of the end of the month following +the quarter end: + +.. ipython:: python + + prng = period_range('1990Q1', '2000Q4', freq='Q-NOV') + ts = Series(randn(len(prng)), prng) + ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 + ts.head() + + +Plotting +-------- + +:ref:`Plotting <visualization>` docs. + +.. 
ipython:: python + :suppress: + + import matplotlib.pyplot as plt + plt.close('all') + options.display.mpl_style='default' + +.. ipython:: python + + ts = pd.Series(randn(1000), index=pd.date_range('1/1/2000', periods=1000)) + ts = ts.cumsum() + + @savefig series_plot_basic.png width=6in + ts.plot() + +On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: + +.. ipython:: python + + df = pd.DataFrame(randn(1000, 4), index=ts.index, + columns=['A', 'B', 'C', 'D']) + df = df.cumsum() + + @savefig frame_plot_basic.png width=6in + plt.figure(); df.plot(); plt.legend(loc='best') + +Getting Data In/Out +------------------- + +CSV +~~~ + +:ref:`Writing to a csv file <io.store_in_csv>` + +.. ipython:: python + + df.to_csv('foo.csv') + +:ref:`Reading from a csv file <io.read_csv_table>` + +.. ipython:: python + + pd.read_csv('foo.csv') + +.. ipython:: python + :suppress: + + os.remove('foo.csv') + +HDF5 +~~~~ + +Reading and writing to :ref:`HDFStores <io.hdf5>` + +Writing to a HDF5 Store + +.. ipython:: python + + df.to_hdf('foo.h5','df') + +Reading from a HDF5 Store + +.. ipython:: python + + read_hdf('foo.h5','df') + +.. ipython:: python + :suppress: + + os.remove('foo.h5') + +Excel +~~~~~ + +Reading and writing to :ref:`MS Excel <io.excel>` + +Writing to an excel file + +.. ipython:: python + + df.to_excel('foo.xlsx', sheet_name='sheet1') + +Reading from an excel file + +.. ipython:: python + + read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA']) + +.. 
ipython:: python + :suppress: + + os.remove('foo.xlsx') diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py index de93394872e12..34e56fe576a07 100644 --- a/pandas/core/expressions.py +++ b/pandas/core/expressions.py @@ -1,180 +1,180 @@ -""" -Expressions ------------ - -Offer fast expression evaluation thru numexpr - -""" -import numpy as np - -try: - import numexpr as ne - _NUMEXPR_INSTALLED = True -except ImportError: # pragma: no cover - _NUMEXPR_INSTALLED = False - -_USE_NUMEXPR = _NUMEXPR_INSTALLED -_evaluate = None -_where = None - -# the set of dtypes that we will allow pass to numexpr -_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']), - where = set(['int64','float64','bool'])) - -# the minimum prod shape that we will use numexpr -_MIN_ELEMENTS = 10000 - -def set_use_numexpr(v = True): - # set/unset to use numexpr - global _USE_NUMEXPR - if _NUMEXPR_INSTALLED: - _USE_NUMEXPR = v - - # choose what we are going to do - global _evaluate, _where - if not _USE_NUMEXPR: - _evaluate = _evaluate_standard - _where = _where_standard - else: - _evaluate = _evaluate_numexpr - _where = _where_numexpr - -def set_numexpr_threads(n = None): - # if we are using numexpr, set the threads to n - # otherwise reset - try: - if _NUMEXPR_INSTALLED and _USE_NUMEXPR: - if n is None: - n = ne.detect_number_of_cores() - ne.set_num_threads(n) - except: - pass - - -def _evaluate_standard(op, op_str, a, b, raise_on_error=True): - """ standard evaluation """ - return op(a,b) - -def _can_use_numexpr(op, op_str, a, b, dtype_check): - """ return a boolean if we WILL be using numexpr """ - if op_str is not None: - - # required min elements (otherwise we are adding overhead) - if np.prod(a.shape) > _MIN_ELEMENTS: - - # check for dtype compatiblity - dtypes = set() - for o in [ a, b ]: - if hasattr(o,'get_dtype_counts'): - s = o.get_dtype_counts() - if len(s) > 1: - return False - dtypes |= set(s.index) - elif isinstance(o,np.ndarray): - dtypes 
|= set([o.dtype.name]) - - # allowed are a superset - if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: - return True - - return False - -def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False): - result = None - - if _can_use_numexpr(op, op_str, a, b, 'evaluate'): - try: - a_value, b_value = a, b - if hasattr(a_value,'values'): - a_value = a_value.values - if hasattr(b_value,'values'): - b_value = b_value.values - result = ne.evaluate('a_value %s b_value' % op_str, - local_dict={ 'a_value' : a_value, - 'b_value' : b_value }, - casting='safe') - except (ValueError), detail: - if 'unknown type object' in str(detail): - pass - except (Exception), detail: - if raise_on_error: - raise TypeError(str(detail)) - - if result is None: - result = _evaluate_standard(op,op_str,a,b,raise_on_error) - - return result - -def _where_standard(cond, a, b, raise_on_error=True): - return np.where(cond, a, b) - -def _where_numexpr(cond, a, b, raise_on_error = False): - result = None - - if _can_use_numexpr(None, 'where', a, b, 'where'): - - try: - cond_value, a_value, b_value = cond, a, b - if hasattr(cond_value,'values'): - cond_value = cond_value.values - if hasattr(a_value,'values'): - a_value = a_value.values - if hasattr(b_value,'values'): - b_value = b_value.values - result = ne.evaluate('where(cond_value,a_value,b_value)', - local_dict={ 'cond_value' : cond_value, - 'a_value' : a_value, - 'b_value' : b_value }, - casting='safe') - except (ValueError), detail: - if 'unknown type object' in str(detail): - pass - except (Exception), detail: - if raise_on_error: - raise TypeError(str(detail)) - - if result is None: - result = _where_standard(cond,a,b,raise_on_error) - - return result - - -# turn myself on -set_use_numexpr(True) - -def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True): - """ evaluate and return the expression of the op on a and b - - Parameters - ---------- - - op : the actual operand - op_str: the string version of the op - a : 
left operand - b : right operand - raise_on_error : pass the error to the higher level if indicated (default is False), - otherwise evaluate the op with and return the results - use_numexpr : whether to try to use numexpr (default True) - """ - - if use_numexpr: - return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error) - return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error) - -def where(cond, a, b, raise_on_error=False, use_numexpr=True): - """ evaluate the where condition cond on a and b - - Parameters - ---------- - - cond : a boolean array - a : return if cond is True - b : return if cond is False - raise_on_error : pass the error to the higher level if indicated (default is False), - otherwise evaluate the op with and return the results - use_numexpr : whether to try to use numexpr (default True) - """ - - if use_numexpr: - return _where(cond, a, b, raise_on_error=raise_on_error) - return _where_standard(cond, a, b, raise_on_error=raise_on_error) +""" +Expressions +----------- + +Offer fast expression evaluation thru numexpr + +""" +import numpy as np + +try: + import numexpr as ne + _NUMEXPR_INSTALLED = True +except ImportError: # pragma: no cover + _NUMEXPR_INSTALLED = False + +_USE_NUMEXPR = _NUMEXPR_INSTALLED +_evaluate = None +_where = None + +# the set of dtypes that we will allow pass to numexpr +_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']), + where = set(['int64','float64','bool'])) + +# the minimum prod shape that we will use numexpr +_MIN_ELEMENTS = 10000 + +def set_use_numexpr(v = True): + # set/unset to use numexpr + global _USE_NUMEXPR + if _NUMEXPR_INSTALLED: + _USE_NUMEXPR = v + + # choose what we are going to do + global _evaluate, _where + if not _USE_NUMEXPR: + _evaluate = _evaluate_standard + _where = _where_standard + else: + _evaluate = _evaluate_numexpr + _where = _where_numexpr + +def set_numexpr_threads(n = None): + # if we are using numexpr, set the threads to n + # 
otherwise reset + try: + if _NUMEXPR_INSTALLED and _USE_NUMEXPR: + if n is None: + n = ne.detect_number_of_cores() + ne.set_num_threads(n) + except: + pass + + +def _evaluate_standard(op, op_str, a, b, raise_on_error=True): + """ standard evaluation """ + return op(a,b) + +def _can_use_numexpr(op, op_str, a, b, dtype_check): + """ return a boolean if we WILL be using numexpr """ + if op_str is not None: + + # required min elements (otherwise we are adding overhead) + if np.prod(a.shape) > _MIN_ELEMENTS: + + # check for dtype compatiblity + dtypes = set() + for o in [ a, b ]: + if hasattr(o,'get_dtype_counts'): + s = o.get_dtype_counts() + if len(s) > 1: + return False + dtypes |= set(s.index) + elif isinstance(o,np.ndarray): + dtypes |= set([o.dtype.name]) + + # allowed are a superset + if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: + return True + + return False + +def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False): + result = None + + if _can_use_numexpr(op, op_str, a, b, 'evaluate'): + try: + a_value, b_value = a, b + if hasattr(a_value,'values'): + a_value = a_value.values + if hasattr(b_value,'values'): + b_value = b_value.values + result = ne.evaluate('a_value %s b_value' % op_str, + local_dict={ 'a_value' : a_value, + 'b_value' : b_value }, + casting='safe') + except (ValueError), detail: + if 'unknown type object' in str(detail): + pass + except (Exception), detail: + if raise_on_error: + raise TypeError(str(detail)) + + if result is None: + result = _evaluate_standard(op,op_str,a,b,raise_on_error) + + return result + +def _where_standard(cond, a, b, raise_on_error=True): + return np.where(cond, a, b) + +def _where_numexpr(cond, a, b, raise_on_error = False): + result = None + + if _can_use_numexpr(None, 'where', a, b, 'where'): + + try: + cond_value, a_value, b_value = cond, a, b + if hasattr(cond_value,'values'): + cond_value = cond_value.values + if hasattr(a_value,'values'): + a_value = a_value.values + if 
hasattr(b_value,'values'): + b_value = b_value.values + result = ne.evaluate('where(cond_value,a_value,b_value)', + local_dict={ 'cond_value' : cond_value, + 'a_value' : a_value, + 'b_value' : b_value }, + casting='safe') + except (ValueError), detail: + if 'unknown type object' in str(detail): + pass + except (Exception), detail: + if raise_on_error: + raise TypeError(str(detail)) + + if result is None: + result = _where_standard(cond,a,b,raise_on_error) + + return result + + +# turn myself on +set_use_numexpr(True) + +def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True): + """ evaluate and return the expression of the op on a and b + + Parameters + ---------- + + op : the actual operand + op_str: the string version of the op + a : left operand + b : right operand + raise_on_error : pass the error to the higher level if indicated (default is False), + otherwise evaluate the op with and return the results + use_numexpr : whether to try to use numexpr (default True) + """ + + if use_numexpr: + return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error) + return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error) + +def where(cond, a, b, raise_on_error=False, use_numexpr=True): + """ evaluate the where condition cond on a and b + + Parameters + ---------- + + cond : a boolean array + a : return if cond is True + b : return if cond is False + raise_on_error : pass the error to the higher level if indicated (default is False), + otherwise evaluate the op with and return the results + use_numexpr : whether to try to use numexpr (default True) + """ + + if use_numexpr: + return _where(cond, a, b, raise_on_error=raise_on_error) + return _where_standard(cond, a, b, raise_on_error=raise_on_error) diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv index 85cebb56f6adf..e7900830140d2 100644 --- a/pandas/io/tests/data/banklist.csv +++ b/pandas/io/tests/data/banklist.csv @@ -1,507 +1,507 @@ -Bank 
Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date -Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13 -Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13 -Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13 -Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13 -Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13 -Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13 -Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13 -Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13 -First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13 -Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13 -Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13 -Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13 -1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13 -Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13 -Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13 -Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13 -Citizens First National Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13 -Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13 -NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13 -Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13 -First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13 -GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13 -First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National 
Association",28-Sep-12,15-Nov-12 -Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12 -First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12 -Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12 -Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12 -Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13 -Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12 -First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12 -Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12 -The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13 -Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12 -Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12 -The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12 -Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12 -Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12 -Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12 -Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12 -Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12 -First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12 -"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13 -"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12 -Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13 -Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13 -"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple 
Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13 -HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13 -Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12 -"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13 -Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13 -Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12 -Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12 -New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12 -Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12 -Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12 -Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12 -SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13 -Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13 -BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13 -Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12 -Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12 -First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12 -American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13 -The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13 -Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13 -Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12 -Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12 -Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12 -Polk County 
Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12 -Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12 -SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12 -"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12 -All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12 -Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13 -Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12 -Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12 -Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12 -Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12 -First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12 -"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12 -Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13 -Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12 -The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12 -First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12 -Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12 -Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12 -The First National Bank of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12 -CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12 -Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12 -First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12 -First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12 -Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12 -Public Savings Bank,Huntingdon 
Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12 -The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12 -Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12 -Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12 -Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12 -"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12 -Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12 -Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12 -LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12 -Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12 -Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12 -First Peoples Bank,Port St. Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12 -High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12 -One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12 -Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12 -Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13 -First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12 -Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12 -First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12 -McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12 -Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12 -First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13 -Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13 -First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National 
Association",20-May-11,13-Nov-12 -Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12 -Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12 -Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12 -The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12 -First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13 -Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12 -First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12 -Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12 -Rosemount National Bank,Rosemount,MN,24099,Central Bank,15-Apr-11,16-Aug-12 -Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12 -Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12 -New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12 -Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13 -Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12 -Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13 -The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13 -Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12 -First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12 -Valley Community Bank,St. 
Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12 -"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12 -Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12 -Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12 -Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12 -Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12 -Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12 -Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13 -Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12 -Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12 -North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12 -American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12 -First Community Bank,Taos,NM,12261,"U.S. 
Bank, N.A.",28-Jan-11,12-Sep-12 -FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12 -Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12 -The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12 -United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12 -The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12 -CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12 -Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12 -Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12 -Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12 -First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12 -Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12 -First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12 -"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12 -"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12 -Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12 -"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12 -Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12 -Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12 -First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12 -Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12 -Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12 -Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12 -Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13 -Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12 -First 
Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12 -Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12 -Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12 -K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12 -"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12 -Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12 -First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12 -The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12 -The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12 -Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12 -First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12 -Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12 -WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12 -"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12 -Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12 -Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12 -North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12 -Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12 -Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12 -Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12 -The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12 -First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13 -Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13 -ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12 -Horizon 
Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12 -Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12 -Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12 -Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12 -Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12 -ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13 -Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12 -Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12 -Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12 -Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12 -Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12 -LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12 -The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12 -Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12 -Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12 -Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12 -Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12 -SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12 -Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12 -Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12 -Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12 -Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12 -Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12 -"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12 -Olde Cypress Community 
Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12 -Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12 -Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12 -First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12 -Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12 -Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12 -USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12 -Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12 -Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13 -High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12 -First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12 -Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12 -Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12 -Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12 -TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12 -Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12 -First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12 -Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12 -"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12 -Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12 -Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12 -Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12 -Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12 -Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12 -Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12 -New Liberty 
Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12 -Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12 -1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12 -Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12 -Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12 -The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12 -Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13 -BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12 -Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12 -CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13 -Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12 -R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12 -Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12 -Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12 -Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12 -Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12 -New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12 -Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12 -Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12 -"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12 -City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12 -Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12 -Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12 -Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12 -Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12 -AmericanFirst 
Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12 -First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13 -Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12 -Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12 -Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12 -Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12 -Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12 -McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12 -State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12 -First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12 -Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12 -Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12 -Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12 -Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12 -American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12 -Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12 -Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12 -The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12 -LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12 -Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12 -Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12 -Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12 -Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12 -Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12 -Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13 -"La Jolla Bank, FSB",La 
Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12 -George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12 -The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12 -Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12 -1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12 -American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12 -First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12 -Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13 -"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12 -Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13 -First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12 -Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12 -Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13 -Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12 -Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13 -Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12 -Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12 -St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. 
Joseph,15-Jan-10,23-Aug-12 -Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12 -Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12 -"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12 -Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12 -Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12 -New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12 -Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12 -Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12 -RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12 -SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12 -"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12 -"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12 -Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12 -Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12 -AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12 -The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12 -First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12 -The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12 -Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12 -Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12 -Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12 -"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12 -United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12 -Gateway Bank of St. Louis,St. 
Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12 -Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12 -Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12 -United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13 -North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12 -San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13 -"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12 -First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12 -Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12 -Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12 -Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12 -Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12 -American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12 -Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13 -San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12 -Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12 -Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12 -Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12 -Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12 -"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First 
Financial Bank, N.A.",18-Sep-09,5-Sep-12 -Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12 -Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12 -Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13 -"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12 -First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13 -Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12 -Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12 -InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12 -First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12 -Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12 -Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12 -Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13 -Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12 -CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13 -First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13 -ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12 -Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12 -Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12 -"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12 -Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12 -Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13 -Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13 -Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12 -First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12 -Mutual 
Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12 -First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12 -Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12 -Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12 -First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12 -Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12 -Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12 -Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12 -Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12 -BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12 -First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13 -Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12 -Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12 -Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12 -First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12 -Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12 -Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12 -First State Bank of Winchester,Winchester,IL,11710,The First National Bank of 
Beardstown,2-Jul-09,20-Aug-12 -John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12 -Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12 -MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12 -Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12 -Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12 -Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12 -First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12 -Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12 -Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12 -Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12 -Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12 -Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12 -"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12 -Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12 -America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12 -Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12 -"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12 -First Bank of Idaho,Ketchum,ID,34396,"U.S. 
Bank, N.A.",24-Apr-09,17-Aug-12 -First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12 -Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12 -American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12 -Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12 -American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12 -New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12 -Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12 -Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12 -"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12 -Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12 -FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12 -Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12 -Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12 -Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12 -Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12 -Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12 -Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12 -Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12 -Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12 -County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12 -Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12 -FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12 -Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12 -Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12 -MagnetBank,Salt Lake City,UT,58001,No 
Acquirer,30-Jan-09,16-Aug-12 -1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12 -Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12 -National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12 -Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12 -Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12 -First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12 -PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13 -Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13 -Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12 -Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12 -"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12 -Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12 -Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12 -Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12 -Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12 -Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12 -Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12 -Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12 -Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12 -Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12 -First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12 -"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12 -First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12 -IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12 -"First Integrity Bank, NA",Staples,MN,12736,First International Bank and 
Trust,30-May-08,28-Aug-12 -"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12 -Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12 -Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12 -Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12 -NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12 -Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10 -Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08 -Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08 -Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12 -Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08 -Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05 -First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12 -Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08 -Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04 -Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05 -AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06 -Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08 -Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12 -New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05 -Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08 -"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10 -Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12 -Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03 -"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12 -Sinclair National 
Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04 -"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12 -Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02 -First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03 -National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05 -Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05 +Bank Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date +Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13 +Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13 +Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13 +Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13 +Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13 +Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13 +Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13 +Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13 +First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13 +Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13 +Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13 +Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13 +1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13 +Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13 +Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13 +Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13 +Citizens First National 
Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13 +Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13 +NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13 +Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13 +First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13 +GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13 +First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National Association",28-Sep-12,15-Nov-12 +Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12 +First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12 +Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12 +Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12 +Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13 +Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12 +First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12 +Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12 +The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13 +Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12 +Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12 +The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12 +Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12 +Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12 +Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12 +Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12 +Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12 
+First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12 +"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13 +"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12 +Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13 +Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13 +"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13 +HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13 +Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12 +"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13 +Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13 +Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12 +Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12 +New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12 +Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12 +Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12 +Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12 +SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13 +Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13 +BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13 +Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12 +Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12 +First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12 +American Eagle Savings 
Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13 +The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13 +Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13 +Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12 +Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12 +Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12 +Polk County Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12 +Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12 +SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12 +"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12 +All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12 +Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13 +Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12 +Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12 +Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12 +Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12 +First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12 +"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12 +Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13 +Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12 +The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12 +First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12 +Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12 +Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12 +The First National Bank 
of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12 +CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12 +Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12 +First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12 +First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12 +Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12 +Public Savings Bank,Huntingdon Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12 +The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12 +Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12 +Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12 +Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12 +"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12 +Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12 +Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12 +LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12 +Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12 +Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12 +First Peoples Bank,Port St. 
Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12 +High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12 +One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12 +Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12 +Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13 +First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12 +Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12 +First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12 +McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12 +Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12 +First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13 +Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13 +First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National Association",20-May-11,13-Nov-12 +Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12 +Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12 +Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12 +The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12 +First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13 +Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12 +First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12 +Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12 +Rosemount National Bank,Rosemount,MN,24099,Central 
Bank,15-Apr-11,16-Aug-12 +Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12 +Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12 +New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12 +Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13 +Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12 +Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13 +The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13 +Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12 +First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12 +Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12 +"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12 +Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12 +Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12 +Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12 +Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12 +Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12 +Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13 +Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12 +Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12 +North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12 +American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12 +First Community Bank,Taos,NM,12261,"U.S. 
Bank, N.A.",28-Jan-11,12-Sep-12 +FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12 +Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12 +The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12 +United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12 +The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12 +CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12 +Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12 +Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12 +Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12 +First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12 +Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12 +First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12 +"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12 +"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12 +Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12 +"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12 +Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12 +Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12 +First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12 +Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12 +Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12 +Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12 +Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13 +Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12 +First 
Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12 +Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12 +Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12 +K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12 +"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12 +Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12 +First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12 +The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12 +The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12 +Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12 +First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12 +Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12 +WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12 +"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12 +Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12 +Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12 +North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12 +Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12 +Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12 +Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12 +The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12 +First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13 +Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13 +ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12 +Horizon 
Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12 +Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12 +Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12 +Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12 +Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12 +ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13 +Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12 +Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12 +Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12 +Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12 +Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12 +LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12 +The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12 +Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12 +Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12 +Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12 +Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12 +SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12 +Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12 +Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12 +Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12 +Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12 +Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12 +"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12 +Olde Cypress Community 
Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12 +Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12 +Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12 +First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12 +Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12 +Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12 +USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12 +Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12 +Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13 +High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12 +First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12 +Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12 +Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12 +Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12 +TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12 +Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12 +First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12 +Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12 +"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12 +Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12 +Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12 +Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12 +Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12 +Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12 +Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12 +New Liberty 
Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12 +Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12 +1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12 +Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12 +Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12 +The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12 +Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13 +BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12 +Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12 +CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13 +Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12 +R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12 +Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12 +Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12 +Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12 +Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12 +New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12 +Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12 +Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12 +"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12 +City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12 +Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12 +Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12 +Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12 +Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12 +AmericanFirst 
Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12 +First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13 +Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12 +Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12 +Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12 +Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12 +Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12 +McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12 +State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12 +First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12 +Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12 +Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12 +Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12 +Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12 +American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12 +Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12 +Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12 +The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12 +LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12 +Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12 +Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12 +Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12 +Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12 +Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12 +Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13 +"La Jolla Bank, FSB",La 
Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12 +George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12 +The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12 +Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12 +1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12 +American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12 +First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12 +Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13 +"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12 +Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13 +First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12 +Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12 +Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13 +Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12 +Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13 +Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12 +Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12 +St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. 
Joseph,15-Jan-10,23-Aug-12 +Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12 +Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12 +"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12 +Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12 +Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12 +New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12 +Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12 +Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12 +RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12 +SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12 +"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12 +"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12 +Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12 +Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12 +AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12 +The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12 +First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12 +The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12 +Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12 +Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12 +Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12 +"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12 +United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12 +Gateway Bank of St. Louis,St. 
Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12 +Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12 +Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12 +United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13 +North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12 +San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13 +"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12 +First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12 +Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12 +Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12 +Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12 +Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12 +American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12 +Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13 +San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12 +Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12 +Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12 +Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12 +Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12 +"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First 
Financial Bank, N.A.",18-Sep-09,5-Sep-12 +Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12 +Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12 +Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13 +"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12 +First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13 +Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12 +Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12 +InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12 +First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12 +Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12 +Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12 +Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13 +Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12 +CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13 +First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13 +ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12 +Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12 +Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12 +"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12 +Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12 +Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13 +Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13 +Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12 +First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12 +Mutual 
Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12 +First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12 +Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12 +Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12 +First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12 +Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12 +Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12 +Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12 +Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12 +BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12 +First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13 +Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12 +Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12 +Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12 +First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12 +Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12 +Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12 +First State Bank of Winchester,Winchester,IL,11710,The First National Bank of 
Beardstown,2-Jul-09,20-Aug-12 +John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12 +Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12 +MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12 +Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12 +Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12 +Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12 +First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12 +Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12 +Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12 +Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12 +Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12 +Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12 +"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12 +Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12 +America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12 +Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12 +"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12 +First Bank of Idaho,Ketchum,ID,34396,"U.S. 
Bank, N.A.",24-Apr-09,17-Aug-12 +First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12 +Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12 +American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12 +Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12 +American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12 +New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12 +Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12 +Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12 +"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12 +Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12 +FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12 +Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12 +Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12 +Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12 +Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12 +Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12 +Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12 +Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12 +Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12 +County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12 +Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12 +FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12 +Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12 +Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12 +MagnetBank,Salt Lake City,UT,58001,No 
Acquirer,30-Jan-09,16-Aug-12 +1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12 +Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12 +National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12 +Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12 +Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12 +First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12 +PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13 +Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13 +Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12 +Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12 +"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12 +Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12 +Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12 +Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12 +Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12 +Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12 +Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12 +Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12 +Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12 +Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12 +First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12 +"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12 +First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12 +IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12 +"First Integrity Bank, NA",Staples,MN,12736,First International Bank and 
Trust,30-May-08,28-Aug-12 +"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12 +Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12 +Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12 +Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12 +NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12 +Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10 +Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08 +Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08 +Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12 +Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08 +Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05 +First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12 +Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08 +Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04 +Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05 +AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06 +Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08 +Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12 +New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05 +Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08 +"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10 +Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12 +Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03 +"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12 +Sinclair National 
Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04 +"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12 +Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02 +First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03 +National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05 +Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05 diff --git a/pandas/io/tests/data/banklist.html b/pandas/io/tests/data/banklist.html index 801016e7a5478..8ec1561f8c394 100644 --- a/pandas/io/tests/data/banklist.html +++ b/pandas/io/tests/data/banklist.html @@ -1,4885 +1,4885 @@ -<!DOCTYPE html><!-- HTML5 --> -<html lang="en-US"> -<!-- Content language is American English. --> -<head> -<title>FDIC: Failed Bank List</title> -<!-- Meta Tags --> -<meta charset="UTF-8"> -<!-- Unicode character encoding --> -<meta http-equiv="X-UA-Compatible" content="IE=edge"> -<!-- Turns off IE Compatiblity Mode --> -<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> -<!-- Makes it so phones don't auto zoom out. 
--> -<meta name="author" content="DRR"> -<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims"> -<!-- CSS --> -<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css"> -<link rel="stylesheet" type="text/css" href="banklist.css"> -</head> -<body> - -<!-- START of Header --> -<script type="text/javascript" src="/responsive/header/js/header.js"></script> -<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" /> -<!-- googleac.html includes Autocomplete functionality --> -<!-- Autocomplete files --> -<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" /> -<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script> -<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script> -<script type="text/javascript"> -function findValue(li) { - if( li == null ) return alert("No match!"); - - // if coming from an AJAX call, let's use the Id as the value - if( !!li.extra ) var sValue = li.extra[0]; - - // otherwise, let's just display the value in the text box - else var sValue = li.selectValue; - - $('#googlesearch').submit(); - -} -function findValue2(li) { - if( li == null ) return alert("No match!"); - - // if coming from an AJAX call, let's use the Id as the value - if( !!li.extra ) var sValue = li.extra[0]; - - // otherwise, let's just display the value in the text box - else var sValue = li.selectValue; - - $('#googlesearch2').submit(); -} -function selectItem(li) { - findValue(li); -} -function selectItem2(li) { - findValue2(li); -} - -$().ready(function() { - - function log(event, data, formatted) { - $("<li>").html( !data ? "No match!" 
: "Selected: " + formatted).appendTo("#result"); - } - - function formatItem(row) { - return row[0] + " (<strong>id: " + row[1] + "</strong>)"; - } - function formatResult(row) { - return row[0].replace(/(<.+?>)/gi, ''); - } - - $("#newSearch").autocomplete("/searchjs.asp", { - width: 179, - autoFill: false, - //delay:10, - minChars:2, - cacheLength: 10, - onFindValue:findValue, - onItemSelect: selectItem, - selectFirst: false - - }); - - $("#search2").autocomplete("/searchjs.asp", { - width: 160, - autoFill: false, - //delay:10, - minChars:2, - cacheLength: 10, - onFindValue:findValue2, - onItemSelect: selectItem2, - selectFirst: false - - }); - -}); - -</script> -<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK --> - -<!-- FORESEE Code --> -<script type="text/javascript" src="/foresee/foresee-trigger.js"></script> - -<a href="#after_header" class="responsive_header-skip_header">Skip Header</a> -<header> -<div id="responsive_header"> - <div id="responsive_header-right_side"> - <ul id="responsive_header-links"> - <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li> - <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li> - <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li> - <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li> - <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li> - </ul> - <div id="responsive_header-search"> - <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced 
Search</a> - <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov"> - <fieldset> - <div class="form"> - <label for="q">Search FDIC.gov</label> - <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" /> - <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" /> - <input value="date:D:L:d1" name="sort" type="hidden" /> - - <input value="xml_no_dtd" name="output" type="hidden" /> - <input value="UTF-8" name="ie" type="hidden" /> - <input value="UTF-8" name="oe" type="hidden" /> - <input value="wwwGOV" name="client" type="hidden" /> - <input value="wwwGOV" name="proxystylesheet" type="hidden" /> - <input value="default" name="site" type="hidden" /> - </div> - </fieldset> - </form> - </div> - </div> - <!-- close right side --> - <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a> - <h1>Federal Deposit<br>Insurance Corporation</h1> - <h2>Each depositor insured to at least $250,000 per insured bank</h2> - <div class="clear"></div> - <nav> - <div id="responsive_header_nav"> - <div id="responsive_header-topnav"> - <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div> - <ul id="responsive_header-topnav-list"> - <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li> - <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li> - <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li> - <li id="responsive_header-topnav-bank" 
title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li> - <li id="responsive_header-topnav-regulations" title="Regulations &amp; Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations &amp; Examinations</a></li> - <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li> - <li id="responsive_header-topnav-news" title="News &amp; Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News &amp; Events</a></li> - <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li> - </ul> - <div class="clear"></div> - </div> - <div id="responsive_header-topnav_subnav"> - <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div> - <ul id="responsive_header-topnav-home_subnav"><li><a>&nbsp;</a></li></ul> - <ul id="responsive_header-topnav-deposit_subnav"> - <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li> - <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li> - <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li> - <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li> - <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li> - </ul> - <ul id="responsive_header-topnav-consumers_subnav"> - <li title="Consumer News &amp; Information"><a href="/consumers/consumer/">Consumer News &amp; Information</a></li> - <li title="Loans &amp; Mortgages"><a href="/consumers/loans/">Loans &amp; Mortgages</a></li> - <li title="Banking &amp; Your Money"><a href="/consumers/banking/">Banking &amp; Your 
Money</a></li> - <li title="Financial Education &amp; Literacy"><a href="/consumers/education/">Financial Education &amp; Literacy</a></li> - <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li> - <li title="Identity Theft &amp; Fraud"><a href="/consumers/theft/">Identity Theft &amp; Fraud</a></li> - <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li> - </ul> - <ul id="responsive_header-topnav-bank_subnav"> - <li title="Bank Data &amp; Statistics"><a href="/bank/statistical/">Bank Data &amp; Statistics</a></li> - <li title="Research &amp; Analysis"><a href="/bank/analytical/">Research &amp; Analysis</a></li> - <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li> - </ul> - <ul id="responsive_header-topnav-regulations_subnav"> - <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li> - <li title="Laws &amp; Regulations"><a href="/regulations/laws/">Laws &amp; Regulations</a></li> - <li title="Resources for Bank Officers &amp; Directors"><a href="/regulations/resources/">Resources for Bank Officers &amp; Directors</a></li> - <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li> - <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li> - <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li> - </ul> - <ul id="responsive_header-topnav-buying_subnav"> - <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li> - <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li> - <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li> - <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li> - <li title="Other Asset Sales"><a 
href="/buying/otherasset/">Other Asset Sales</a></li> - <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li> - </ul> - <ul id="responsive_header-topnav-news_subnav"> - <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li> - <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li> - <li title="Conferences &amp; Events"><a href="/news/conferences/">Conferences &amp; Events</a></li> - <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li> - <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li> - <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li> - <li title="Speeches &amp; Testimony"><a href="/news/news/speeches/chairman/">Speeches &amp; Testimony</a></li> - </ul> - <ul id="responsive_header-topnav-about_subnav"> - <li title="Mission &amp; Purpose"><a href="/about/index.html#1">Mission &amp; Purpose</a></span></li> - <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li> - <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li> - <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li> - <li title="Plans &amp; Reports"><a href="/about/index.html#5">Plans &amp; Reports</a></span></li> - <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li> - <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li> - </ul> - </div><!-- Close subnav --> - <div class="clear"></div> - </div> - </nav> -</div> -</header> -<a id="after_header" name="after_header"></a> -<script type="text/javascript"> -prepare_responsive_header_nav(); -</script> -<!-- END of 
Header --> - -<div id="breadcrumbs"><a href="/">Home</a> &gt; <a href="/bank/">Industry Analysis</a> &gt; <a href="/bank/individual/failed/">Failed Banks</a> &gt; Failed Bank List</div> - -<div id="content" class="failed_bank_list"> - - <h1 class="page_title">Failed Bank List</h1> - - <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p> - - <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p> - - <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. 
Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p> - - <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p> - - <script type="text/javascript"> - <!-- - document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>"); - //--> - </script> - - <div id="table_wrapper"> - <table id="table" class="sortable"> - <thead> - <tr> - <th id="institution" scope="col">Bank Name</th> - <th id="city" class="nosort" scope="col">City</th> - <th id="state" scope="col">ST</th> - <th id="cert" class="nosort" scope="col">CERT</th> - <th id="ai" scope="col">Acquiring Institution</th> - <th id="closing" scope="col">Closing Date</th> - <th id="updated" scope="col">Updated Date</th> - </tr> - </thead> - <tbody> - <tr> - <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td> - <td class="city">Kenosha</td> - <td class="state">WI</td> - <td class="cert">35386</td> - <td class="ai">North Shore Bank, FSB</td> - <td class="closing">May 31, 2013</td> - <td class="updated">May 31, 2013</td> - </tr> - <tr> - <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td> - <td class="city">Scottsdale</td> - <td class="state">AZ</td> - <td class="cert">34527</td> - <td class="ai">Western State Bank</td> - <td class="closing">May 14, 2013</td> - <td class="updated">May 20, 2013</td> - </tr> - <tr> - <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td> - <td class="city">Valdosta</td> - <td class="state">GA</td> - <td class="cert">58185</td> - <td class="ai">Synovus Bank</td> - <td class="closing">May 10, 2013</td> - <td class="updated">May 21, 2013</td> - </tr> - <tr> - <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td> - <td class="city">Asheville</td> - <td class="state">NC</td> - <td class="cert">58701</td> - <td 
class="ai">Capital Bank, N.A.</td> - <td class="closing">May 10, 2013</td> - <td class="updated">May 14, 2013</td> - </tr> - <tr> - <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td> - <td class="city">Douglasville</td> - <td class="state">GA</td> - <td class="cert">21649</td> - <td class="ai">Hamilton State Bank</td> - <td class="closing">April 26, 2013</td> - <td class="updated">May 16, 2013</td> - </tr> - <tr> - <td class="institution"><a href="parkway.html">Parkway Bank</a></td> - <td class="city">Lenoir</td> - <td class="state">NC</td> - <td class="cert">57158</td> - <td class="ai">CertusBank, National Association</td> - <td class="closing">April 26, 2013</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td> - <td class="city">Marianna</td> - <td class="state">FL</td> - <td class="cert">58034</td> - <td class="ai">First Federal Bank of Florida</td> - <td class="closing">April 19, 2013</td> - <td class="updated">May 16, 2013</td> - </tr> - <tr> - <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td> - <td class="city">Orange Park</td> - <td class="state">FL</td> - <td class="cert">26680</td> - <td class="ai">FirstAtlantic Bank</td> - <td class="closing">April 19, 2013</td> - <td class="updated">May 16, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td> - <td class="city">Lexington</td> - <td class="state">KY</td> - <td class="cert">29594</td> - <td class="ai">Your Community Bank</td> - <td class="closing">April 19, 2013</td> - <td class="updated">April 23, 2013</td> - </tr> - <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td> - <td class="city">Gold Canyon</td> - <td class="state">AZ</td> - <td class="cert">58066</td> - <td class="ai">First Scottsdale Bank, National Association</td> - <td class="closing">April 5, 2013</td> - 
<td class="updated">April 9, 2013</td> - </tr> - <tr> - <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td> - <td class="city">LaGrange</td> - <td class="state">GA</td> - <td class="cert">16431</td> - <td class="ai">HeritageBank of the South</td> - <td class="closing">March 8, 2013</td> - <td class="updated">March 26, 2013</td> - </tr> - <tr> - <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">22476</td> - <td class="ai">Liberty Bank and Trust Company</td> - <td class="closing">February 15, 2013</td> - <td class="updated">March 4, 2013</td> - </tr> - <tr> - <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td> - <td class="city">Andover</td> - <td class="state">MN</td> - <td class="cert">57157</td> - <td class="ai">First Minnesota Bank</td> - <td class="closing">January 18, 2013</td> - <td class="updated">February 28, 2013</td> - </tr> - <tr> - <td class="institution"><a href="westside.html">Westside Community Bank</a></td> - <td class="city">University Place</td> - <td class="state">WA</td> - <td class="cert">33997</td> - <td class="ai">Sunwest Bank</td> - <td class="closing">January 11, 2013</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td> - <td class="city">Sunrise Beach</td> - <td class="state">MO</td> - <td class="cert">27331</td> - <td class="ai">Bank of Sullivan</td> - <td class="closing">December 14, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td> - <td class="city">Braselton</td> - <td class="state">GA</td> - <td class="cert">57928</td> - <td class="ai">CertusBank, National Association</td> - <td class="closing">November 16, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td 
class="institution"><a href="cfnb.html">Citizens First National Bank</a></td> - <td class="city">Princeton</td> - <td class="state">IL</td> - <td class="cert">3731</td> - <td class="ai">Heartland Bank and Trust Company</td> - <td class="closing">November 2, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td> - <td class="city">Lutz</td> - <td class="state">FL</td> - <td class="cert">35009</td> - <td class="ai">Centennial Bank</td> - <td class="closing">November 2, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="novabank.html">NOVA Bank</a></td> - <td class="city">Berwyn</td> - <td class="state">PA</td> - <td class="cert">27148</td> - <td class="ai">No Acquirer</td> - <td class="closing">October 26, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="excelbank.html">Excel Bank</a></td> - <td class="city">Sedalia</td> - <td class="state">MO</td> - <td class="cert">19189</td> - <td class="ai">Simmons First National Bank</td> - <td class="closing">October 19, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td> - <td class="city">Tamarac</td> - <td class="state">FL</td> - <td class="cert">28144</td> - <td class="ai">Stearns Bank N.A.</td> - <td class="closing">October 19, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td> - <td class="city">Destin</td> - <td class="state">FL</td> - <td class="cert">58073</td> - <td class="ai">SmartBank</td> - <td class="closing">October 19, 2012</td> - <td class="updated">January 24, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firstunited.html">First United Bank</a></td> - <td class="city">Crete</td> - <td 
class="state">IL</td> - <td class="cert">20685</td> - <td class="ai">Old Plank Trail Community Bank, National Association</td> - <td class="closing">September 28, 2012</td> - <td class="updated">November 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="truman.html">Truman Bank</a></td> - <td class="city">St. Louis</td> - <td class="state">MO</td> - <td class="cert">27316</td> - <td class="ai">Simmons First National Bank</td> - <td class="closing">September 14, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td> - <td class="city">Bloomington</td> - <td class="state">MN</td> - <td class="cert">35246</td> - <td class="ai">Republic Bank &amp; Trust Company</td> - <td class="closing">September 7, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td> - <td class="city">Waukegan</td> - <td class="state">IL</td> - <td class="cert">28243</td> - <td class="ai">First Midwest Bank</td> - <td class="closing">August 3, 2012</td> - <td class="updated">October 11, 2012</td> - </tr> - <tr> - <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td> - <td class="city">Jasper</td> - <td class="state">GA</td> - <td class="cert">16240</td> - <td class="ai">Stearns Bank N.A.</td> - <td class="closing">July 27, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">27986</td> - <td class="ai">Hinsdale Bank &amp; Trust Company</td> - <td class="closing">July 20, 2012</td> - <td class="updated">January 14, 2013</td> - </tr> - <tr> - <td class="institution"><a href="heartland.html">Heartland Bank</a></td> - <td class="city">Leawood</td> - <td 
class="state">KS</td> - <td class="cert">1361</td> - <td class="ai">Metcalf Bank</td> - <td class="closing">July 20, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td> - <td class="city">Woodstock</td> - <td class="state">GA</td> - <td class="cert">32711</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">July 20, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td> - <td class="city">Buford</td> - <td class="state">GA</td> - <td class="cert">57847</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">July 20, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td> - <td class="city">Naples</td> - <td class="state">FL</td> - <td class="cert">57096</td> - <td class="ai">First National Bank of the Gulf Coast</td> - <td class="closing">July 20, 2012</td> - <td class="updated">January 7, 2013</td> - </tr> - <tr> - <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td> - <td class="city">Glasgow</td> - <td class="state">MO</td> - <td class="cert">1056</td> - <td class="ai">Regional Missouri Bank</td> - <td class="closing">July 13, 2012</td> - <td class="updated">October 11, 2012</td> - </tr> - <tr> - <td class="institution"><a href="montgomery.html">Montgomery Bank &amp; Trust</a></td> - <td class="city">Ailey</td> - <td class="state">GA</td> - <td class="cert">19498</td> - <td class="ai">Ameris Bank</td> - <td class="closing">July 6, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td> - <td class="city">Lynchburg</td> - <td class="state">TN</td> - <td class="cert">1690</td> - <td 
class="ai">Clayton Bank and Trust</td> - <td class="closing">June 15, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td> - <td class="city">Marietta</td> - <td class="state">GA</td> - <td class="cert">35299</td> - <td class="ai">Fidelity Bank</td> - <td class="closing">June 15, 2012</td> - <td class="updated">October 10, 2012</td> - </tr> - <tr> - <td class="institution"><a href="putnam.html">Putnam State Bank</a></td> - <td class="city">Palatka</td> - <td class="state">FL</td> - <td class="cert">27405</td> - <td class="ai">Harbor Community Bank</td> - <td class="closing">June 15, 2012</td> - <td class="updated">October 10, 2012</td> - </tr> - <tr> - <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td> - <td class="city">Whiteville</td> - <td class="state">NC</td> - <td class="cert">34515</td> - <td class="ai">First Community Bank</td> - <td class="closing">June 8, 2012</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td> - <td class="city">Shabbona</td> - <td class="state">IL</td> - <td class="cert">9257</td> - <td class="ai">First State Bank</td> - <td class="closing">June 8, 2012</td> - <td class="updated">October 10, 2012</td> - </tr> - <tr> - <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td> - <td class="city">Charleston</td> - <td class="state">SC</td> - <td class="cert">35372</td> - <td class="ai">Bank of North Carolina</td> - <td class="closing">June 8, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td> - <td class="city">Kingfisher</td> - <td class="state">OK</td> - <td class="cert">416</td> - <td class="ai">F &amp; M Bank</td> - <td class="closing">June 8, 2012</td> - <td class="updated">October 10, 
2012</td> - </tr> - <tr> - <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td> - <td class="city">Sylacauga</td> - <td class="state">AL</td> - <td class="cert">35224</td> - <td class="ai">Southern States Bank</td> - <td class="closing">May 18, 2012</td> - <td class="updated">May 20, 2013</td> - </tr> - <tr> - <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td> - <td class="city">North Lauderdale</td> - <td class="state">FL</td> - <td class="cert">23156</td> - <td class="ai">Banesco USA</td> - <td class="closing">May 4, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td> - <td class="city">Palm Desert</td> - <td class="state">CA</td> - <td class="cert">23632</td> - <td class="ai">Pacific Premier Bank</td> - <td class="closing">April 27, 2012</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td> - <td class="city">Pawleys Island</td> - <td class="state">SC</td> - <td class="cert">32503</td> - <td class="ai">First Federal Bank</td> - <td class="closing">April 27, 2012</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td> - <td class="city">Maple Grove</td> - <td class="state">MN</td> - <td class="cert">31495</td> - <td class="ai">Great Southern Bank</td> - <td class="closing">April 27, 2012</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td> - <td class="city">Gaithersburg</td> - <td class="state">MD</td> - <td class="cert">57766</td> - <td class="ai">Sonabank</td> - <td class="closing">April 27, 2012</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td 
class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td> - <td class="city">Cambridge</td> - <td class="state">MD</td> - <td class="cert">26759</td> - <td class="ai">No Acquirer</td> - <td class="closing">April 27, 2012</td> - <td class="updated">October 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td> - <td class="city">Fort Lee</td> - <td class="state">NJ</td> - <td class="cert">35527</td> - <td class="ai">Alma Bank</td> - <td class="closing">April 20, 2012</td> - <td class="updated">May 17, 2013</td> - </tr> - <tr> - <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td> - <td class="city">Dearborn</td> - <td class="state">MI</td> - <td class="cert">33883</td> - <td class="ai">The Huntington National Bank</td> - <td class="closing">March 30, 2012</td> - <td class="updated">May 16, 2013</td> - </tr> - <tr> - <td class="institution"><a href="premier-il.html">Premier Bank</a></td> - <td class="city">Wilmette</td> - <td class="state">IL</td> - <td class="cert">35419</td> - <td class="ai">International Bank of Chicago</td> - <td class="closing">March 23, 2012</td> - <td class="updated">October 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="covenant.html">Covenant Bank &amp; Trust</a></td> - <td class="city">Rock Spring</td> - <td class="state">GA</td> - <td class="cert">58068</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">March 23, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="newcity.html">New City Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">57597</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 9, 2012</td> - <td class="updated">October 29, 2012</td> - </tr> - <tr> - <td class="institution"><a href="global.html">Global Commerce Bank</a></td> - <td class="city">Doraville</td> - <td 
class="state">GA</td> - <td class="cert">34046</td> - <td class="ai">Metro City Bank</td> - <td class="closing">March 2, 2012</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td> - <td class="city">Little Falls</td> - <td class="state">MN</td> - <td class="cert">29178</td> - <td class="ai">No Acquirer</td> - <td class="closing">February 24, 2012</td> - <td class="updated">December 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td> - <td class="city">Ellaville</td> - <td class="state">GA</td> - <td class="cert">5687</td> - <td class="ai">Ameris Bank</td> - <td class="closing">February 24, 2012</td> - <td class="updated">August 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="scbbank.html">SCB Bank</a></td> - <td class="city">Shelbyville</td> - <td class="state">IN</td> - <td class="cert">29761</td> - <td class="ai">First Merchants Bank, National Association</td> - <td class="closing">February 10, 2012</td> - <td class="updated">March 25, 2013</td> - </tr> - <tr> - <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td> - <td class="city">Hoffman Estates</td> - <td class="state">IL</td> - <td class="cert">23187</td> - <td class="ai">Barrington Bank &amp; Trust Company, National Association</td> - <td class="closing">February 10, 2012</td> - <td class="updated">March 25, 2013</td> - </tr> - <tr> - <td class="institution"><a href="bankeast.html">BankEast</a></td> - <td class="city">Knoxville</td> - <td class="state">TN</td> - <td class="cert">19869</td> - <td class="ai">U.S.Bank National Association</td> - <td class="closing">January 27, 2012</td> - <td class="updated">March 8, 2013</td> - </tr> - <tr> - <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td> - <td class="city">Forest Lake</td> - <td class="state">MN</td> - <td class="cert">34823</td> - 
<td class="ai">First Resource Bank</td> - <td class="closing">January 27, 2012</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td> - <td class="city">Franklin</td> - <td class="state">TN</td> - <td class="cert">35296</td> - <td class="ai">Republic Bank &amp; Trust Company</td> - <td class="closing">January 27, 2012</td> - <td class="updated">November 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td> - <td class="city">Jacksonville</td> - <td class="state">FL</td> - <td class="cert">16579</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">January 27, 2012</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td> - <td class="city">Boothwyn</td> - <td class="state">PA</td> - <td class="cert">31581</td> - <td class="ai">Capital Bank, N.A.</td> - <td class="closing">January 20, 2012</td> - <td class="updated">January 25, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td> - <td class="city">Stockbridge</td> - <td class="state">GA</td> - <td class="cert">19252</td> - <td class="ai">Hamilton State Bank</td> - <td class="closing">January 20, 2012</td> - <td class="updated">January 25, 2013</td> - </tr> - <tr> - <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td> - <td class="city">Belleview</td> - <td class="state">FL</td> - <td class="cert">57186</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">January 20, 2012</td> - <td class="updated">January 25, 2013</td> - </tr> - <tr> - <td class="institution"><a href="westernnatl.html">Western National Bank</a></td> - <td class="city">Phoenix</td> - <td class="state">AZ</td> - <td 
class="cert">57917</td> - <td class="ai">Washington Federal</td> - <td class="closing">December 16, 2011</td> - <td class="updated">August 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td> - <td class="city">Crestview</td> - <td class="state">FL</td> - <td class="cert">58343</td> - <td class="ai">Summit Bank</td> - <td class="closing">December 16, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td> - <td class="city">Lacombe</td> - <td class="state">LA</td> - <td class="cert">19657</td> - <td class="ai">First NBC Bank</td> - <td class="closing">November 18, 2011</td> - <td class="updated">August 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td> - <td class="city">Johnston</td> - <td class="state">IA</td> - <td class="cert">14194</td> - <td class="ai">Grinnell State Bank</td> - <td class="closing">November 18, 2011</td> - <td class="updated">August 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td> - <td class="city">Rockmart</td> - <td class="state">GA</td> - <td class="cert">57860</td> - <td class="ai">Century Bank of Georgia</td> - <td class="closing">November 10, 2011</td> - <td class="updated">August 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td> - <td class="city">Saint George</td> - <td class="state">UT</td> - <td class="cert">57087</td> - <td class="ai">Cache Valley Bank</td> - <td class="closing">November 4, 2011</td> - <td class="updated">November 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td> - <td class="city">Omaha</td> - <td class="state">NE</td> - <td class="cert">19397</td> - <td class="ai">Premier Bank</td> - <td 
class="closing">November 4, 2011</td> - <td class="updated">August 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="allamerican.html ">All American Bank</a></td> - <td class="city">Des Plaines</td> - <td class="state">IL</td> - <td class="cert">57759</td> - <td class="ai">International Bank of Chicago</td> - <td class="closing">October 28, 2011</td> - <td class="updated">August 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td> - <td class="city">Greenwood Village</td> - <td class="state">CO</td> - <td class="cert">21132</td> - <td class="ai">Bank Midwest, N.A.</td> - <td class="closing">October 21, 2011</td> - <td class="updated">January 2, 2013</td> - </tr> - <tr> - <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td> - <td class="city">Jonesboro</td> - <td class="state">GA</td> - <td class="cert">57036</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">October 21, 2011</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="decatur.html">Decatur First Bank</a></td> - <td class="city">Decatur</td> - <td class="state">GA</td> - <td class="cert">34392</td> - <td class="ai">Fidelity Bank</td> - <td class="closing">October 21, 2011</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td> - <td class="city">Clearwater</td> - <td class="state">FL</td> - <td class="cert">57537</td> - <td class="ai">1st United Bank</td> - <td class="closing">October 21, 2011</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="countrybank.html">Country Bank</a></td> - <td class="city">Aledo</td> - <td class="state">IL</td> - <td class="cert">35395</td> - <td class="ai">Blackhawk Bank &amp; Trust</td> - <td class="closing">October 14, 2011</td> - <td class="updated">August 15, 
2012</td> - </tr> - <tr> - <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td> - <td class="city">Cranford</td> - <td class="state">NJ</td> - <td class="cert">58046</td> - <td class="ai">Northfield Bank</td> - <td class="closing">October 14, 2011</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td> - <td class="city">Asheville</td> - <td class="state">NC</td> - <td class="cert">32347</td> - <td class="ai">Bank of North Carolina</td> - <td class="closing">October 14, 2011</td> - <td class="updated">November 8, 2012</td> - </tr> - <tr> - <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td> - <td class="city">Gray</td> - <td class="state">GA</td> - <td class="cert">57256</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">October 14, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td> - <td class="city">Ellington</td> - <td class="state">MO</td> - <td class="cert">20115</td> - <td class="ai">Great Southern Bank</td> - <td class="closing">October 7, 2011</td> - <td class="updated">November 7, 2012</td> - </tr> - <tr> - <td class="institution"><a href="riverbank.html">The RiverBank</a></td> - <td class="city">Wyoming</td> - <td class="state">MN</td> - <td class="cert">10216</td> - <td class="ai">Central Bank</td> - <td class="closing">October 7, 2011</td> - <td class="updated">November 7, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstintlbank.html">First International Bank</a></td> - <td class="city">Plano</td> - <td class="state">TX</td> - <td class="cert">33513</td> - <td class="ai">American First National Bank</td> - <td class="closing">September 30, 2011</td> - <td class="updated">October 9, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="cbnc.html">Citizens Bank of Northern California</a></td> - <td class="city">Nevada City</td> - <td class="state">CA</td> - <td class="cert">33983</td> - <td class="ai">Tri Counties Bank</td> - <td class="closing">September 23, 2011</td> - <td class="updated">October 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td> - <td class="city">Norfolk</td> - <td class="state">VA</td> - <td class="cert">20408</td> - <td class="ai">Southern Bank and Trust Company</td> - <td class="closing">September 23, 2011</td> - <td class="updated">October 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td> - <td class="city">Milton</td> - <td class="state">FL</td> - <td class="cert">25155</td> - <td class="ai">CharterBank</td> - <td class="closing">September 9, 2011</td> - <td class="updated">September 6, 2012</td> - </tr> - <tr> - <td class="institution"><a href="creekside.html">CreekSide Bank</a></td> - <td class="city">Woodstock</td> - <td class="state">GA</td> - <td class="cert">58226</td> - <td class="ai">Georgia Commerce Bank</td> - <td class="closing">September 2, 2011</td> - <td class="updated">September 6, 2012</td> - </tr> - <tr> - <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td> - <td class="city">Cumming</td> - <td class="state">GA</td> - <td class="cert">58273</td> - <td class="ai">Georgia Commerce Bank</td> - <td class="closing">September 2, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td> - <td class="city">Geneva</td> - <td class="state">IL</td> - <td class="cert">57212</td> - <td class="ai">Inland Bank &amp; Trust</td> - <td class="closing">August 19, 2011</td> - <td class="updated">August 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td> 
- <td class="city">Statesboro</td> - <td class="state">GA</td> - <td class="cert">57239</td> - <td class="ai">Heritage Bank of the South</td> - <td class="closing">August 19, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td> - <td class="city">Palm Beach</td> - <td class="state">FL</td> - <td class="cert">35356</td> - <td class="ai">Sabadell United Bank, N.A.</td> - <td class="closing">August 19, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td> - <td class="city">Huntingdon Valley</td> - <td class="state">PA</td> - <td class="cert">34130</td> - <td class="ai">Capital Bank, N.A.</td> - <td class="closing">August 18, 2011</td> - <td class="updated">August 15, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td> - <td class="city">Olathe</td> - <td class="state">KS</td> - <td class="cert">4744</td> - <td class="ai">Enterprise Bank &amp; Trust</td> - <td class="closing">August 12, 2011</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="whitman.html">Bank of Whitman</a></td> - <td class="city">Colfax</td> - <td class="state">WA</td> - <td class="cert">22528</td> - <td class="ai">Columbia State Bank</td> - <td class="closing">August 5, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td> - <td class="city">Shorewood</td> - <td class="state">IL</td> - <td class="cert">22637</td> - <td class="ai">Heartland Bank and Trust Company</td> - <td class="closing">August 5, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="integra.html">Integra Bank National Association</a></td> - <td class="city">Evansville</td> - <td 
class="state">IN</td> - <td class="cert">4392</td> - <td class="ai">Old National Bank</td> - <td class="closing">July 29, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td> - <td class="city">Columbia</td> - <td class="state">SC</td> - <td class="cert">58222</td> - <td class="ai">SCBT National Association</td> - <td class="closing">July 29, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td> - <td class="city">Richmond</td> - <td class="state">VA</td> - <td class="cert">58283</td> - <td class="ai">Xenith Bank</td> - <td class="closing">July 29, 2011</td> - <td class="updated">October 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td> - <td class="city">Greeley</td> - <td class="state">CO</td> - <td class="cert">2994</td> - <td class="ai">Bank Midwest, N.A.</td> - <td class="closing">July 22, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td> - <td class="city">Sarasota</td> - <td class="state">FL</td> - <td class="cert">35244</td> - <td class="ai">American Momentum Bank</td> - <td class="closing">July 22, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td> - <td class="city">Apollo Beach</td> - <td class="state">FL</td> - <td class="cert">58056</td> - <td class="ai">American Momentum Bank</td> - <td class="closing">July 22, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="summitbank.html">Summit Bank</a></td> - <td class="city">Prescott</td> - <td class="state">AZ</td> - <td class="cert">57442</td> - <td class="ai">The Foothills Bank</td> - <td 
class="closing">July 15, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td> - <td class="city">Port St. Lucie</td> - <td class="state">FL</td> - <td class="cert">34870</td> - <td class="ai">Premier American Bank, N.A.</td> - <td class="closing">July 15, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="hightrust.html">High Trust Bank</a></td> - <td class="city">Stockbridge</td> - <td class="state">GA</td> - <td class="cert">19554</td> - <td class="ai">Ameris Bank</td> - <td class="closing">July 15, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">58238</td> - <td class="ai">Ameris Bank</td> - <td class="closing">July 15, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="signaturebank.html">Signature Bank</a></td> - <td class="city">Windsor</td> - <td class="state">CO</td> - <td class="cert">57835</td> - <td class="ai">Points West Community Bank</td> - <td class="closing">July 8, 2011</td> - <td class="updated">October 26, 2012</td> - </tr> - <tr> - <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td> - <td class="city">Castle Rock</td> - <td class="state">CO</td> - <td class="cert">34522</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">July 8, 2011</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firstchicago.html">First Chicago Bank &amp; Trust</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">27935</td> - <td class="ai">Northbrook Bank &amp; Trust Company</td> - <td class="closing">July 8, 2011</td> - <td 
class="updated">September 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td> - <td class="city">Clayton</td> - <td class="state">GA</td> - <td class="cert">57593</td> - <td class="ai">First American Bank and Trust Company</td> - <td class="closing">June 24, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td> - <td class="city">Tampa</td> - <td class="state">FL</td> - <td class="cert">27583</td> - <td class="ai">Stonegate Bank</td> - <td class="closing">June 17, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td> - <td class="city">Jackson</td> - <td class="state">GA</td> - <td class="cert">19237</td> - <td class="ai">Hamilton State Bank</td> - <td class="closing">June 17, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td> - <td class="city">Charleston</td> - <td class="state">SC</td> - <td class="cert">58420</td> - <td class="ai">First Citizens Bank and Trust Company, Inc.</td> - <td class="closing">June 3, 2011</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td> - <td class="city">Snohomish</td> - <td class="state">WA</td> - <td class="cert">23626</td> - <td class="ai">Columbia State Bank</td> - <td class="closing">May 27, 2011</td> - <td class="updated">January 28, 2013</td> - </tr> - <tr> - <td class="institution"><a href="summit.html">Summit Bank</a></td> - <td class="city">Burlington</td> - <td class="state">WA</td> - <td class="cert">513</td> - <td class="ai">Columbia State Bank</td> - <td class="closing">May 20, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td 
class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td> - <td class="city">Franklin</td> - <td class="state">GA</td> - <td class="cert">57647</td> - <td class="ai">CertusBank, National Association</td> - <td class="closing">May 20, 2011</td> - <td class="updated">November 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td> - <td class="city">Macon</td> - <td class="state">GA</td> - <td class="cert">57213</td> - <td class="ai">CertusBank, National Association</td> - <td class="closing">May 20, 2011</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td> - <td class="city">Cocoa Beach</td> - <td class="state">FL</td> - <td class="cert">34898</td> - <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> - <td class="closing">May 6, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td> - <td class="city">Mount Clemens</td> - <td class="state">MI</td> - <td class="cert">34234</td> - <td class="ai">Talmer Bank &amp; Trust</td> - <td class="closing">April 29, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td> - <td class="city">Valdosta</td> - <td class="state">GA</td> - <td class="cert">19797</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">April 29, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td> - <td class="city">Dallas</td> - <td class="state">GA</td> - <td class="cert">58539</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">April 29, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td 
class="institution"><a href="cortez.html">Cortez Community Bank</a></td> - <td class="city">Brooksville</td> - <td class="state">FL</td> - <td class="cert">57625</td> - <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> - <td class="closing">April 29, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td> - <td class="city">Winter Park</td> - <td class="state">FL</td> - <td class="cert">26297</td> - <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> - <td class="closing">April 29, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td> - <td class="city">Carthage</td> - <td class="state">MS</td> - <td class="cert">14273</td> - <td class="ai">Trustmark National Bank</td> - <td class="closing">April 15, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td> - <td class="city">Rosemount</td> - <td class="state">MN</td> - <td class="cert">24099</td> - <td class="ai">Central Bank</td> - <td class="closing">April 15, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="superior_al.html">Superior Bank</a></td> - <td class="city">Birmingham</td> - <td class="state">AL</td> - <td class="cert">17750</td> - <td class="ai">Superior Bank, National Association</td> - <td class="closing">April 15, 2011</td> - <td class="updated">November 30, 2012</td> - </tr> - <tr> - <td class="institution"><a href="nexity.html">Nexity Bank</a></td> - <td class="city">Birmingham</td> - <td class="state">AL</td> - <td class="cert">19794</td> - <td class="ai">AloStar Bank of Commerce</td> - <td class="closing">April 15, 2011</td> - <td class="updated">September 4, 2012</td> - 
</tr> - <tr> - <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td> - <td class="city">East Ellijay</td> - <td class="state">GA</td> - <td class="cert">57705</td> - <td class="ai">Citizens South Bank</td> - <td class="closing">April 15, 2011</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bartow.html">Bartow County Bank</a></td> - <td class="city">Cartersville</td> - <td class="state">GA</td> - <td class="cert">21495</td> - <td class="ai">Hamilton State Bank</td> - <td class="closing">April 15, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td> - <td class="city">Las Vegas</td> - <td class="state">NV</td> - <td class="cert">35418</td> - <td class="ai">City National Bank</td> - <td class="closing">April 8, 2011</td> - <td class="updated">September 9, 2012</td> - </tr> - <tr> - <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td> - <td class="city">Western Springs</td> - <td class="state">IL</td> - <td class="cert">10086</td> - <td class="ai">Heartland Bank and Trust Company</td> - <td class="closing">April 8, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td> - <td class="city">Wood Dale</td> - <td class="state">IL</td> - <td class="cert">34292</td> - <td class="ai">Advantage National Bank Group</td> - <td class="closing">March 25, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td> - <td class="city">Milwaukee</td> - <td class="state">WI</td> - <td class="cert">34818</td> - <td class="ai">Seaway Bank and Trust Company</td> - <td class="closing">March 11, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td 
class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td> - <td class="city">Davis</td> - <td class="state">OK</td> - <td class="cert">4077</td> - <td class="ai">The Pauls Valley National Bank</td> - <td class="closing">March 11, 2011</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td> - <td class="city">St. Charles</td> - <td class="state">IL</td> - <td class="cert">34187</td> - <td class="ai">First State Bank</td> - <td class="closing">February 25, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td> - <td class="city">San Luis Obispo</td> - <td class="state">CA</td> - <td class="cert">34783</td> - <td class="ai">First California Bank</td> - <td class="closing">February 18, 2011</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td> - <td class="city">Napa</td> - <td class="state">CA</td> - <td class="cert">57855</td> - <td class="ai">Bank of Marin</td> - <td class="closing">February 18, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td> - <td class="city">Springfield</td> - <td class="state">GA</td> - <td class="cert">34601</td> - <td class="ai">Heritage Bank of the South</td> - <td class="closing">February 18, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="habersham.html">Habersham Bank</a></td> - <td class="city">Clarkesville</td> - <td class="state">GA</td> - <td class="cert">151</td> - <td class="ai">SCBT National Association</td> - <td class="closing">February 18, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="canyonstate.html">Canyon National Bank</a></td> - <td class="city">Palm Springs</td> - <td class="state">CA</td> - <td class="cert">34692</td> - <td class="ai">Pacific Premier Bank</td> - <td class="closing">February 11, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td> - <td class="city">Cassville</td> - <td class="state">WI</td> - <td class="cert">13272</td> - <td class="ai">Royal Bank</td> - <td class="closing">February 11, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td> - <td class="city">Hamtramck</td> - <td class="state">MI</td> - <td class="cert">14939</td> - <td class="ai">First Michigan Bank</td> - <td class="closing">February 11, 2011</td> - <td class="updated">January 22, 2013</td> - </tr> - <tr> - <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td> - <td class="city">Port Orange</td> - <td class="state">FL</td> - <td class="cert">35478</td> - <td class="ai">Premier American Bank, N.A.</td> - <td class="closing">February 11, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">57948</td> - <td class="ai">Northbrook Bank &amp; Trust Company</td> - <td class="closing">February 4, 2011</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td> - <td class="city">Watkinsville</td> - <td class="state">GA</td> - <td class="cert">35242</td> - <td class="ai">BankSouth</td> - <td class="closing">February 4, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americantrust.html">American Trust 
Bank</a></td> - <td class="city">Roswell</td> - <td class="state">GA</td> - <td class="cert">57432</td> - <td class="ai">Renasant Bank</td> - <td class="closing">February 4, 2011</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td> - <td class="city">Taos</td> - <td class="state">NM</td> - <td class="cert">12261</td> - <td class="ai">U.S. Bank, N.A.</td> - <td class="closing">January 28, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstier.html">FirsTier Bank</a></td> - <td class="city">Louisville</td> - <td class="state">CO</td> - <td class="cert">57646</td> - <td class="ai">No Acquirer</td> - <td class="closing">January 28, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td> - <td class="city">Stoughton</td> - <td class="state">WI</td> - <td class="cert">5328</td> - <td class="ai">McFarland State Bank</td> - <td class="closing">January 28, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td> - <td class="city">Camargo</td> - <td class="state">OK</td> - <td class="cert">2303</td> - <td class="ai">Bank 7</td> - <td class="closing">January 28, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td> - <td class="city">Denver</td> - <td class="state">CO</td> - <td class="cert">31293</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">January 21, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td> - <td class="city">Asheville</td> - <td class="state">NC</td> - <td 
class="cert">34516</td> - <td class="ai">First Bank</td> - <td class="closing">January 21, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="commsouth.html">CommunitySouth Bank &amp; Trust</a></td> - <td class="city">Easley</td> - <td class="state">SC</td> - <td class="cert">57868</td> - <td class="ai">CertusBank, National Association</td> - <td class="closing">January 21, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td> - <td class="city">McDonough</td> - <td class="state">GA</td> - <td class="cert">19758</td> - <td class="ai">No Acquirer</td> - <td class="closing">January 21, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td> - <td class="city">Brunswick</td> - <td class="state">GA</td> - <td class="cert">57440</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">January 14, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="legacybank.html">Legacy Bank</a></td> - <td class="city">Scottsdale</td> - <td class="state">AZ</td> - <td class="cert">57820</td> - <td class="ai">Enterprise Bank &amp; Trust</td> - <td class="closing">January 7, 2011</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td> - <td class="city">Orlando</td> - <td class="state">FL</td> - <td class="cert">34965</td> - <td class="ai">First Southern Bank</td> - <td class="closing">January 7, 2011</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="communitynatl.html">Community National Bank</a></td> - <td class="city">Lino Lakes</td> - <td class="state">MN</td> - <td class="cert">23306</td> - <td class="ai">Farmers &amp; 
Merchants Savings Bank</td> - <td class="closing">December 17, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td> - <td class="city">Batesville</td> - <td class="state">AR</td> - <td class="cert">58052</td> - <td class="ai">Southern Bank</td> - <td class="closing">December 17, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">35065</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">December 17, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td> - <td class="city">McCaysville</td> - <td class="state">GA</td> - <td class="cert">58495</td> - <td class="ai">Peoples Bank of East Tennessee</td> - <td class="closing">December 17, 2010</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td> - <td class="city">Dawsonville</td> - <td class="state">GA</td> - <td class="cert">34578</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">December 17, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td> - <td class="city">Coral Gables</td> - <td class="state">FL</td> - <td class="cert">19040</td> - <td class="ai">1st United Bank</td> - <td class="closing">December 17, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td> - <td class="city">Southampton</td> - <td class="state">PA</td> - <td class="cert">35561</td> - <td class="ai">Polonia Bank</td> - <td 
class="closing">December 10, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="paramount.html">Paramount Bank</a></td> - <td class="city">Farmington Hills</td> - <td class="state">MI</td> - <td class="cert">34673</td> - <td class="ai">Level One Bank</td> - <td class="closing">December 10, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstbanking.html">First Banking Center</a></td> - <td class="city">Burlington</td> - <td class="state">WI</td> - <td class="cert">5287</td> - <td class="ai">First Michigan Bank</td> - <td class="closing">November 19, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td> - <td class="city">Bala Cynwyd</td> - <td class="state">PA</td> - <td class="cert">35078</td> - <td class="ai">VIST Bank</td> - <td class="closing">November 19, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td> - <td class="city">Carrabelle</td> - <td class="state">FL</td> - <td class="cert">20340</td> - <td class="ai">Centennial Bank</td> - <td class="closing">November 19, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td> - <td class="city">Scottsdale</td> - <td class="state">AZ</td> - <td class="cert">35463</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">November 12, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="darbybank.html">Darby Bank &amp; Trust Co.</a></td> - <td class="city">Vidalia</td> - <td class="state">GA</td> - <td class="cert">14580</td> - <td class="ai">Ameris Bank</td> - <td class="closing">November 12, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - 
<tr> - <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td> - <td class="city">Tifton</td> - <td class="state">GA</td> - <td class="cert">57831</td> - <td class="ai">Ameris Bank</td> - <td class="closing">November 12, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td> - <td class="city">Westminster</td> - <td class="state">CA</td> - <td class="cert">57885</td> - <td class="ai">Grandpoint Bank</td> - <td class="closing">November 5, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td> - <td class="city">Tacoma</td> - <td class="state">WA</td> - <td class="cert">34411</td> - <td class="ai">Heritage Bank</td> - <td class="closing">November 5, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td> - <td class="city">Woodland Hills</td> - <td class="state">CA</td> - <td class="cert">58087</td> - <td class="ai">First California Bank</td> - <td class="closing">November 5, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="kbank.html">K Bank</a></td> - <td class="city">Randallstown</td> - <td class="state">MD</td> - <td class="cert">31263</td> - <td class="ai">Manufacturers and Traders Trust Company (M&amp;T Bank)</td> - <td class="closing">November 5, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td> - <td class="city">Scottsdale</td> - <td class="state">AZ</td> - <td class="cert">32582</td> - <td class="ai">No Acquirer</td> - <td class="closing">October 22, 2010</td> - <td class="updated">August 20, 
2012</td> - </tr> - <tr> - <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td> - <td class="city">Overland Park</td> - <td class="state">KS</td> - <td class="cert">22173</td> - <td class="ai">Hillcrest Bank, N.A.</td> - <td class="closing">October 22, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td> - <td class="city">Maywood</td> - <td class="state">IL</td> - <td class="cert">16089</td> - <td class="ai">Seaway Bank and Trust Company</td> - <td class="closing">October 22, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td> - <td class="city">Barnesville</td> - <td class="state">GA</td> - <td class="cert">2119</td> - <td class="ai">United Bank</td> - <td class="closing">October 22, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="gordon.html">The Gordon Bank</a></td> - <td class="city">Gordon</td> - <td class="state">GA</td> - <td class="cert">33904</td> - <td class="ai">Morris Bank</td> - <td class="closing">October 22, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td> - <td class="city">Tampa</td> - <td class="state">FL</td> - <td class="cert">32251</td> - <td class="ai">Bay Cities Bank</td> - <td class="closing">October 22, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td> - <td class="city">Jacksonville</td> - <td class="state">FL</td> - <td class="cert">27573</td> - <td class="ai">Ameris Bank</td> - <td class="closing">October 22, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="premier_mo.html">Premier Bank</a></td> - <td class="city">Jefferson City</td> - <td class="state">MO</td> - <td class="cert">34016</td> - <td class="ai">Providence Bank</td> - <td class="closing">October 15, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td> - <td class="city">Chesterfield</td> - <td class="state">MO</td> - <td class="cert">58205</td> - <td class="ai">Midland States Bank</td> - <td class="closing">October 15, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td> - <td class="city">Olathe</td> - <td class="state">KS</td> - <td class="cert">30898</td> - <td class="ai">Simmons First National Bank</td> - <td class="closing">October 15, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td> - <td class="city">Shoreline</td> - <td class="state">WA</td> - <td class="cert">35250</td> - <td class="ai">GBC International Bank</td> - <td class="closing">October 1, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td> - <td class="city">Crawfordville</td> - <td class="state">FL</td> - <td class="cert">21777</td> - <td class="ai">Centennial Bank</td> - <td class="closing">October 1, 2010</td> - <td class="updated">November 2, 2012</td> - </tr> - <tr> - <td class="institution"><a href="northcounty.html">North County Bank</a></td> - <td class="city">Arlington</td> - <td class="state">WA</td> - <td class="cert">35053</td> - <td class="ai">Whidbey Island Bank</td> - <td class="closing">September 24, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td> - <td 
class="city">Ponte Vedra Beach</td> - <td class="state">FL</td> - <td class="cert">58308</td> - <td class="ai">First Southern Bank</td> - <td class="closing">September 24, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td> - <td class="city">West Allis</td> - <td class="state">WI</td> - <td class="cert">28612</td> - <td class="ai">North Shore Bank, FSB</td> - <td class="closing">September 17, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td> - <td class="city">Milford</td> - <td class="state">OH</td> - <td class="cert">27808</td> - <td class="ai">Foundation Bank</td> - <td class="closing">September 17, 2010</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td> - <td class="city">Winder</td> - <td class="state">GA</td> - <td class="cert">182</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">September 17, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td> - <td class="city">Douglasville</td> - <td class="state">GA</td> - <td class="cert">57448</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">September 17, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td> - <td class="city">Ellijay</td> - <td class="state">GA</td> - <td class="cert">58197</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">September 17, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="isnbank.html">ISN Bank</a></td> - <td class="city">Cherry Hill</td> - <td 
class="state">NJ</td> - <td class="cert">57107</td> - <td class="ai">Customers Bank</td> - <td class="closing">September 17, 2010</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td> - <td class="city">Bradenton</td> - <td class="state">FL</td> - <td class="cert">35061</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">September 10, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td> - <td class="city">Sonoma</td> - <td class="state">CA</td> - <td class="cert">27259</td> - <td class="ai">Westamerica Bank</td> - <td class="closing">August 20, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td> - <td class="city">Solvang</td> - <td class="state">CA</td> - <td class="cert">32165</td> - <td class="ai">Pacific Western Bank</td> - <td class="closing">August 20, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="butte.html">Butte Community Bank</a></td> - <td class="city">Chico</td> - <td class="state">CA</td> - <td class="cert">33219</td> - <td class="ai">Rabobank, N.A.</td> - <td class="closing">August 20, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td> - <td class="city">Stockton</td> - <td class="state">CA</td> - <td class="cert">27090</td> - <td class="ai">Rabobank, N.A.</td> - <td class="closing">August 20, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="shorebank.html">ShoreBank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">15640</td> - <td class="ai">Urban Partnership Bank</td> - <td class="closing">August 20, 2010</td> - <td 
class="updated">May 16, 2013</td> - </tr> - <tr> - <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td> - <td class="city">Martinsville</td> - <td class="state">VA</td> - <td class="cert">31623</td> - <td class="ai">River Community Bank, N.A.</td> - <td class="closing">August 20, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="inatbank.html">Independent National Bank</a></td> - <td class="city">Ocala</td> - <td class="state">FL</td> - <td class="cert">27344</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">August 20, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td> - <td class="city">Bartow</td> - <td class="state">FL</td> - <td class="cert">25266</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">August 20, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td> - <td class="city">Palos Heights</td> - <td class="state">IL</td> - <td class="cert">17599</td> - <td class="ai">First Midwest Bank</td> - <td class="closing">August 13, 2010</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">34231</td> - <td class="ai">Northbrook Bank &amp; Trust Company</td> - <td class="closing">August 6, 2010</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="libertyor.html">LibertyBank</a></td> - <td class="city">Eugene</td> - <td class="state">OR</td> - <td class="cert">31964</td> - <td class="ai">Home Federal Bank</td> - <td class="closing">July 30, 2010</td> - <td class="updated">August 22, 
2012</td> - </tr> - <tr> - <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td> - <td class="city">Longview</td> - <td class="state">WA</td> - <td class="cert">22643</td> - <td class="ai">Heritage Bank</td> - <td class="closing">July 30, 2010</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td> - <td class="city">Panama City Beach</td> - <td class="state">FL</td> - <td class="cert">9619</td> - <td class="ai">Centennial Bank</td> - <td class="closing">July 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td> - <td class="city">Port Saint Joe</td> - <td class="state">FL</td> - <td class="cert">57669</td> - <td class="ai">Centennial Bank</td> - <td class="closing">July 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="northwestga.html">Northwest Bank &amp; Trust</a></td> - <td class="city">Acworth</td> - <td class="state">GA</td> - <td class="cert">57658</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td> - <td class="city">Cave Junction</td> - <td class="state">OR</td> - <td class="cert">23181</td> - <td class="ai">South Valley Bank &amp; Trust</td> - <td class="closing">July 23, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td> - <td class="city">Las Vegas</td> - <td class="state">NV</td> - <td class="cert">35434</td> - <td class="ai">Plaza Bank</td> - <td class="closing">July 23, 2010</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="communitysecmn.html">Community Security 
Bank</a></td> - <td class="city">New Prague</td> - <td class="state">MN</td> - <td class="cert">34486</td> - <td class="ai">Roundbank</td> - <td class="closing">July 23, 2010</td> - <td class="updated">September 12, 2012</td> - </tr> - <tr> - <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td> - <td class="city">Sylvan Grove</td> - <td class="state">KS</td> - <td class="cert">10506</td> - <td class="ai">The Bennington State Bank</td> - <td class="closing">July 23, 2010</td> - <td class="updated">September 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td> - <td class="city">Kingstree</td> - <td class="state">SC</td> - <td class="cert">17837</td> - <td class="ai">First Citizens Bank and Trust Company, Inc.</td> - <td class="closing">July 23, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td> - <td class="city">Jasper</td> - <td class="state">GA</td> - <td class="cert">27559</td> - <td class="ai">Renasant Bank</td> - <td class="closing">July 23, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td> - <td class="city">Lantana</td> - <td class="state">FL</td> - <td class="cert">32536</td> - <td class="ai">IBERIABANK</td> - <td class="closing">July 23, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td> - <td class="city">Hastings</td> - <td class="state">MI</td> - <td class="cert">28136</td> - <td class="ai">Commercial Bank</td> - <td class="closing">July 16, 2010</td> - <td class="updated">September 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td> - <td class="city">Clewiston</td> - <td 
class="state">FL</td> - <td class="cert">28864</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">July 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td> - <td class="city">Aventura</td> - <td class="state">FL</td> - <td class="cert">32280</td> - <td class="ai">NAFH National Bank</td> - <td class="closing">July 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td> - <td class="city">Miami</td> - <td class="state">FL</td> - <td class="cert">25172</td> - <td class="ai">NAFH National Bank</td> - <td class="closing">July 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td> - <td class="city">Spartanburg</td> - <td class="state">SC</td> - <td class="cert">35383</td> - <td class="ai">NAFH National Bank</td> - <td class="closing">July 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td> - <td class="city">Bluffton</td> - <td class="state">SC</td> - <td class="cert">32571</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">July 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="homenatlok.html">Home National Bank</a></td> - <td class="city">Blackwell</td> - <td class="state">OK</td> - <td class="cert">11636</td> - <td class="ai">RCB Bank</td> - <td class="closing">July 9, 2010</td> - <td class="updated">December 10, 2012</td> - </tr> - <tr> - <td class="institution"><a href="usabankny.html">USA Bank</a></td> - <td class="city">Port Chester</td> - <td class="state">NY</td> - <td class="cert">58072</td> - <td class="ai">New Century Bank</td> - <td 
class="closing">July 9, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td> - <td class="city">Baltimore</td> - <td class="state">MD</td> - <td class="cert">32456</td> - <td class="ai">No Acquirer</td> - <td class="closing">July 9, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td> - <td class="city">Baltimore</td> - <td class="state">MD</td> - <td class="cert">35462</td> - <td class="ai">Bay Bank, FSB</td> - <td class="closing">July 9, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td> - <td class="city">Albuquerque</td> - <td class="state">NM</td> - <td class="cert">35279</td> - <td class="ai">First American Bank</td> - <td class="closing">June 25, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstnatga.html">First National Bank</a></td> - <td class="city">Savannah</td> - <td class="state">GA</td> - <td class="cert">34152</td> - <td class="ai">The Savannah Bank, N.A.</td> - <td class="closing">June 25, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td> - <td class="city">Englewood</td> - <td class="state">FL</td> - <td class="cert">26563</td> - <td class="ai">Premier American Bank, N.A.</td> - <td class="closing">June 25, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td> - <td class="city">Reno</td> - <td class="state">NV</td> - <td class="cert">57110</td> - <td class="ai">Umpqua Bank</td> - <td class="closing">June 18, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td 
class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td> - <td class="city">Seattle</td> - <td class="state">WA</td> - <td class="cert">32955</td> - <td class="ai">East West Bank</td> - <td class="closing">June 11, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td> - <td class="city">Lincoln</td> - <td class="state">NE</td> - <td class="cert">29341</td> - <td class="ai">Great Western Bank</td> - <td class="closing">June 4, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td> - <td class="city">Arcola</td> - <td class="state">IL</td> - <td class="cert">31813</td> - <td class="ai">No Acquirer</td> - <td class="closing">June 4, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstnatms.html">First National Bank</a></td> - <td class="city">Rosedale</td> - <td class="state">MS</td> - <td class="cert">15814</td> - <td class="ai">The Jefferson Bank</td> - <td class="closing">June 4, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td> - <td class="city">Las Vegas</td> - <td class="state">NV</td> - <td class="cert">34785</td> - <td class="ai">City National Bank</td> - <td class="closing">May 28, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td> - <td class="city">Granite Bay</td> - <td class="state">CA</td> - <td class="cert">57315</td> - <td class="ai">Tri Counties Bank</td> - <td class="closing">May 28, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td> - <td 
class="city">Tampa</td> - <td class="state">FL</td> - <td class="cert">57814</td> - <td class="ai">EverBank</td> - <td class="closing">May 28, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td> - <td class="city">Naples</td> - <td class="state">FL</td> - <td class="cert">35106</td> - <td class="ai">EverBank</td> - <td class="closing">May 28, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td> - <td class="city">Fort Lauderdale</td> - <td class="state">FL</td> - <td class="cert">57360</td> - <td class="ai">EverBank</td> - <td class="closing">May 28, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td> - <td class="city">Saint Paul</td> - <td class="state">MN</td> - <td class="cert">57735</td> - <td class="ai">Coulee Bank</td> - <td class="closing">May 21, 2010</td> - <td class="updated">October 26, 2012</td> - </tr> - <tr> - <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td> - <td class="city">Elmwood Park</td> - <td class="state">IL</td> - <td class="cert">18117</td> - <td class="ai">FirstMerit Bank, N.A.</td> - <td class="closing">May 14, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td> - <td class="city">Springfield</td> - <td class="state">MO</td> - <td class="cert">34255</td> - <td class="ai">Simmons First National Bank</td> - <td class="closing">May 14, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td> - <td class="city">Plymouth</td> - <td class="state">MI</td> - <td class="cert">35586</td> - <td class="ai">Bank 
of Ann Arbor</td> - <td class="closing">May 14, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td> - <td class="city">Saint Marys</td> - <td class="state">GA</td> - <td class="cert">35114</td> - <td class="ai">Ameris Bank</td> - <td class="closing">May 14, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td> - <td class="city">San Diego</td> - <td class="state">CA</td> - <td class="cert">35517</td> - <td class="ai">City National Bank</td> - <td class="closing">May 7, 2010</td> - <td class="updated">December 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td> - <td class="city">Mesa</td> - <td class="state">AZ</td> - <td class="cert">57697</td> - <td class="ai">Commerce Bank of Arizona</td> - <td class="closing">May 7, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="accessbank.html">Access Bank</a></td> - <td class="city">Champlin</td> - <td class="state">MN</td> - <td class="cert">16476</td> - <td class="ai">PrinsBank</td> - <td class="closing">May 7, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td> - <td class="city">Bonifay</td> - <td class="state">FL</td> - <td class="cert">14246</td> - <td class="ai">First Federal Bank of Florida</td> - <td class="closing">May 7, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="frontier.html">Frontier Bank</a></td> - <td class="city">Everett</td> - <td class="state">WA</td> - <td class="cert">22710</td> - <td class="ai">Union Bank, N.A.</td> - <td class="closing">April 30, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td 
class="institution"><a href="bc-natl.html">BC National Banks</a></td> - <td class="city">Butler</td> - <td class="state">MO</td> - <td class="cert">17792</td> - <td class="ai">Community First Bank</td> - <td class="closing">April 30, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="champion.html">Champion Bank</a></td> - <td class="city">Creve Coeur</td> - <td class="state">MO</td> - <td class="cert">58362</td> - <td class="ai">BankLiberty</td> - <td class="closing">April 30, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td> - <td class="city">Port Huron</td> - <td class="state">MI</td> - <td class="cert">30005</td> - <td class="ai">First Michigan Bank</td> - <td class="closing">April 30, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td> - <td class="city">Mayaguez</td> - <td class="state">PR</td> - <td class="cert">31027</td> - <td class="ai">Banco Popular de Puerto Rico</td> - <td class="closing">April 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td> - <td class="city">Hato Rey</td> - <td class="state">PR</td> - <td class="cert">32185</td> - <td class="ai">Scotiabank de Puerto Rico</td> - <td class="closing">April 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td> - <td class="city">San Juan</td> - <td class="state">PR</td> - <td class="cert">27150</td> - <td class="ai">Oriental Bank and Trust</td> - 
<td class="closing">April 30, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td> - <td class="city">Naperville</td> - <td class="state">IL</td> - <td class="cert">58429</td> - <td class="ai">Wheaton Bank &amp; Trust</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td> - <td class="city">Peotone</td> - <td class="state">IL</td> - <td class="cert">10888</td> - <td class="ai">First Midwest Bank</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">30600</td> - <td class="ai">Northbrook Bank &amp; Trust Company</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="new-century-il.html">New Century Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">34821</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">34658</td> - <td class="ai">Republic Bank of Chicago</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="broadway.html">Broadway Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">22853</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">April 23, 2010</td> - <td 
class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td> - <td class="city">Rockford</td> - <td class="state">IL</td> - <td class="cert">3735</td> - <td class="ai">Harris N.A.</td> - <td class="closing">April 23, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citybank.html">City Bank</a></td> - <td class="city">Lynnwood</td> - <td class="state">WA</td> - <td class="cert">21521</td> - <td class="ai">Whidbey Island Bank</td> - <td class="closing">April 16, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td> - <td class="city">San Rafael</td> - <td class="state">CA</td> - <td class="cert">33493</td> - <td class="ai">Union Bank, N.A.</td> - <td class="closing">April 16, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="innovative.html">Innovative Bank</a></td> - <td class="city">Oakland</td> - <td class="state">CA</td> - <td class="cert">23876</td> - <td class="ai">Center Bank</td> - <td class="closing">April 16, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="butlerbank.html">Butler Bank</a></td> - <td class="city">Lowell</td> - <td class="state">MA</td> - <td class="cert">26619</td> - <td class="ai">People's United Bank</td> - <td class="closing">April 16, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td> - <td class="city">Fort Pierce</td> - <td class="state">FL</td> - <td class="cert">24067</td> - <td class="ai">TD Bank, N.A.</td> - <td class="closing">April 16, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td> 
- <td class="city">Clermont</td> - <td class="state">FL</td> - <td class="cert">57724</td> - <td class="ai">TD Bank, N.A.</td> - <td class="closing">April 16, 2010</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td> - <td class="city">Palatka</td> - <td class="state">FL</td> - <td class="cert">28886</td> - <td class="ai">TD Bank, N.A.</td> - <td class="closing">April 16, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td> - <td class="city">Sterling Heights</td> - <td class="state">MI</td> - <td class="cert">34878</td> - <td class="ai">No Acquirer</td> - <td class="closing">April 16, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td> - <td class="city">Myrtle Beach</td> - <td class="state">SC</td> - <td class="cert">34242</td> - <td class="ai">Bank of North Carolina</td> - <td class="closing">April 9, 2010</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td> - <td class="city">Phoenix</td> - <td class="state">AZ</td> - <td class="cert">57060</td> - <td class="ai">New York Community Bank</td> - <td class="closing">March 26, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td> - <td class="city">Cartersville</td> - <td class="state">GA</td> - <td class="cert">34678</td> - <td class="ai">Bank of the Ozarks</td> - <td class="closing">March 26, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="key-west.html">Key West Bank</a></td> - <td class="city">Key West</td> - <td class="state">FL</td> - <td class="cert">34684</td> - 
<td class="ai">Centennial Bank</td> - <td class="closing">March 26, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td> - <td class="city">Carrollton</td> - <td class="state">GA</td> - <td class="cert">57399</td> - <td class="ai">CharterBank</td> - <td class="closing">March 26, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td> - <td class="city">Aurora</td> - <td class="state">MN</td> - <td class="cert">8221</td> - <td class="ai">Northern State Bank</td> - <td class="closing">March 19, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td> - <td class="city">Fort Deposit</td> - <td class="state">AL</td> - <td class="cert">24957</td> - <td class="ai">First Citizens Bank</td> - <td class="closing">March 19, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td> - <td class="city">Hiawassee</td> - <td class="state">GA</td> - <td class="cert">10054</td> - <td class="ai">Citizens South Bank</td> - <td class="closing">March 19, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td> - <td class="city">Ellijay</td> - <td class="state">GA</td> - <td class="cert">33989</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">March 19, 2010</td> - <td class="updated">October 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td> - <td class="city">Draper</td> - <td class="state">UT</td> - <td class="cert">33535</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 19, 2010</td> - <td 
class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cent-security.html">Century Security Bank</a></td> - <td class="city">Duluth</td> - <td class="state">GA</td> - <td class="cert">58104</td> - <td class="ai">Bank of Upson</td> - <td class="closing">March 19, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td> - <td class="city">Parma</td> - <td class="state">OH</td> - <td class="cert">18806</td> - <td class="ai">The National Bank and Trust Company</td> - <td class="closing">March 19, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="statewide.html">Statewide Bank</a></td> - <td class="city">Covington</td> - <td class="state">LA</td> - <td class="cert">29561</td> - <td class="ai">Home Bank</td> - <td class="closing">March 12, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td> - <td class="city">Orlando</td> - <td class="state">FL</td> - <td class="cert">58182</td> - <td class="ai">Centennial Bank</td> - <td class="closing">March 12, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td> - <td class="city">New York</td> - <td class="state">NY</td> - <td class="cert">27096</td> - <td class="ai">Valley National Bank</td> - <td class="closing">March 12, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td> - <td class="city">New York</td> - <td class="state">NY</td> - <td class="cert">58071</td> - <td class="ai">Valley National Bank</td> - <td class="closing">March 11, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="centennial-ut.html">Centennial Bank</a></td> - <td class="city">Ogden</td> - <td class="state">UT</td> - <td class="cert">34430</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 5, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td> - <td class="city">Germantown</td> - <td class="state">MD</td> - <td class="cert">34976</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 5, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td> - <td class="city">Normal</td> - <td class="state">IL</td> - <td class="cert">9268</td> - <td class="ai">Heartland Bank and Trust Company</td> - <td class="closing">March 5, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td> - <td class="city">Boca Raton</td> - <td class="state">FL</td> - <td class="cert">27126</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">March 5, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td> - <td class="city">Tacoma</td> - <td class="state">WA</td> - <td class="cert">38129</td> - <td class="ai">Umpqua Bank</td> - <td class="closing">February 26, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td> - <td class="city">Carson City</td> - <td class="state">NV</td> - <td class="cert">58352</td> - <td class="ai">Heritage Bank of Nevada</td> - <td class="closing">February 26, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td> - <td class="city">La Jolla</td> - <td 
class="state">CA</td> - <td class="cert">32423</td> - <td class="ai">OneWest Bank, FSB</td> - <td class="closing">February 19, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td> - <td class="city">Orland Park</td> - <td class="state">IL</td> - <td class="cert">29952</td> - <td class="ai">FirstMerit Bank, N.A.</td> - <td class="closing">February 19, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td> - <td class="city">La Coste</td> - <td class="state">TX</td> - <td class="cert">3287</td> - <td class="ai">Community National Bank</td> - <td class="closing">February 19, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="marco.html">Marco Community Bank</a></td> - <td class="city">Marco Island</td> - <td class="state">FL</td> - <td class="cert">57586</td> - <td class="ai">Mutual of Omaha Bank</td> - <td class="closing">February 19, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td> - <td class="city">Hancock</td> - <td class="state">MN</td> - <td class="cert">15448</td> - <td class="ai">Community Development Bank, FSB</td> - <td class="closing">February 5, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td> - <td class="city">Bainbridge Island</td> - <td class="state">WA</td> - <td class="cert">16730</td> - <td class="ai">Columbia State Bank</td> - <td class="closing">January 29, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstregional.html">First Regional Bank</a></td> - <td class="city">Los Angeles</td> - <td 
class="state">CA</td> - <td class="cert">23011</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">January 29, 2010</td> - <td class="updated">August 24, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td> - <td class="city">Cornelia</td> - <td class="state">GA</td> - <td class="cert">5702</td> - <td class="ai">SCBT National Association</td> - <td class="closing">January 29, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td> - <td class="city">Hallock</td> - <td class="state">MN</td> - <td class="cert">16133</td> - <td class="ai">United Valley Bank</td> - <td class="closing">January 29, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td> - <td class="city">Immokalee</td> - <td class="state">FL</td> - <td class="cert">5672</td> - <td class="ai">Premier American Bank, N.A.</td> - <td class="closing">January 29, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td> - <td class="city">Carrollton</td> - <td class="state">GA</td> - <td class="cert">16480</td> - <td class="ai">Community &amp; Southern Bank</td> - <td class="closing">January 29, 2010</td> - <td class="updated">December 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td> - <td class="city">The Dalles</td> - <td class="state">OR</td> - <td class="cert">22469</td> - <td class="ai">Columbia State Bank</td> - <td class="closing">January 22, 2010</td> - <td class="updated">September 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td> - <td class="city">Seattle</td> - <td 
class="state">WA</td> - <td class="cert">20501</td> - <td class="ai">Umpqua Bank</td> - <td class="closing">January 22, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="charter-nm.html">Charter Bank</a></td> - <td class="city">Santa Fe</td> - <td class="state">NM</td> - <td class="cert">32498</td> - <td class="ai">Charter Bank</td> - <td class="closing">January 22, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="leeton.html">Bank of Leeton</a></td> - <td class="city">Leeton</td> - <td class="state">MO</td> - <td class="cert">8265</td> - <td class="ai">Sunflower Bank, N.A.</td> - <td class="closing">January 22, 2010</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td> - <td class="city">Miami</td> - <td class="state">FL</td> - <td class="cert">57147</td> - <td class="ai">Premier American Bank, N.A.</td> - <td class="closing">January 22, 2010</td> - <td class="updated">December 13, 2012</td> - </tr> - <tr> - <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td> - <td class="city">Kaysville</td> - <td class="state">UT</td> - <td class="cert">1252</td> - <td class="ai">No Acquirer</td> - <td class="closing">January 15, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td> - <td class="city">St. Stephen</td> - <td class="state">MN</td> - <td class="cert">17522</td> - <td class="ai">First State Bank of St. 
Joseph</td> - <td class="closing">January 15, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="towncommunity.html">Town Community Bank &amp; Trust</a></td> - <td class="city">Antioch</td> - <td class="state">IL</td> - <td class="cert">34705</td> - <td class="ai">First American Bank</td> - <td class="closing">January 15, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td> - <td class="city">Bellingham</td> - <td class="state">WA</td> - <td class="cert">22977</td> - <td class="ai">Washington Federal Savings and Loan Association</td> - <td class="closing">January 8, 2010</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td> - <td class="city">Santa Monica</td> - <td class="state">CA</td> - <td class="cert">28536</td> - <td class="ai">OneWest Bank, FSB</td> - <td class="closing">December 18, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td> - <td class="city">La Jolla</td> - <td class="state">CA</td> - <td class="cert">26348</td> - <td class="ai">City National Bank</td> - <td class="closing">December 18, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td> - <td class="city">Springfield</td> - <td class="state">IL</td> - <td class="cert">26820</td> - <td class="ai">The Independent BankersBank (TIB)</td> - <td class="closing">December 18, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td> - <td class="city">Irondale</td> - <td class="state">AL</td> - <td class="cert">32276</td> - <td class="ai">Beal Bank</td> - 
<td class="closing">December 18, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td> - <td class="city">New Baltimore</td> - <td class="state">MI</td> - <td class="cert">1006</td> - <td class="ai">No Acquirer</td> - <td class="closing">December 18, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td> - <td class="city">Panama City</td> - <td class="state">FL</td> - <td class="cert">32167</td> - <td class="ai">Hancock Bank</td> - <td class="closing">December 18, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">58315</td> - <td class="ai">No Acquirer</td> - <td class="closing">December 18, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="solutions.html">SolutionsBank</a></td> - <td class="city">Overland Park</td> - <td class="state">KS</td> - <td class="cert">4731</td> - <td class="ai">Arvest Bank</td> - <td class="closing">December 11, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td> - <td class="city">Mesa</td> - <td class="state">AZ</td> - <td class="cert">58399</td> - <td class="ai">Enterprise Bank &amp; Trust</td> - <td class="closing">December 11, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td> - <td class="city">Miami</td> - <td class="state">FL</td> - <td class="cert">22846</td> - <td class="ai">1st United Bank</td> - <td class="closing">December 11, 2009</td> - <td class="updated">November 5, 
2012</td> - </tr> - <tr> - <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td> - <td class="city">Reston</td> - <td class="state">VA</td> - <td class="cert">32583</td> - <td class="ai">Sonabank</td> - <td class="closing">December 4, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td> - <td class="city">Aurora</td> - <td class="state">IL</td> - <td class="cert">10440</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">December 4, 2009</td> - <td class="updated">August 23, 2012</td> - </tr> - <tr> - <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td> - <td class="city">Cleveland</td> - <td class="state">OH</td> - <td class="cert">29776</td> - <td class="ai">New York Community Bank</td> - <td class="closing">December 4, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td> - <td class="city">Reidsville</td> - <td class="state">GA</td> - <td class="cert">12080</td> - <td class="ai">Heritage Bank of the South</td> - <td class="closing">December 4, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td> - <td class="city">Norcross</td> - <td class="state">GA</td> - <td class="cert">26290</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">December 4, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">34663</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">December 4, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td> - <td class="city">Fort Myers</td> - <td class="state">FL</td> - <td class="cert">58016</td> - <td class="ai">Central Bank</td> - <td class="closing">November 20, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td> - <td class="city">San Clemente</td> - <td class="state">CA</td> - <td class="cert">57914</td> - <td class="ai">Sunwest Bank</td> - <td class="closing">November 13, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="orion-fl.html">Orion Bank</a></td> - <td class="city">Naples</td> - <td class="state">FL</td> - <td class="cert">22427</td> - <td class="ai">IBERIABANK</td> - <td class="closing">November 13, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td> - <td class="city">Sarasota</td> - <td class="state">FL</td> - <td class="cert">32267</td> - <td class="ai">IBERIABANK</td> - <td class="closing">November 13, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ucb.html">United Commercial Bank</a></td> - <td class="city">San Francisco</td> - <td class="state">CA</td> - <td class="cert">32469</td> - <td class="ai">East West Bank</td> - <td class="closing">November 6, 2009</td> - <td class="updated">November 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td> - <td class="city">St. 
Louis</td> - <td class="state">MO</td> - <td class="cert">19450</td> - <td class="ai">Central Bank of Kansas City</td> - <td class="closing">November 6, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td> - <td class="city">Oakdale</td> - <td class="state">MN</td> - <td class="cert">35074</td> - <td class="ai">Alerus Financial, N.A.</td> - <td class="closing">November 6, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td> - <td class="city">Detroit</td> - <td class="state">MI</td> - <td class="cert">30329</td> - <td class="ai">Liberty Bank and Trust Company</td> - <td class="closing">November 6, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td> - <td class="city">Sparta</td> - <td class="state">GA</td> - <td class="cert">22286</td> - <td class="ai">Ameris Bank</td> - <td class="closing">November 6, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td> - <td class="city">Houston</td> - <td class="state">TX</td> - <td class="cert">18776</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td> - <td class="city">Madisonville</td> - <td class="state">TX</td> - <td class="cert">33782</td> - <td class="ai">U.S. 
Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td> - <td class="city">Teague</td> - <td class="state">TX</td> - <td class="cert">25222</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="park-il.html">Park National Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">11677</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td> - <td class="city">San Francisco</td> - <td class="state">CA</td> - <td class="cert">30006</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="calnational.html">California National Bank</a></td> - <td class="city">Los Angeles</td> - <td class="state">CA</td> - <td class="cert">34659</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td> - <td class="city">San Diego</td> - <td class="state">CA</td> - <td class="cert">23594</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td> - <td class="city">Lemont</td> - <td class="state">IL</td> - <td class="cert">35291</td> - <td class="ai">U.S. 
Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td> - <td class="city">Phoenix</td> - <td class="state">AZ</td> - <td class="cert">32218</td> - <td class="ai">U.S. Bank N.A.</td> - <td class="closing">October 30, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td> - <td class="city">Westmont</td> - <td class="state">IL</td> - <td class="cert">35038</td> - <td class="ai">First Midwest Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td> - <td class="city">Otsego</td> - <td class="state">MN</td> - <td class="cert">57525</td> - <td class="ai">Central Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td> - <td class="city">Racine</td> - <td class="state">WI</td> - <td class="cert">18321</td> - <td class="ai">Tri City National Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="flagship.html">Flagship National Bank</a></td> - <td class="city">Bradenton</td> - <td class="state">FL</td> - <td class="cert">35044</td> - <td class="ai">First Federal Bank of Florida</td> - <td class="closing">October 23, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td> - <td class="city">Naples</td> - <td class="state">FL</td> - <td class="cert">58336</td> - <td class="ai">Stonegate Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - 
<tr> - <td class="institution"><a href="americanunited.html">American United Bank</a></td> - <td class="city">Lawrenceville</td> - <td class="state">GA</td> - <td class="cert">57794</td> - <td class="ai">Ameris Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="partners-fl.html">Partners Bank</a></td> - <td class="city">Naples</td> - <td class="state">FL</td> - <td class="cert">57959</td> - <td class="ai">Stonegate Bank</td> - <td class="closing">October 23, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td> - <td class="city">Bakersfield</td> - <td class="state">CA</td> - <td class="cert">23266</td> - <td class="ai">Citizens Business Bank</td> - <td class="closing">October 16, 2009</td> - <td class="updated">August 22, 2012</td> - </tr> - <tr> - <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td> - <td class="city">Pueblo</td> - <td class="state">CO</td> - <td class="cert">57263</td> - <td class="ai">Legacy Bank</td> - <td class="closing">October 2, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td> - <td class="city">Spring Grove</td> - <td class="state">MN</td> - <td class="cert">11416</td> - <td class="ai">Central Bank</td> - <td class="closing">October 2, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="warren-mi.html">Warren Bank</a></td> - <td class="city">Warren</td> - <td class="state">MI</td> - <td class="cert">34824</td> - <td class="ai">The Huntington National Bank</td> - <td class="closing">October 2, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="georgian.html">Georgian Bank</a></td> - <td class="city">Atlanta</td> - 
<td class="state">GA</td> - <td class="cert">57151</td> - <td class="ai">First Citizens Bank and Trust Company, Inc.</td> - <td class="closing">September 25, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td> - <td class="city">Louisville</td> - <td class="state">KY</td> - <td class="cert">57068</td> - <td class="ai">First Financial Bank, N.A.</td> - <td class="closing">September 18, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td> - <td class="city">Columbus</td> - <td class="state">IN</td> - <td class="cert">10100</td> - <td class="ai">First Financial Bank, N.A.</td> - <td class="closing">September 18, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="venture-wa.html">Venture Bank</a></td> - <td class="city">Lacey</td> - <td class="state">WA</td> - <td class="cert">22868</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">September 11, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td> - <td class="city">Woodbury</td> - <td class="state">MN</td> - <td class="cert">57736</td> - <td class="ai">CorTrust Bank N.A.</td> - <td class="closing">September 11, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">13693</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">September 11, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firststate-az.html">First State Bank</a></td> - <td class="city">Flagstaff</td> - <td 
class="state">AZ</td> - <td class="cert">34875</td> - <td class="ai">Sunwest Bank</td> - <td class="closing">September 4, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td> - <td class="city">Rolling Meadows</td> - <td class="state">IL</td> - <td class="cert">35030</td> - <td class="ai">No Acquirer</td> - <td class="closing">September 4, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="vantus.html">Vantus Bank</a></td> - <td class="city">Sioux City</td> - <td class="state">IN</td> - <td class="cert">27732</td> - <td class="ai">Great Southern Bank</td> - <td class="closing">September 4, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="inbank.html">InBank</a></td> - <td class="city">Oak Forest</td> - <td class="state">IL</td> - <td class="cert">20203</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">September 4, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td> - <td class="city">Kansas City</td> - <td class="state">MO</td> - <td class="cert">25231</td> - <td class="ai">Great American Bank</td> - <td class="closing">September 4, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td> - <td class="city">Ventura</td> - <td class="state">CA</td> - <td class="cert">27197</td> - <td class="ai">Pacific Western Bank</td> - <td class="closing">August 28, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td> - <td class="city">Forest Lake</td> - <td class="state">MN</td> - <td class="cert">1909</td> - <td class="ai">Central Bank</td> - <td 
class="closing">August 28, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td> - <td class="city">Baltimore</td> - <td class="state">MD</td> - <td class="cert">28312</td> - <td class="ai">Manufacturers and Traders Trust Company (M&amp;T Bank)</td> - <td class="closing">August 28, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td> - <td class="city">Austin</td> - <td class="state">TX</td> - <td class="cert">32618</td> - <td class="ai">BBVA Compass</td> - <td class="closing">August 21, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td> - <td class="city">Birmingham</td> - <td class="state">AL</td> - <td class="cert">22130</td> - <td class="ai">IBERIABANK</td> - <td class="closing">August 21, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="coweta.html">First Coweta Bank</a></td> - <td class="city">Newnan</td> - <td class="state">GA</td> - <td class="cert">57702</td> - <td class="ai">United Bank</td> - <td class="closing">August 21, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="ebank.html">ebank</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">34682</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">August 21, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td> - <td class="city">Las Vegas</td> - <td class="state">NV</td> - <td class="cert">34043</td> - <td class="ai">No Acquirer</td> - <td class="closing">August 14, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="community-az.html">Community Bank of Arizona</a></td> - <td class="city">Phoenix</td> - <td class="state">AZ</td> - <td class="cert">57645</td> - <td class="ai">MidFirst Bank</td> - <td class="closing">August 14, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td> - <td class="city">Gilbert</td> - <td class="state">AZ</td> - <td class="cert">34485</td> - <td class="ai">MidFirst Bank</td> - <td class="closing">August 14, 2009</td> - <td class="updated">August 21, 2012</td> - </tr> - <tr> - <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td> - <td class="city">Montgomery</td> - <td class="state">AL</td> - <td class="cert">9609</td> - <td class="ai">Branch Banking &amp; Trust Company, (BB&amp;T)</td> - <td class="closing">August 14, 2009</td> - <td class="updated">September 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td> - <td class="city">Pittsburgh</td> - <td class="state">PA</td> - <td class="cert">31559</td> - <td class="ai">PNC Bank, N.A.</td> - <td class="closing">August 14, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="community-prineville.html">Community First Bank</a></td> - <td class="city">Prineville</td> - <td class="state">OR</td> - <td class="cert">23268</td> - <td class="ai">Home Federal Bank</td> - <td class="closing">August 7, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td> - <td class="city">Venice</td> - <td class="state">FL</td> - <td class="cert">27183</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">August 7, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="fsb-sarasota.html">First State Bank</a></td> - <td class="city">Sarasota</td> - <td class="state">FL</td> - <td class="cert">27364</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">August 7, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td> - <td class="city">Harvey</td> - <td class="state">IL</td> - <td class="cert">18659</td> - <td class="ai">United Central Bank</td> - <td class="closing">July 31, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americano.html">First BankAmericano</a></td> - <td class="city">Elizabeth</td> - <td class="state">NJ</td> - <td class="cert">34270</td> - <td class="ai">Crown Bank</td> - <td class="closing">July 31, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td> - <td class="city">West Chester</td> - <td class="state">OH</td> - <td class="cert">32288</td> - <td class="ai">First Financial Bank, N.A.</td> - <td class="closing">July 31, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td> - <td class="city">Jupiter</td> - <td class="state">FL</td> - <td class="cert">57604</td> - <td class="ai">Stonegate Bank</td> - <td class="closing">July 31, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td> - <td class="city">Altus</td> - <td class="state">OK</td> - <td class="cert">9873</td> - <td class="ai">Herring Bank</td> - <td class="closing">July 31, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td> - <td class="city">Gray</td> - <td class="state">GA</td> - <td 
class="cert">8486</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td> - <td class="city">Perry</td> - <td class="state">GA</td> - <td class="cert">27048</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td> - <td class="city">Macon</td> - <td class="state">GA</td> - <td class="cert">27367</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td> - <td class="city">Woodstock</td> - <td class="state">GA</td> - <td class="cert">57105</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td> - <td class="city">Alpharetta</td> - <td class="state">GA</td> - <td class="cert">57430</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td> - <td class="city">Suwanee</td> - <td class="state">GA</td> - <td class="cert">57346</td> - <td class="ai">State Bank and Trust Company</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td> - <td class="city">Williamsville</td> - <td class="state">NY</td> - <td 
class="cert">58065</td> - <td class="ai">Evans Bank, N.A.</td> - <td class="closing">July 24, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td> - <td class="city">Temecula</td> - <td class="state">CA</td> - <td class="cert">34341</td> - <td class="ai">First-Citizens Bank &amp; Trust Company</td> - <td class="closing">July 17, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td> - <td class="city">Rancho Cucamonga</td> - <td class="state">CA</td> - <td class="cert">23556</td> - <td class="ai">California Bank &amp; Trust</td> - <td class="closing">July 17, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankfirst.html">BankFirst</a></td> - <td class="city">Sioux Falls</td> - <td class="state">SD</td> - <td class="cert">34103</td> - <td class="ai">Alerus Financial, N.A.</td> - <td class="closing">July 17, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td> - <td class="city">Winder</td> - <td class="state">GA</td> - <td class="cert">34594</td> - <td class="ai">First American Bank and Trust Company</td> - <td class="closing">July 17, 2009</td> - <td class="updated">January 15, 2013</td> - </tr> - <tr> - <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td> - <td class="city">Thermopolis</td> - <td class="state">WY</td> - <td class="cert">22754</td> - <td class="ai">Central Bank &amp; Trust</td> - <td class="closing">July 10, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="founders.html">Founders Bank</a></td> - <td class="city">Worth</td> - <td class="state">IL</td> - <td class="cert">18390</td> - <td class="ai">The PrivateBank and Trust Company</td> - <td 
class="closing">July 2, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td> - <td class="city">Dallas</td> - <td class="state">TX</td> - <td class="cert">57667</td> - <td class="ai">State Bank of Texas</td> - <td class="closing">July 2, 2009</td> - <td class="updated">October 26, 2012</td> - </tr> - <tr> - <td class="institution"><a href="danville.html">First National Bank of Danville</a></td> - <td class="city">Danville</td> - <td class="state">IL</td> - <td class="cert">3644</td> - <td class="ai">First Financial Bank, N.A.</td> - <td class="closing">July 2, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td> - <td class="city">Elizabeth</td> - <td class="state">IL</td> - <td class="cert">9262</td> - <td class="ai">Galena State Bank and Trust Company</td> - <td class="closing">July 2, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="rockriver.html">Rock River Bank</a></td> - <td class="city">Oregon</td> - <td class="state">IL</td> - <td class="cert">15302</td> - <td class="ai">The Harvard State Bank</td> - <td class="closing">July 2, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td> - <td class="city">Winchester</td> - <td class="state">IL</td> - <td class="cert">11710</td> - <td class="ai">The First National Bank of Beardstown</td> - <td class="closing">July 2, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="warner.html">John Warner Bank</a></td> - <td class="city">Clinton</td> - <td class="state">IL</td> - <td class="cert">12093</td> - <td class="ai">State Bank of Lincoln</td> - <td class="closing">July 2, 2009</td> - <td class="updated">August 
20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mirae.html">Mirae Bank</a></td> - <td class="city">Los Angeles</td> - <td class="state">CA</td> - <td class="cert">57332</td> - <td class="ai">Wilshire State Bank</td> - <td class="closing">June 26, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td> - <td class="city">Irvine</td> - <td class="state">CA</td> - <td class="cert">57893</td> - <td class="ai">Sunwest Bank</td> - <td class="closing">June 26, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="horizon.html">Horizon Bank</a></td> - <td class="city">Pine City</td> - <td class="state">MN</td> - <td class="cert">9744</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">June 26, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td> - <td class="city">Newnan</td> - <td class="state">GA</td> - <td class="cert">35285</td> - <td class="ai">CharterBank</td> - <td class="closing">June 26, 2009</td> - <td class="updated">August 20, 2012</td> - </tr> - <tr> - <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td> - <td class="city">Villa Rica</td> - <td class="state">GA</td> - <td class="cert">57436</td> - <td class="ai">No Acquirer</td> - <td class="closing">June 26, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td> - <td class="city">Anthony</td> - <td class="state">KS</td> - <td class="cert">4614</td> - <td class="ai">Bank of Kansas</td> - <td class="closing">June 19, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td> - <td class="city">Wilmington</td> - <td 
class="state">NC</td> - <td class="cert">27837</td> - <td class="ai">First Bank</td> - <td class="closing">June 19, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="scb.html">Southern Community Bank</a></td> - <td class="city">Fayetteville</td> - <td class="state">GA</td> - <td class="cert">35251</td> - <td class="ai">United Community Bank</td> - <td class="closing">June 19, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td> - <td class="city">Lincolnwood</td> - <td class="state">IL</td> - <td class="cert">17309</td> - <td class="ai">Republic Bank of Chicago</td> - <td class="closing">June 5, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td> - <td class="city">Macomb</td> - <td class="state">IL</td> - <td class="cert">5757</td> - <td class="ai">Morton Community Bank</td> - <td class="closing">May 22, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td> - <td class="city">Champaign</td> - <td class="state">IL</td> - <td class="cert">35175</td> - <td class="ai">Midland States Bank</td> - <td class="closing">May 22, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td> - <td class="city">Coral Gables</td> - <td class="state">FL</td> - <td class="cert">32247</td> - <td class="ai">BankUnited</td> - <td class="closing">May 21, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="westsound.html">Westsound Bank</a></td> - <td class="city">Bremerton</td> - <td class="state">WA</td> - <td class="cert">34843</td> - <td class="ai">Kitsap Bank</td> - <td class="closing">May 8, 
2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="americawest.html">America West Bank</a></td> - <td class="city">Layton</td> - <td class="state">UT</td> - <td class="cert">35461</td> - <td class="ai">Cache Valley Bank</td> - <td class="closing">May 1, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td> - <td class="city">Ridgewood</td> - <td class="state">NJ</td> - <td class="cert">57563</td> - <td class="ai">North Jersey Community Bank</td> - <td class="closing">May 1, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">26535</td> - <td class="ai">No Acquirer</td> - <td class="closing">May 1, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td> - <td class="city">Ketchum</td> - <td class="state">ID</td> - <td class="cert">34396</td> - <td class="ai">U.S. 
Bank, N.A.</td> - <td class="closing">April 24, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td> - <td class="city">Calabasas</td> - <td class="state">CA</td> - <td class="cert">32069</td> - <td class="ai">No Acquirer</td> - <td class="closing">April 24, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td> - <td class="city">Farmington Hills</td> - <td class="state">MI</td> - <td class="cert">34369</td> - <td class="ai">Level One Bank</td> - <td class="closing">April 24, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td> - <td class="city">Kennesaw</td> - <td class="state">GA</td> - <td class="cert">57943</td> - <td class="ai">Bank of North Georgia</td> - <td class="closing">April 24, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td> - <td class="city">Elko</td> - <td class="state">NV</td> - <td class="cert">33824</td> - <td class="ai">Nevada State Bank</td> - <td class="closing">April 17, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td> - <td class="city">Sugar Creek</td> - <td class="state">MO</td> - <td class="cert">8266</td> - <td class="ai">Metcalf Bank</td> - <td class="closing">April 17, 2009</td> - <td class="updated">August 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td> - <td class="city">Greeley</td> - <td class="state">CO</td> - <td class="cert">34881</td> - <td class="ai">No Acquirer</td> - <td class="closing">April 10, 2009</td> - <td class="updated">September 4, 2012</td> 
- </tr> - <tr> - <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td> - <td class="city">Wilmington</td> - <td class="state">NC</td> - <td class="cert">34639</td> - <td class="ai">First Federal Savings and Loan Association</td> - <td class="closing">April 10, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="omni.html">Omni National Bank</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">22238</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 27, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="teambank.html">TeamBank, NA</a></td> - <td class="city">Paola</td> - <td class="state">KS</td> - <td class="cert">4754</td> - <td class="ai">Great Southern Bank</td> - <td class="closing">March 20, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td> - <td class="city">Colorado Springs</td> - <td class="state">CO</td> - <td class="cert">18896</td> - <td class="ai">Herring Bank</td> - <td class="closing">March 20, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td> - <td class="city">Stockbridge</td> - <td class="state">GA</td> - <td class="cert">18243</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 20, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td> - <td class="city">Commerce</td> - <td class="state">GA</td> - <td class="cert">57558</td> - <td class="ai">Northeast Georgia Bank</td> - <td class="closing">March 6, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td> - <td 
class="city">Henderson</td> - <td class="state">NV</td> - <td class="cert">34820</td> - <td class="ai">Bank of Nevada</td> - <td class="closing">February 27, 2009</td> - <td class="updated">September 7, 2012</td> - </tr> - <tr> - <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td> - <td class="city">Glenwood</td> - <td class="state">IL</td> - <td class="cert">20078</td> - <td class="ai">MB Financial Bank, N.A.</td> - <td class="closing">February 27, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td> - <td class="city">Silverton</td> - <td class="state">OR</td> - <td class="cert">35399</td> - <td class="ai">Citizens Bank</td> - <td class="closing">February 20, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td> - <td class="city">Beaverton</td> - <td class="state">OR</td> - <td class="cert">57342</td> - <td class="ai">Washington Trust Bank of Spokane</td> - <td class="closing">February 13, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="cornbelt.html">Corn Belt Bank &amp; Trust Co.</a></td> - <td class="city">Pittsfield</td> - <td class="state">IL</td> - <td class="cert">16500</td> - <td class="ai">The Carlinville National Bank</td> - <td class="closing">February 13, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td> - <td class="city">Cape Coral</td> - <td class="state">FL</td> - <td class="cert">34563</td> - <td class="ai">TIB Bank</td> - <td class="closing">February 13, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sherman.html">Sherman County Bank</a></td> - <td class="city">Loup City</td> - <td class="state">NE</td> - 
<td class="cert">5431</td> - <td class="ai">Heritage Bank</td> - <td class="closing">February 13, 2009</td> - <td class="updated">August 17, 2012</td> - </tr> - <tr> - <td class="institution"><a href="county.html">County Bank</a></td> - <td class="city">Merced</td> - <td class="state">CA</td> - <td class="cert">22574</td> - <td class="ai">Westamerica Bank</td> - <td class="closing">February 6, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="alliance.html">Alliance Bank</a></td> - <td class="city">Culver City</td> - <td class="state">CA</td> - <td class="cert">23124</td> - <td class="ai">California Bank &amp; Trust</td> - <td class="closing">February 6, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td> - <td class="city">McDonough</td> - <td class="state">GA</td> - <td class="cert">57017</td> - <td class="ai">Regions Bank</td> - <td class="closing">February 6, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ocala.html">Ocala National Bank</a></td> - <td class="city">Ocala</td> - <td class="state">FL</td> - <td class="cert">26538</td> - <td class="ai">CenterState Bank of Florida, N.A.</td> - <td class="closing">January 30, 2009</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="suburban.html">Suburban FSB</a></td> - <td class="city">Crofton</td> - <td class="state">MD</td> - <td class="cert">30763</td> - <td class="ai">Bank of Essex</td> - <td class="closing">January 30, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="magnet.html">MagnetBank</a></td> - <td class="city">Salt Lake City</td> - <td class="state">UT</td> - <td class="cert">58001</td> - <td class="ai">No Acquirer</td> - <td class="closing">January 30, 2009</td> - <td class="updated">August 
16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td> - <td class="city">Redlands</td> - <td class="state">CA</td> - <td class="cert">33025</td> - <td class="ai">First California Bank</td> - <td class="closing">January 23, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="clark.html">Bank of Clark County</a></td> - <td class="city">Vancouver</td> - <td class="state">WA</td> - <td class="cert">34959</td> - <td class="ai">Umpqua Bank</td> - <td class="closing">January 16, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td> - <td class="city">Berkeley</td> - <td class="state">IL</td> - <td class="cert">19733</td> - <td class="ai">Republic Bank of Chicago</td> - <td class="closing">January 16, 2009</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td> - <td class="city">Sanderson</td> - <td class="state">TX</td> - <td class="cert">11568</td> - <td class="ai">The Pecos County State Bank</td> - <td class="closing">December 12, 2008</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td> - <td class="city">Duluth</td> - <td class="state">GA</td> - <td class="cert">35379</td> - <td class="ai">Branch Banking &amp; Trust Company, (BB&amp;T)</td> - <td class="closing">December 12, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td> - <td class="city">Jackson</td> - <td class="state">GA</td> - <td class="cert">34301</td> - <td class="ai">United Bank</td> - <td class="closing">December 5, 2008</td> - <td class="updated">August 16, 2012</td> - 
</tr> - <tr> - <td class="institution"><a href="pff.html">PFF Bank &amp; Trust</a></td> - <td class="city">Pomona</td> - <td class="state">CA</td> - <td class="cert">28344</td> - <td class="ai">U.S. Bank, N.A.</td> - <td class="closing">November 21, 2008</td> - <td class="updated">January 4, 2013</td> - </tr> - <tr> - <td class="institution"><a href="downey.html">Downey Savings &amp; Loan</a></td> - <td class="city">Newport Beach</td> - <td class="state">CA</td> - <td class="cert">30968</td> - <td class="ai">U.S. Bank, N.A.</td> - <td class="closing">November 21, 2008</td> - <td class="updated">January 4, 2013</td> - </tr> - <tr> - <td class="institution"><a href="community.html">Community Bank</a></td> - <td class="city">Loganville</td> - <td class="state">GA</td> - <td class="cert">16490</td> - <td class="ai">Bank of Essex</td> - <td class="closing">November 21, 2008</td> - <td class="updated">September 4, 2012</td> - </tr> - <tr> - <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td> - <td class="city">Los Angeles</td> - <td class="state">CA</td> - <td class="cert">23595</td> - <td class="ai">Pacific Western Bank</td> - <td class="closing">November 7, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td> - <td class="city">Houston</td> - <td class="state">TX</td> - <td class="cert">26870</td> - <td class="ai">Prosperity Bank</td> - <td class="closing">November 7, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="freedom.html">Freedom Bank</a></td> - <td class="city">Bradenton</td> - <td class="state">FL</td> - <td class="cert">57930</td> - <td class="ai">Fifth Third Bank</td> - <td class="closing">October 31, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="alpha.html">Alpha Bank &amp; Trust</a></td> - <td 
class="city">Alpharetta</td> - <td class="state">GA</td> - <td class="cert">58241</td> - <td class="ai">Stearns Bank, N.A.</td> - <td class="closing">October 24, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="meridian.html">Meridian Bank</a></td> - <td class="city">Eldred</td> - <td class="state">IL</td> - <td class="cert">13789</td> - <td class="ai">National Bank</td> - <td class="closing">October 10, 2008</td> - <td class="updated">May 31, 2012</td> - </tr> - <tr> - <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td> - <td class="city">Northville</td> - <td class="state">MI</td> - <td class="cert">57654</td> - <td class="ai">Monroe Bank &amp; Trust</td> - <td class="closing">October 10, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td> - <td class="city">Henderson</td> - <td class="state">NV</td> - <td class="cert">32633</td> - <td class="ai">JP Morgan Chase Bank</td> - <td class="closing">September 25, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="ameribank.html">Ameribank</a></td> - <td class="city">Northfork</td> - <td class="state">WV</td> - <td class="cert">6782</td> - <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td> - <td class="closing">September 19, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td> - <td class="city">Henderson</td> - <td class="state">NV</td> - <td class="cert">34194</td> - <td class="ai">Nevada State Bank</td> - <td class="closing">September 5, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="integrity.html">Integrity 
Bank</a></td> - <td class="city">Alpharetta</td> - <td class="state">GA</td> - <td class="cert">35469</td> - <td class="ai">Regions Bank</td> - <td class="closing">August 29, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="columbian.html">Columbian Bank &amp; Trust</a></td> - <td class="city">Topeka</td> - <td class="state">KS</td> - <td class="cert">22728</td> - <td class="ai">Citizens Bank &amp; Trust</td> - <td class="closing">August 22, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td> - <td class="city">Bradenton</td> - <td class="state">FL</td> - <td class="cert">57523</td> - <td class="ai">SunTrust Bank</td> - <td class="closing">August 1, 2008</td> - <td class="updated">August 16, 2012</td> - </tr> - <tr> - <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td> - <td class="city">Newport Beach</td> - <td class="state">CA</td> - <td class="cert">57961</td> - <td class="ai">Mutual of Omaha Bank</td> - <td class="closing">July 25, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td> - <td class="city">Reno</td> - <td class="state">NV</td> - <td class="cert">27011</td> - <td class="ai">Mutual of Omaha Bank</td> - <td class="closing">July 25, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td> - <td class="city">Pasadena</td> - <td class="state">CA</td> - <td class="cert">29730</td> - <td class="ai">OneWest Bank, FSB</td> - <td class="closing">July 11, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td> - <td class="city">Staples</td> - <td class="state">MN</td> - <td 
class="cert">12736</td> - <td class="ai">First International Bank and Trust</td> - <td class="closing">May 30, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="anb.html">ANB Financial, NA</a></td> - <td class="city">Bentonville</td> - <td class="state">AR</td> - <td class="cert">33901</td> - <td class="ai">Pulaski Bank and Trust Company</td> - <td class="closing">May 9, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="Hume.html">Hume Bank</a></td> - <td class="city">Hume</td> - <td class="state">MO</td> - <td class="cert">1971</td> - <td class="ai">Security Bank</td> - <td class="closing">March 7, 2008</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td> - <td class="city">Kansas City</td> - <td class="state">MO</td> - <td class="cert">24660</td> - <td class="ai">Liberty Bank and Trust Company</td> - <td class="closing">January 25, 2008</td> - <td class="updated">October 26, 2012</td> - </tr> - <tr> - <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td> - <td class="city">Lakeview</td> - <td class="state">OH</td> - <td class="cert">16848</td> - <td class="ai">The Citizens Banking Company</td> - <td class="closing">October 4, 2007</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="NetBank.html">NetBank</a></td> - <td class="city">Alpharetta</td> - <td class="state">GA</td> - <td class="cert">32575</td> - <td class="ai">ING DIRECT</td> - <td class="closing">September 28, 2007</td> - <td class="updated">August 28, 2012</td> - </tr> - <tr> - <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td> - <td class="city">Pittsburgh</td> - <td class="state">PA</td> - <td class="cert">35353</td> - <td class="ai">Allegheny Valley Bank of Pittsburgh</td> - <td 
class="closing">February 2, 2007</td> - <td class="updated">October 27, 2010</td> - </tr> - <tr> - <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td> - <td class="city">Ephraim</td> - <td class="state">UT</td> - <td class="cert">1249</td> - <td class="ai">Far West Bank</td> - <td class="closing">June 25, 2004</td> - <td class="updated">April 9, 2008</td> - </tr> - <tr> - <td class="institution"><a href="reliance.html">Reliance Bank</a></td> - <td class="city">White Plains</td> - <td class="state">NY</td> - <td class="cert">26778</td> - <td class="ai">Union State Bank</td> - <td class="closing">March 19, 2004</td> - <td class="updated">April 9, 2008</td> - </tr> - <tr> - <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td> - <td class="city">Tallahassee</td> - <td class="state">FL</td> - <td class="cert">26838</td> - <td class="ai">Hancock Bank of Florida</td> - <td class="closing">March 12, 2004</td> - <td class="updated">June 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td> - <td class="city">Newark</td> - <td class="state">NJ</td> - <td class="cert">31330</td> - <td class="ai">No Acquirer</td> - <td class="closing">February 14, 2004</td> - <td class="updated">April 9, 2008</td> - </tr> - <tr> - <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td> - <td class="city">Philadelphia</td> - <td class="state">PA</td> - <td class="cert">27203</td> - <td class="ai">Earthstar Bank</td> - <td class="closing">November 14, 2003</td> - <td class="updated">July 22, 2005</td> - </tr> - <tr> - <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td> - <td class="city">Blanchardville</td> - <td class="state">WI</td> - <td class="cert">11639</td> - <td class="ai">The Park Bank</td> - <td class="closing">May 9, 2003</td> - <td class="updated">June 5, 2012</td> - </tr> - <tr> - <td class="institution"><a 
href="spbank.html">Southern Pacific Bank</a></td> - <td class="city">Torrance</td> - <td class="state">CA</td> - <td class="cert">27094</td> - <td class="ai">Beal Bank</td> - <td class="closing">February 7, 2003</td> - <td class="updated">October 20, 2008</td> - </tr> - <tr> - <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td> - <td class="city">Cheneyville</td> - <td class="state">LA</td> - <td class="cert">16445</td> - <td class="ai">Sabine State Bank &amp; Trust</td> - <td class="closing">December 17, 2002</td> - <td class="updated">October 20, 2004</td> - </tr> - <tr> - <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td> - <td class="city">Alamo</td> - <td class="state">TN</td> - <td class="cert">9961</td> - <td class="ai">No Acquirer</td> - <td class="closing">November 8, 2002</td> - <td class="updated">March 18, 2005</td> - </tr> - <tr> - <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td> - <td class="city">Atlanta</td> - <td class="state">GA</td> - <td class="cert">33784</td> - <td class="ai">No Acquirer</td> - <td class="closing">September 30, 2002</td> - <td class="updated">September 11, 2006</td> - </tr> - <tr> - <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td> - <td class="city">Chicago</td> - <td class="state">IL</td> - <td class="cert">29355</td> - <td class="ai">Chicago Community Bank</td> - <td class="closing">June 27, 2002</td> - <td class="updated">April 9, 2008</td> - </tr> - <tr> - <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td> - <td class="city">Stamford</td> - <td class="state">CT</td> - <td class="cert">19183</td> - <td class="ai">Hudson United Bank</td> - <td class="closing">June 26, 2002</td> - <td class="updated">February 14, 2012</td> - </tr> - <tr> - <td class="institution"><a href="newcentury.html">New Century Bank</a></td> - <td 
class="city">Shelby Township</td> - <td class="state">MI</td> - <td class="cert">34979</td> - <td class="ai">No Acquirer</td> - <td class="closing">March 28, 2002</td> - <td class="updated">March 18, 2005</td> - </tr> - <tr> - <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td> - <td class="city">Boca Raton</td> - <td class="state">FL</td> - <td class="cert">26652</td> - <td class="ai">Bank Leumi USA</td> - <td class="closing">March 1, 2002</td> - <td class="updated">April 9, 2008</td> - </tr> - <tr> - <td class="institution"><a href="nextbank.html">NextBank, NA</a></td> - <td class="city">Phoenix</td> - <td class="state">AZ</td> - <td class="cert">22314</td> - <td class="ai">No Acquirer</td> - <td class="closing">February 7, 2002</td> - <td class="updated">August 27, 2010</td> - </tr> - <tr> - <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td> - <td class="city">Oakwood</td> - <td class="state">OH</td> - <td class="cert">8966</td> - <td class="ai">The State Bank &amp; Trust Company</td> - <td class="closing">February 1, 2002</td> - <td class="updated">October 25, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td> - <td class="city">Sierra Blanca</td> - <td class="state">TX</td> - <td class="cert">22002</td> - <td class="ai">The Security State Bank of Pecos</td> - <td class="closing">January 18, 2002</td> - <td class="updated">November 6, 2003</td> - </tr> - <tr> - <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td> - <td class="city">Miami</td> - <td class="state">FL</td> - <td class="cert">24382</td> - <td class="ai">Israel Discount Bank of New York</td> - <td class="closing">January 11, 2002</td> - <td class="updated">June 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td> - <td class="city">Gravette</td> - <td 
class="state">AR</td> - <td class="cert">34248</td> - <td class="ai">Delta Trust &amp; Bank</td> - <td class="closing">September 7, 2001</td> - <td class="updated">February 10, 2004</td> - </tr> - <tr> - <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td> - <td class="city">Hinsdale</td> - <td class="state">IL</td> - <td class="cert">32646</td> - <td class="ai">Superior Federal, FSB</td> - <td class="closing">July 27, 2001</td> - <td class="updated">June 5, 2012</td> - </tr> - <tr> - <td class="institution"><a href="Malta.html">Malta National Bank</a></td> - <td class="city">Malta</td> - <td class="state">OH</td> - <td class="cert">6629</td> - <td class="ai">North Valley Bank</td> - <td class="closing">May 3, 2001</td> - <td class="updated">November 18, 2002</td> - </tr> - <tr> - <td class="institution"><a href="firstalliance.html">First Alliance Bank &amp; Trust Co.</a></td> - <td class="city">Manchester</td> - <td class="state">NH</td> - <td class="cert">34264</td> - <td class="ai">Southern New Hampshire Bank &amp; Trust</td> - <td class="closing">February 2, 2001</td> - <td class="updated">February 18, 2003</td> - </tr> - <tr> - <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td> - <td class="city">Metropolis</td> - <td class="state">IL</td> - <td class="cert">3815</td> - <td class="ai">Banterra Bank of Marion</td> - <td class="closing">December 14, 2000</td> - <td class="updated">March 17, 2005</td> - </tr> - <tr> - <td class="institution"><a href="boh.html">Bank of Honolulu</a></td> - <td class="city">Honolulu</td> - <td class="state">HI</td> - <td class="cert">21029</td> - <td class="ai">Bank of the Orient</td> - <td class="closing">October 13, 2000</td> - <td class="updated">March 17, 2005</td> - </tr> - </tbody> - </table> - </div> - -</div> -<div id="page_foot"> - <div class="date">Last Updated 05/31/2013</div> - <div class="email"><a 
href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div> - <div class="clear"></div> -</div> - -<!-- START of Footer --> -<footer> -<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" /> -<div id="responsive_footer"> - <div id="responsive_footer-full"> - <ul> - <li><a href="/" title="Home">Home</a></li> - <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li> - <li><a href="/search/" title="Search">Search</a></li> - <li><a href="/help/" title="Help">Help</a></li> - <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li> - <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li> - <li><a href="/quicklinks/spanish.html" title="En Espa&ntilde;ol">En Espa&ntilde;ol</a></li> - </ul> - <hr> - <ul> - <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li> - <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li> - <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li> - <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li> - <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li> - </ul> - <hr> - <ul> - <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li> - <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li> - <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> - </ul> - </div> - <div id="responsive_footer-small"> - <ul> - <li><a href="/" title="Home">Home</a></li> - <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li> - <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li> - <li><a href="/search/" title="Search">Search</a></li> - </ul> - </div> -</div> -</footer> -<!-- START Omniture SiteCatalyst Code --> -<script 
language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script> -<script type="text/javascript"> -/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/ -var s_code=s.t();if(s_code)document.write(s_code)</script> -<script type="text/javascript"> -if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-') -</script> -<noscript> -<a href="http://www.omniture.com" title="Web Analytics"> -<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li> -</noscript> -<!--/DO NOT REMOVE/--> -<!-- END Omniture SiteCatalyst Code --> -<!-- END of Footer --> - -<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script> -<script type="text/javascript" src="banklist.js"></script> - -</body> -</html> +<!DOCTYPE html><!-- HTML5 --> +<html lang="en-US"> +<!-- Content language is American English. --> +<head> +<title>FDIC: Failed Bank List</title> +<!-- Meta Tags --> +<meta charset="UTF-8"> +<!-- Unicode character encoding --> +<meta http-equiv="X-UA-Compatible" content="IE=edge"> +<!-- Turns off IE Compatiblity Mode --> +<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> +<!-- Makes it so phones don't auto zoom out. 
--> +<meta name="author" content="DRR"> +<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims"> +<!-- CSS --> +<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css"> +<link rel="stylesheet" type="text/css" href="banklist.css"> +</head> +<body> + +<!-- START of Header --> +<script type="text/javascript" src="/responsive/header/js/header.js"></script> +<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" /> +<!-- googleac.html includes Autocomplete functionality --> +<!-- Autocomplete files --> +<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" /> +<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script> +<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script> +<script type="text/javascript"> +function findValue(li) { + if( li == null ) return alert("No match!"); + + // if coming from an AJAX call, let's use the Id as the value + if( !!li.extra ) var sValue = li.extra[0]; + + // otherwise, let's just display the value in the text box + else var sValue = li.selectValue; + + $('#googlesearch').submit(); + +} +function findValue2(li) { + if( li == null ) return alert("No match!"); + + // if coming from an AJAX call, let's use the Id as the value + if( !!li.extra ) var sValue = li.extra[0]; + + // otherwise, let's just display the value in the text box + else var sValue = li.selectValue; + + $('#googlesearch2').submit(); +} +function selectItem(li) { + findValue(li); +} +function selectItem2(li) { + findValue2(li); +} + +$().ready(function() { + + function log(event, data, formatted) { + $("<li>").html( !data ? "No match!" 
: "Selected: " + formatted).appendTo("#result"); + } + + function formatItem(row) { + return row[0] + " (<strong>id: " + row[1] + "</strong>)"; + } + function formatResult(row) { + return row[0].replace(/(<.+?>)/gi, ''); + } + + $("#newSearch").autocomplete("/searchjs.asp", { + width: 179, + autoFill: false, + //delay:10, + minChars:2, + cacheLength: 10, + onFindValue:findValue, + onItemSelect: selectItem, + selectFirst: false + + }); + + $("#search2").autocomplete("/searchjs.asp", { + width: 160, + autoFill: false, + //delay:10, + minChars:2, + cacheLength: 10, + onFindValue:findValue2, + onItemSelect: selectItem2, + selectFirst: false + + }); + +}); + +</script> +<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK --> + +<!-- FORESEE Code --> +<script type="text/javascript" src="/foresee/foresee-trigger.js"></script> + +<a href="#after_header" class="responsive_header-skip_header">Skip Header</a> +<header> +<div id="responsive_header"> + <div id="responsive_header-right_side"> + <ul id="responsive_header-links"> + <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li> + <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li> + <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li> + <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li> + <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li> + </ul> + <div id="responsive_header-search"> + <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced 
Search</a> + <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov"> + <fieldset> + <div class="form"> + <label for="q">Search FDIC.gov</label> + <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" /> + <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" /> + <input value="date:D:L:d1" name="sort" type="hidden" /> + + <input value="xml_no_dtd" name="output" type="hidden" /> + <input value="UTF-8" name="ie" type="hidden" /> + <input value="UTF-8" name="oe" type="hidden" /> + <input value="wwwGOV" name="client" type="hidden" /> + <input value="wwwGOV" name="proxystylesheet" type="hidden" /> + <input value="default" name="site" type="hidden" /> + </div> + </fieldset> + </form> + </div> + </div> + <!-- close right side --> + <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a> + <h1>Federal Deposit<br>Insurance Corporation</h1> + <h2>Each depositor insured to at least $250,000 per insured bank</h2> + <div class="clear"></div> + <nav> + <div id="responsive_header_nav"> + <div id="responsive_header-topnav"> + <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div> + <ul id="responsive_header-topnav-list"> + <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li> + <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li> + <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li> + <li id="responsive_header-topnav-bank" 
title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li> + <li id="responsive_header-topnav-regulations" title="Regulations &amp; Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations &amp; Examinations</a></li> + <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li> + <li id="responsive_header-topnav-news" title="News &amp; Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News &amp; Events</a></li> + <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li> + </ul> + <div class="clear"></div> + </div> + <div id="responsive_header-topnav_subnav"> + <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div> + <ul id="responsive_header-topnav-home_subnav"><li><a>&nbsp;</a></li></ul> + <ul id="responsive_header-topnav-deposit_subnav"> + <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li> + <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li> + <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li> + <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li> + <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li> + </ul> + <ul id="responsive_header-topnav-consumers_subnav"> + <li title="Consumer News &amp; Information"><a href="/consumers/consumer/">Consumer News &amp; Information</a></li> + <li title="Loans &amp; Mortgages"><a href="/consumers/loans/">Loans &amp; Mortgages</a></li> + <li title="Banking &amp; Your Money"><a href="/consumers/banking/">Banking &amp; Your 
Money</a></li> + <li title="Financial Education &amp; Literacy"><a href="/consumers/education/">Financial Education &amp; Literacy</a></li> + <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li> + <li title="Identity Theft &amp; Fraud"><a href="/consumers/theft/">Identity Theft &amp; Fraud</a></li> + <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li> + </ul> + <ul id="responsive_header-topnav-bank_subnav"> + <li title="Bank Data &amp; Statistics"><a href="/bank/statistical/">Bank Data &amp; Statistics</a></li> + <li title="Research &amp; Analysis"><a href="/bank/analytical/">Research &amp; Analysis</a></li> + <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li> + </ul> + <ul id="responsive_header-topnav-regulations_subnav"> + <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li> + <li title="Laws &amp; Regulations"><a href="/regulations/laws/">Laws &amp; Regulations</a></li> + <li title="Resources for Bank Officers &amp; Directors"><a href="/regulations/resources/">Resources for Bank Officers &amp; Directors</a></li> + <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li> + <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li> + <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li> + </ul> + <ul id="responsive_header-topnav-buying_subnav"> + <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li> + <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li> + <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li> + <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li> + <li title="Other Asset Sales"><a 
href="/buying/otherasset/">Other Asset Sales</a></li> + <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li> + </ul> + <ul id="responsive_header-topnav-news_subnav"> + <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li> + <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li> + <li title="Conferences &amp; Events"><a href="/news/conferences/">Conferences &amp; Events</a></li> + <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li> + <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li> + <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li> + <li title="Speeches &amp; Testimony"><a href="/news/news/speeches/chairman/">Speeches &amp; Testimony</a></li> + </ul> + <ul id="responsive_header-topnav-about_subnav"> + <li title="Mission &amp; Purpose"><a href="/about/index.html#1">Mission &amp; Purpose</a></span></li> + <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li> + <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li> + <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li> + <li title="Plans &amp; Reports"><a href="/about/index.html#5">Plans &amp; Reports</a></span></li> + <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li> + <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li> + </ul> + </div><!-- Close subnav --> + <div class="clear"></div> + </div> + </nav> +</div> +</header> +<a id="after_header" name="after_header"></a> +<script type="text/javascript"> +prepare_responsive_header_nav(); +</script> +<!-- END of 
Header --> + +<div id="breadcrumbs"><a href="/">Home</a> &gt; <a href="/bank/">Industry Analysis</a> &gt; <a href="/bank/individual/failed/">Failed Banks</a> &gt; Failed Bank List</div> + +<div id="content" class="failed_bank_list"> + + <h1 class="page_title">Failed Bank List</h1> + + <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p> + + <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p> + + <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. 
Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p> + + <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p> + + <script type="text/javascript"> + <!-- + document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>"); + //--> + </script> + + <div id="table_wrapper"> + <table id="table" class="sortable"> + <thead> + <tr> + <th id="institution" scope="col">Bank Name</th> + <th id="city" class="nosort" scope="col">City</th> + <th id="state" scope="col">ST</th> + <th id="cert" class="nosort" scope="col">CERT</th> + <th id="ai" scope="col">Acquiring Institution</th> + <th id="closing" scope="col">Closing Date</th> + <th id="updated" scope="col">Updated Date</th> + </tr> + </thead> + <tbody> + <tr> + <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td> + <td class="city">Kenosha</td> + <td class="state">WI</td> + <td class="cert">35386</td> + <td class="ai">North Shore Bank, FSB</td> + <td class="closing">May 31, 2013</td> + <td class="updated">May 31, 2013</td> + </tr> + <tr> + <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td> + <td class="city">Scottsdale</td> + <td class="state">AZ</td> + <td class="cert">34527</td> + <td class="ai">Western State Bank</td> + <td class="closing">May 14, 2013</td> + <td class="updated">May 20, 2013</td> + </tr> + <tr> + <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td> + <td class="city">Valdosta</td> + <td class="state">GA</td> + <td class="cert">58185</td> + <td class="ai">Synovus Bank</td> + <td class="closing">May 10, 2013</td> + <td class="updated">May 21, 2013</td> + </tr> + <tr> + <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td> + <td class="city">Asheville</td> + <td class="state">NC</td> + <td class="cert">58701</td> + <td 
class="ai">Capital Bank, N.A.</td> + <td class="closing">May 10, 2013</td> + <td class="updated">May 14, 2013</td> + </tr> + <tr> + <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td> + <td class="city">Douglasville</td> + <td class="state">GA</td> + <td class="cert">21649</td> + <td class="ai">Hamilton State Bank</td> + <td class="closing">April 26, 2013</td> + <td class="updated">May 16, 2013</td> + </tr> + <tr> + <td class="institution"><a href="parkway.html">Parkway Bank</a></td> + <td class="city">Lenoir</td> + <td class="state">NC</td> + <td class="cert">57158</td> + <td class="ai">CertusBank, National Association</td> + <td class="closing">April 26, 2013</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td> + <td class="city">Marianna</td> + <td class="state">FL</td> + <td class="cert">58034</td> + <td class="ai">First Federal Bank of Florida</td> + <td class="closing">April 19, 2013</td> + <td class="updated">May 16, 2013</td> + </tr> + <tr> + <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td> + <td class="city">Orange Park</td> + <td class="state">FL</td> + <td class="cert">26680</td> + <td class="ai">FirstAtlantic Bank</td> + <td class="closing">April 19, 2013</td> + <td class="updated">May 16, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td> + <td class="city">Lexington</td> + <td class="state">KY</td> + <td class="cert">29594</td> + <td class="ai">Your Community Bank</td> + <td class="closing">April 19, 2013</td> + <td class="updated">April 23, 2013</td> + </tr> + <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td> + <td class="city">Gold Canyon</td> + <td class="state">AZ</td> + <td class="cert">58066</td> + <td class="ai">First Scottsdale Bank, National Association</td> + <td class="closing">April 5, 2013</td> + 
<td class="updated">April 9, 2013</td> + </tr> + <tr> + <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td> + <td class="city">LaGrange</td> + <td class="state">GA</td> + <td class="cert">16431</td> + <td class="ai">HeritageBank of the South</td> + <td class="closing">March 8, 2013</td> + <td class="updated">March 26, 2013</td> + </tr> + <tr> + <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">22476</td> + <td class="ai">Liberty Bank and Trust Company</td> + <td class="closing">February 15, 2013</td> + <td class="updated">March 4, 2013</td> + </tr> + <tr> + <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td> + <td class="city">Andover</td> + <td class="state">MN</td> + <td class="cert">57157</td> + <td class="ai">First Minnesota Bank</td> + <td class="closing">January 18, 2013</td> + <td class="updated">February 28, 2013</td> + </tr> + <tr> + <td class="institution"><a href="westside.html">Westside Community Bank</a></td> + <td class="city">University Place</td> + <td class="state">WA</td> + <td class="cert">33997</td> + <td class="ai">Sunwest Bank</td> + <td class="closing">January 11, 2013</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td> + <td class="city">Sunrise Beach</td> + <td class="state">MO</td> + <td class="cert">27331</td> + <td class="ai">Bank of Sullivan</td> + <td class="closing">December 14, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td> + <td class="city">Braselton</td> + <td class="state">GA</td> + <td class="cert">57928</td> + <td class="ai">CertusBank, National Association</td> + <td class="closing">November 16, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td 
class="institution"><a href="cfnb.html">Citizens First National Bank</a></td> + <td class="city">Princeton</td> + <td class="state">IL</td> + <td class="cert">3731</td> + <td class="ai">Heartland Bank and Trust Company</td> + <td class="closing">November 2, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td> + <td class="city">Lutz</td> + <td class="state">FL</td> + <td class="cert">35009</td> + <td class="ai">Centennial Bank</td> + <td class="closing">November 2, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="novabank.html">NOVA Bank</a></td> + <td class="city">Berwyn</td> + <td class="state">PA</td> + <td class="cert">27148</td> + <td class="ai">No Acquirer</td> + <td class="closing">October 26, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="excelbank.html">Excel Bank</a></td> + <td class="city">Sedalia</td> + <td class="state">MO</td> + <td class="cert">19189</td> + <td class="ai">Simmons First National Bank</td> + <td class="closing">October 19, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td> + <td class="city">Tamarac</td> + <td class="state">FL</td> + <td class="cert">28144</td> + <td class="ai">Stearns Bank N.A.</td> + <td class="closing">October 19, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td> + <td class="city">Destin</td> + <td class="state">FL</td> + <td class="cert">58073</td> + <td class="ai">SmartBank</td> + <td class="closing">October 19, 2012</td> + <td class="updated">January 24, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firstunited.html">First United Bank</a></td> + <td class="city">Crete</td> + <td 
class="state">IL</td> + <td class="cert">20685</td> + <td class="ai">Old Plank Trail Community Bank, National Association</td> + <td class="closing">September 28, 2012</td> + <td class="updated">November 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="truman.html">Truman Bank</a></td> + <td class="city">St. Louis</td> + <td class="state">MO</td> + <td class="cert">27316</td> + <td class="ai">Simmons First National Bank</td> + <td class="closing">September 14, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td> + <td class="city">Bloomington</td> + <td class="state">MN</td> + <td class="cert">35246</td> + <td class="ai">Republic Bank &amp; Trust Company</td> + <td class="closing">September 7, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td> + <td class="city">Waukegan</td> + <td class="state">IL</td> + <td class="cert">28243</td> + <td class="ai">First Midwest Bank</td> + <td class="closing">August 3, 2012</td> + <td class="updated">October 11, 2012</td> + </tr> + <tr> + <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td> + <td class="city">Jasper</td> + <td class="state">GA</td> + <td class="cert">16240</td> + <td class="ai">Stearns Bank N.A.</td> + <td class="closing">July 27, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">27986</td> + <td class="ai">Hinsdale Bank &amp; Trust Company</td> + <td class="closing">July 20, 2012</td> + <td class="updated">January 14, 2013</td> + </tr> + <tr> + <td class="institution"><a href="heartland.html">Heartland Bank</a></td> + <td class="city">Leawood</td> + <td 
class="state">KS</td> + <td class="cert">1361</td> + <td class="ai">Metcalf Bank</td> + <td class="closing">July 20, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td> + <td class="city">Woodstock</td> + <td class="state">GA</td> + <td class="cert">32711</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">July 20, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td> + <td class="city">Buford</td> + <td class="state">GA</td> + <td class="cert">57847</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">July 20, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td> + <td class="city">Naples</td> + <td class="state">FL</td> + <td class="cert">57096</td> + <td class="ai">First National Bank of the Gulf Coast</td> + <td class="closing">July 20, 2012</td> + <td class="updated">January 7, 2013</td> + </tr> + <tr> + <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td> + <td class="city">Glasgow</td> + <td class="state">MO</td> + <td class="cert">1056</td> + <td class="ai">Regional Missouri Bank</td> + <td class="closing">July 13, 2012</td> + <td class="updated">October 11, 2012</td> + </tr> + <tr> + <td class="institution"><a href="montgomery.html">Montgomery Bank &amp; Trust</a></td> + <td class="city">Ailey</td> + <td class="state">GA</td> + <td class="cert">19498</td> + <td class="ai">Ameris Bank</td> + <td class="closing">July 6, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td> + <td class="city">Lynchburg</td> + <td class="state">TN</td> + <td class="cert">1690</td> + <td 
class="ai">Clayton Bank and Trust</td> + <td class="closing">June 15, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td> + <td class="city">Marietta</td> + <td class="state">GA</td> + <td class="cert">35299</td> + <td class="ai">Fidelity Bank</td> + <td class="closing">June 15, 2012</td> + <td class="updated">October 10, 2012</td> + </tr> + <tr> + <td class="institution"><a href="putnam.html">Putnam State Bank</a></td> + <td class="city">Palatka</td> + <td class="state">FL</td> + <td class="cert">27405</td> + <td class="ai">Harbor Community Bank</td> + <td class="closing">June 15, 2012</td> + <td class="updated">October 10, 2012</td> + </tr> + <tr> + <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td> + <td class="city">Whiteville</td> + <td class="state">NC</td> + <td class="cert">34515</td> + <td class="ai">First Community Bank</td> + <td class="closing">June 8, 2012</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td> + <td class="city">Shabbona</td> + <td class="state">IL</td> + <td class="cert">9257</td> + <td class="ai">First State Bank</td> + <td class="closing">June 8, 2012</td> + <td class="updated">October 10, 2012</td> + </tr> + <tr> + <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td> + <td class="city">Charleston</td> + <td class="state">SC</td> + <td class="cert">35372</td> + <td class="ai">Bank of North Carolina</td> + <td class="closing">June 8, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td> + <td class="city">Kingfisher</td> + <td class="state">OK</td> + <td class="cert">416</td> + <td class="ai">F &amp; M Bank</td> + <td class="closing">June 8, 2012</td> + <td class="updated">October 10, 
2012</td> + </tr> + <tr> + <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td> + <td class="city">Sylacauga</td> + <td class="state">AL</td> + <td class="cert">35224</td> + <td class="ai">Southern States Bank</td> + <td class="closing">May 18, 2012</td> + <td class="updated">May 20, 2013</td> + </tr> + <tr> + <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td> + <td class="city">North Lauderdale</td> + <td class="state">FL</td> + <td class="cert">23156</td> + <td class="ai">Banesco USA</td> + <td class="closing">May 4, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td> + <td class="city">Palm Desert</td> + <td class="state">CA</td> + <td class="cert">23632</td> + <td class="ai">Pacific Premier Bank</td> + <td class="closing">April 27, 2012</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td> + <td class="city">Pawleys Island</td> + <td class="state">SC</td> + <td class="cert">32503</td> + <td class="ai">First Federal Bank</td> + <td class="closing">April 27, 2012</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td> + <td class="city">Maple Grove</td> + <td class="state">MN</td> + <td class="cert">31495</td> + <td class="ai">Great Southern Bank</td> + <td class="closing">April 27, 2012</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td> + <td class="city">Gaithersburg</td> + <td class="state">MD</td> + <td class="cert">57766</td> + <td class="ai">Sonabank</td> + <td class="closing">April 27, 2012</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td 
class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td> + <td class="city">Cambridge</td> + <td class="state">MD</td> + <td class="cert">26759</td> + <td class="ai">No Acquirer</td> + <td class="closing">April 27, 2012</td> + <td class="updated">October 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td> + <td class="city">Fort Lee</td> + <td class="state">NJ</td> + <td class="cert">35527</td> + <td class="ai">Alma Bank</td> + <td class="closing">April 20, 2012</td> + <td class="updated">May 17, 2013</td> + </tr> + <tr> + <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td> + <td class="city">Dearborn</td> + <td class="state">MI</td> + <td class="cert">33883</td> + <td class="ai">The Huntington National Bank</td> + <td class="closing">March 30, 2012</td> + <td class="updated">May 16, 2013</td> + </tr> + <tr> + <td class="institution"><a href="premier-il.html">Premier Bank</a></td> + <td class="city">Wilmette</td> + <td class="state">IL</td> + <td class="cert">35419</td> + <td class="ai">International Bank of Chicago</td> + <td class="closing">March 23, 2012</td> + <td class="updated">October 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="covenant.html">Covenant Bank &amp; Trust</a></td> + <td class="city">Rock Spring</td> + <td class="state">GA</td> + <td class="cert">58068</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">March 23, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="newcity.html">New City Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">57597</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 9, 2012</td> + <td class="updated">October 29, 2012</td> + </tr> + <tr> + <td class="institution"><a href="global.html">Global Commerce Bank</a></td> + <td class="city">Doraville</td> + <td 
class="state">GA</td> + <td class="cert">34046</td> + <td class="ai">Metro City Bank</td> + <td class="closing">March 2, 2012</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td> + <td class="city">Little Falls</td> + <td class="state">MN</td> + <td class="cert">29178</td> + <td class="ai">No Acquirer</td> + <td class="closing">February 24, 2012</td> + <td class="updated">December 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td> + <td class="city">Ellaville</td> + <td class="state">GA</td> + <td class="cert">5687</td> + <td class="ai">Ameris Bank</td> + <td class="closing">February 24, 2012</td> + <td class="updated">August 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="scbbank.html">SCB Bank</a></td> + <td class="city">Shelbyville</td> + <td class="state">IN</td> + <td class="cert">29761</td> + <td class="ai">First Merchants Bank, National Association</td> + <td class="closing">February 10, 2012</td> + <td class="updated">March 25, 2013</td> + </tr> + <tr> + <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td> + <td class="city">Hoffman Estates</td> + <td class="state">IL</td> + <td class="cert">23187</td> + <td class="ai">Barrington Bank &amp; Trust Company, National Association</td> + <td class="closing">February 10, 2012</td> + <td class="updated">March 25, 2013</td> + </tr> + <tr> + <td class="institution"><a href="bankeast.html">BankEast</a></td> + <td class="city">Knoxville</td> + <td class="state">TN</td> + <td class="cert">19869</td> + <td class="ai">U.S.Bank National Association</td> + <td class="closing">January 27, 2012</td> + <td class="updated">March 8, 2013</td> + </tr> + <tr> + <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td> + <td class="city">Forest Lake</td> + <td class="state">MN</td> + <td class="cert">34823</td> + 
<td class="ai">First Resource Bank</td> + <td class="closing">January 27, 2012</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td> + <td class="city">Franklin</td> + <td class="state">TN</td> + <td class="cert">35296</td> + <td class="ai">Republic Bank &amp; Trust Company</td> + <td class="closing">January 27, 2012</td> + <td class="updated">November 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td> + <td class="city">Jacksonville</td> + <td class="state">FL</td> + <td class="cert">16579</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">January 27, 2012</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td> + <td class="city">Boothwyn</td> + <td class="state">PA</td> + <td class="cert">31581</td> + <td class="ai">Capital Bank, N.A.</td> + <td class="closing">January 20, 2012</td> + <td class="updated">January 25, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td> + <td class="city">Stockbridge</td> + <td class="state">GA</td> + <td class="cert">19252</td> + <td class="ai">Hamilton State Bank</td> + <td class="closing">January 20, 2012</td> + <td class="updated">January 25, 2013</td> + </tr> + <tr> + <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td> + <td class="city">Belleview</td> + <td class="state">FL</td> + <td class="cert">57186</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">January 20, 2012</td> + <td class="updated">January 25, 2013</td> + </tr> + <tr> + <td class="institution"><a href="westernnatl.html">Western National Bank</a></td> + <td class="city">Phoenix</td> + <td class="state">AZ</td> + <td 
class="cert">57917</td> + <td class="ai">Washington Federal</td> + <td class="closing">December 16, 2011</td> + <td class="updated">August 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td> + <td class="city">Crestview</td> + <td class="state">FL</td> + <td class="cert">58343</td> + <td class="ai">Summit Bank</td> + <td class="closing">December 16, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td> + <td class="city">Lacombe</td> + <td class="state">LA</td> + <td class="cert">19657</td> + <td class="ai">First NBC Bank</td> + <td class="closing">November 18, 2011</td> + <td class="updated">August 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td> + <td class="city">Johnston</td> + <td class="state">IA</td> + <td class="cert">14194</td> + <td class="ai">Grinnell State Bank</td> + <td class="closing">November 18, 2011</td> + <td class="updated">August 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td> + <td class="city">Rockmart</td> + <td class="state">GA</td> + <td class="cert">57860</td> + <td class="ai">Century Bank of Georgia</td> + <td class="closing">November 10, 2011</td> + <td class="updated">August 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td> + <td class="city">Saint George</td> + <td class="state">UT</td> + <td class="cert">57087</td> + <td class="ai">Cache Valley Bank</td> + <td class="closing">November 4, 2011</td> + <td class="updated">November 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td> + <td class="city">Omaha</td> + <td class="state">NE</td> + <td class="cert">19397</td> + <td class="ai">Premier Bank</td> + <td 
class="closing">November 4, 2011</td> + <td class="updated">August 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="allamerican.html ">All American Bank</a></td> + <td class="city">Des Plaines</td> + <td class="state">IL</td> + <td class="cert">57759</td> + <td class="ai">International Bank of Chicago</td> + <td class="closing">October 28, 2011</td> + <td class="updated">August 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td> + <td class="city">Greenwood Village</td> + <td class="state">CO</td> + <td class="cert">21132</td> + <td class="ai">Bank Midwest, N.A.</td> + <td class="closing">October 21, 2011</td> + <td class="updated">January 2, 2013</td> + </tr> + <tr> + <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td> + <td class="city">Jonesboro</td> + <td class="state">GA</td> + <td class="cert">57036</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">October 21, 2011</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="decatur.html">Decatur First Bank</a></td> + <td class="city">Decatur</td> + <td class="state">GA</td> + <td class="cert">34392</td> + <td class="ai">Fidelity Bank</td> + <td class="closing">October 21, 2011</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td> + <td class="city">Clearwater</td> + <td class="state">FL</td> + <td class="cert">57537</td> + <td class="ai">1st United Bank</td> + <td class="closing">October 21, 2011</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="countrybank.html">Country Bank</a></td> + <td class="city">Aledo</td> + <td class="state">IL</td> + <td class="cert">35395</td> + <td class="ai">Blackhawk Bank &amp; Trust</td> + <td class="closing">October 14, 2011</td> + <td class="updated">August 15, 
2012</td> + </tr> + <tr> + <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td> + <td class="city">Cranford</td> + <td class="state">NJ</td> + <td class="cert">58046</td> + <td class="ai">Northfield Bank</td> + <td class="closing">October 14, 2011</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td> + <td class="city">Asheville</td> + <td class="state">NC</td> + <td class="cert">32347</td> + <td class="ai">Bank of North Carolina</td> + <td class="closing">October 14, 2011</td> + <td class="updated">November 8, 2012</td> + </tr> + <tr> + <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td> + <td class="city">Gray</td> + <td class="state">GA</td> + <td class="cert">57256</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">October 14, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td> + <td class="city">Ellington</td> + <td class="state">MO</td> + <td class="cert">20115</td> + <td class="ai">Great Southern Bank</td> + <td class="closing">October 7, 2011</td> + <td class="updated">November 7, 2012</td> + </tr> + <tr> + <td class="institution"><a href="riverbank.html">The RiverBank</a></td> + <td class="city">Wyoming</td> + <td class="state">MN</td> + <td class="cert">10216</td> + <td class="ai">Central Bank</td> + <td class="closing">October 7, 2011</td> + <td class="updated">November 7, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstintlbank.html">First International Bank</a></td> + <td class="city">Plano</td> + <td class="state">TX</td> + <td class="cert">33513</td> + <td class="ai">American First National Bank</td> + <td class="closing">September 30, 2011</td> + <td class="updated">October 9, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="cbnc.html">Citizens Bank of Northern California</a></td> + <td class="city">Nevada City</td> + <td class="state">CA</td> + <td class="cert">33983</td> + <td class="ai">Tri Counties Bank</td> + <td class="closing">September 23, 2011</td> + <td class="updated">October 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td> + <td class="city">Norfolk</td> + <td class="state">VA</td> + <td class="cert">20408</td> + <td class="ai">Southern Bank and Trust Company</td> + <td class="closing">September 23, 2011</td> + <td class="updated">October 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td> + <td class="city">Milton</td> + <td class="state">FL</td> + <td class="cert">25155</td> + <td class="ai">CharterBank</td> + <td class="closing">September 9, 2011</td> + <td class="updated">September 6, 2012</td> + </tr> + <tr> + <td class="institution"><a href="creekside.html">CreekSide Bank</a></td> + <td class="city">Woodstock</td> + <td class="state">GA</td> + <td class="cert">58226</td> + <td class="ai">Georgia Commerce Bank</td> + <td class="closing">September 2, 2011</td> + <td class="updated">September 6, 2012</td> + </tr> + <tr> + <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td> + <td class="city">Cumming</td> + <td class="state">GA</td> + <td class="cert">58273</td> + <td class="ai">Georgia Commerce Bank</td> + <td class="closing">September 2, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td> + <td class="city">Geneva</td> + <td class="state">IL</td> + <td class="cert">57212</td> + <td class="ai">Inland Bank &amp; Trust</td> + <td class="closing">August 19, 2011</td> + <td class="updated">August 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td> 
+ <td class="city">Statesboro</td> + <td class="state">GA</td> + <td class="cert">57239</td> + <td class="ai">Heritage Bank of the South</td> + <td class="closing">August 19, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td> + <td class="city">Palm Beach</td> + <td class="state">FL</td> + <td class="cert">35356</td> + <td class="ai">Sabadell United Bank, N.A.</td> + <td class="closing">August 19, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td> + <td class="city">Huntingdon Valley</td> + <td class="state">PA</td> + <td class="cert">34130</td> + <td class="ai">Capital Bank, N.A.</td> + <td class="closing">August 18, 2011</td> + <td class="updated">August 15, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td> + <td class="city">Olathe</td> + <td class="state">KS</td> + <td class="cert">4744</td> + <td class="ai">Enterprise Bank &amp; Trust</td> + <td class="closing">August 12, 2011</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="whitman.html">Bank of Whitman</a></td> + <td class="city">Colfax</td> + <td class="state">WA</td> + <td class="cert">22528</td> + <td class="ai">Columbia State Bank</td> + <td class="closing">August 5, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td> + <td class="city">Shorewood</td> + <td class="state">IL</td> + <td class="cert">22637</td> + <td class="ai">Heartland Bank and Trust Company</td> + <td class="closing">August 5, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="integra.html">Integra Bank National Association</a></td> + <td class="city">Evansville</td> + <td 
class="state">IN</td> + <td class="cert">4392</td> + <td class="ai">Old National Bank</td> + <td class="closing">July 29, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td> + <td class="city">Columbia</td> + <td class="state">SC</td> + <td class="cert">58222</td> + <td class="ai">SCBT National Association</td> + <td class="closing">July 29, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td> + <td class="city">Richmond</td> + <td class="state">VA</td> + <td class="cert">58283</td> + <td class="ai">Xenith Bank</td> + <td class="closing">July 29, 2011</td> + <td class="updated">October 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td> + <td class="city">Greeley</td> + <td class="state">CO</td> + <td class="cert">2994</td> + <td class="ai">Bank Midwest, N.A.</td> + <td class="closing">July 22, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td> + <td class="city">Sarasota</td> + <td class="state">FL</td> + <td class="cert">35244</td> + <td class="ai">American Momentum Bank</td> + <td class="closing">July 22, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td> + <td class="city">Apollo Beach</td> + <td class="state">FL</td> + <td class="cert">58056</td> + <td class="ai">American Momentum Bank</td> + <td class="closing">July 22, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="summitbank.html">Summit Bank</a></td> + <td class="city">Prescott</td> + <td class="state">AZ</td> + <td class="cert">57442</td> + <td class="ai">The Foothills Bank</td> + <td 
class="closing">July 15, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td> + <td class="city">Port St. Lucie</td> + <td class="state">FL</td> + <td class="cert">34870</td> + <td class="ai">Premier American Bank, N.A.</td> + <td class="closing">July 15, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="hightrust.html">High Trust Bank</a></td> + <td class="city">Stockbridge</td> + <td class="state">GA</td> + <td class="cert">19554</td> + <td class="ai">Ameris Bank</td> + <td class="closing">July 15, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">58238</td> + <td class="ai">Ameris Bank</td> + <td class="closing">July 15, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="signaturebank.html">Signature Bank</a></td> + <td class="city">Windsor</td> + <td class="state">CO</td> + <td class="cert">57835</td> + <td class="ai">Points West Community Bank</td> + <td class="closing">July 8, 2011</td> + <td class="updated">October 26, 2012</td> + </tr> + <tr> + <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td> + <td class="city">Castle Rock</td> + <td class="state">CO</td> + <td class="cert">34522</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">July 8, 2011</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firstchicago.html">First Chicago Bank &amp; Trust</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">27935</td> + <td class="ai">Northbrook Bank &amp; Trust Company</td> + <td class="closing">July 8, 2011</td> + <td 
class="updated">September 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td> + <td class="city">Clayton</td> + <td class="state">GA</td> + <td class="cert">57593</td> + <td class="ai">First American Bank and Trust Company</td> + <td class="closing">June 24, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td> + <td class="city">Tampa</td> + <td class="state">FL</td> + <td class="cert">27583</td> + <td class="ai">Stonegate Bank</td> + <td class="closing">June 17, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td> + <td class="city">Jackson</td> + <td class="state">GA</td> + <td class="cert">19237</td> + <td class="ai">Hamilton State Bank</td> + <td class="closing">June 17, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td> + <td class="city">Charleston</td> + <td class="state">SC</td> + <td class="cert">58420</td> + <td class="ai">First Citizens Bank and Trust Company, Inc.</td> + <td class="closing">June 3, 2011</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td> + <td class="city">Snohomish</td> + <td class="state">WA</td> + <td class="cert">23626</td> + <td class="ai">Columbia State Bank</td> + <td class="closing">May 27, 2011</td> + <td class="updated">January 28, 2013</td> + </tr> + <tr> + <td class="institution"><a href="summit.html">Summit Bank</a></td> + <td class="city">Burlington</td> + <td class="state">WA</td> + <td class="cert">513</td> + <td class="ai">Columbia State Bank</td> + <td class="closing">May 20, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td 
class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td> + <td class="city">Franklin</td> + <td class="state">GA</td> + <td class="cert">57647</td> + <td class="ai">CertusBank, National Association</td> + <td class="closing">May 20, 2011</td> + <td class="updated">November 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td> + <td class="city">Macon</td> + <td class="state">GA</td> + <td class="cert">57213</td> + <td class="ai">CertusBank, National Association</td> + <td class="closing">May 20, 2011</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td> + <td class="city">Cocoa Beach</td> + <td class="state">FL</td> + <td class="cert">34898</td> + <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> + <td class="closing">May 6, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td> + <td class="city">Mount Clemens</td> + <td class="state">MI</td> + <td class="cert">34234</td> + <td class="ai">Talmer Bank &amp; Trust</td> + <td class="closing">April 29, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td> + <td class="city">Valdosta</td> + <td class="state">GA</td> + <td class="cert">19797</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">April 29, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td> + <td class="city">Dallas</td> + <td class="state">GA</td> + <td class="cert">58539</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">April 29, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td 
class="institution"><a href="cortez.html">Cortez Community Bank</a></td> + <td class="city">Brooksville</td> + <td class="state">FL</td> + <td class="cert">57625</td> + <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> + <td class="closing">April 29, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td> + <td class="city">Winter Park</td> + <td class="state">FL</td> + <td class="cert">26297</td> + <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td> + <td class="closing">April 29, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td> + <td class="city">Carthage</td> + <td class="state">MS</td> + <td class="cert">14273</td> + <td class="ai">Trustmark National Bank</td> + <td class="closing">April 15, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td> + <td class="city">Rosemount</td> + <td class="state">MN</td> + <td class="cert">24099</td> + <td class="ai">Central Bank</td> + <td class="closing">April 15, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="superior_al.html">Superior Bank</a></td> + <td class="city">Birmingham</td> + <td class="state">AL</td> + <td class="cert">17750</td> + <td class="ai">Superior Bank, National Association</td> + <td class="closing">April 15, 2011</td> + <td class="updated">November 30, 2012</td> + </tr> + <tr> + <td class="institution"><a href="nexity.html">Nexity Bank</a></td> + <td class="city">Birmingham</td> + <td class="state">AL</td> + <td class="cert">19794</td> + <td class="ai">AloStar Bank of Commerce</td> + <td class="closing">April 15, 2011</td> + <td class="updated">September 4, 2012</td> + 
</tr> + <tr> + <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td> + <td class="city">East Ellijay</td> + <td class="state">GA</td> + <td class="cert">57705</td> + <td class="ai">Citizens South Bank</td> + <td class="closing">April 15, 2011</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bartow.html">Bartow County Bank</a></td> + <td class="city">Cartersville</td> + <td class="state">GA</td> + <td class="cert">21495</td> + <td class="ai">Hamilton State Bank</td> + <td class="closing">April 15, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td> + <td class="city">Las Vegas</td> + <td class="state">NV</td> + <td class="cert">35418</td> + <td class="ai">City National Bank</td> + <td class="closing">April 8, 2011</td> + <td class="updated">September 9, 2012</td> + </tr> + <tr> + <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td> + <td class="city">Western Springs</td> + <td class="state">IL</td> + <td class="cert">10086</td> + <td class="ai">Heartland Bank and Trust Company</td> + <td class="closing">April 8, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td> + <td class="city">Wood Dale</td> + <td class="state">IL</td> + <td class="cert">34292</td> + <td class="ai">Advantage National Bank Group</td> + <td class="closing">March 25, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td> + <td class="city">Milwaukee</td> + <td class="state">WI</td> + <td class="cert">34818</td> + <td class="ai">Seaway Bank and Trust Company</td> + <td class="closing">March 11, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td 
class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td> + <td class="city">Davis</td> + <td class="state">OK</td> + <td class="cert">4077</td> + <td class="ai">The Pauls Valley National Bank</td> + <td class="closing">March 11, 2011</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td> + <td class="city">St. Charles</td> + <td class="state">IL</td> + <td class="cert">34187</td> + <td class="ai">First State Bank</td> + <td class="closing">February 25, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td> + <td class="city">San Luis Obispo</td> + <td class="state">CA</td> + <td class="cert">34783</td> + <td class="ai">First California Bank</td> + <td class="closing">February 18, 2011</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td> + <td class="city">Napa</td> + <td class="state">CA</td> + <td class="cert">57855</td> + <td class="ai">Bank of Marin</td> + <td class="closing">February 18, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td> + <td class="city">Springfield</td> + <td class="state">GA</td> + <td class="cert">34601</td> + <td class="ai">Heritage Bank of the South</td> + <td class="closing">February 18, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="habersham.html">Habersham Bank</a></td> + <td class="city">Clarkesville</td> + <td class="state">GA</td> + <td class="cert">151</td> + <td class="ai">SCBT National Association</td> + <td class="closing">February 18, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="canyonstate.html">Canyon National Bank</a></td> + <td class="city">Palm Springs</td> + <td class="state">CA</td> + <td class="cert">34692</td> + <td class="ai">Pacific Premier Bank</td> + <td class="closing">February 11, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td> + <td class="city">Cassville</td> + <td class="state">WI</td> + <td class="cert">13272</td> + <td class="ai">Royal Bank</td> + <td class="closing">February 11, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td> + <td class="city">Hamtramck</td> + <td class="state">MI</td> + <td class="cert">14939</td> + <td class="ai">First Michigan Bank</td> + <td class="closing">February 11, 2011</td> + <td class="updated">January 22, 2013</td> + </tr> + <tr> + <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td> + <td class="city">Port Orange</td> + <td class="state">FL</td> + <td class="cert">35478</td> + <td class="ai">Premier American Bank, N.A.</td> + <td class="closing">February 11, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">57948</td> + <td class="ai">Northbrook Bank &amp; Trust Company</td> + <td class="closing">February 4, 2011</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td> + <td class="city">Watkinsville</td> + <td class="state">GA</td> + <td class="cert">35242</td> + <td class="ai">BankSouth</td> + <td class="closing">February 4, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americantrust.html">American Trust 
Bank</a></td> + <td class="city">Roswell</td> + <td class="state">GA</td> + <td class="cert">57432</td> + <td class="ai">Renasant Bank</td> + <td class="closing">February 4, 2011</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td> + <td class="city">Taos</td> + <td class="state">NM</td> + <td class="cert">12261</td> + <td class="ai">U.S. Bank, N.A.</td> + <td class="closing">January 28, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstier.html">FirsTier Bank</a></td> + <td class="city">Louisville</td> + <td class="state">CO</td> + <td class="cert">57646</td> + <td class="ai">No Acquirer</td> + <td class="closing">January 28, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td> + <td class="city">Stoughton</td> + <td class="state">WI</td> + <td class="cert">5328</td> + <td class="ai">McFarland State Bank</td> + <td class="closing">January 28, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td> + <td class="city">Camargo</td> + <td class="state">OK</td> + <td class="cert">2303</td> + <td class="ai">Bank 7</td> + <td class="closing">January 28, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td> + <td class="city">Denver</td> + <td class="state">CO</td> + <td class="cert">31293</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">January 21, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td> + <td class="city">Asheville</td> + <td class="state">NC</td> + <td 
class="cert">34516</td> + <td class="ai">First Bank</td> + <td class="closing">January 21, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="commsouth.html">CommunitySouth Bank &amp; Trust</a></td> + <td class="city">Easley</td> + <td class="state">SC</td> + <td class="cert">57868</td> + <td class="ai">CertusBank, National Association</td> + <td class="closing">January 21, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td> + <td class="city">McDonough</td> + <td class="state">GA</td> + <td class="cert">19758</td> + <td class="ai">No Acquirer</td> + <td class="closing">January 21, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td> + <td class="city">Brunswick</td> + <td class="state">GA</td> + <td class="cert">57440</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">January 14, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="legacybank.html">Legacy Bank</a></td> + <td class="city">Scottsdale</td> + <td class="state">AZ</td> + <td class="cert">57820</td> + <td class="ai">Enterprise Bank &amp; Trust</td> + <td class="closing">January 7, 2011</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td> + <td class="city">Orlando</td> + <td class="state">FL</td> + <td class="cert">34965</td> + <td class="ai">First Southern Bank</td> + <td class="closing">January 7, 2011</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="communitynatl.html">Community National Bank</a></td> + <td class="city">Lino Lakes</td> + <td class="state">MN</td> + <td class="cert">23306</td> + <td class="ai">Farmers &amp; 
Merchants Savings Bank</td> + <td class="closing">December 17, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td> + <td class="city">Batesville</td> + <td class="state">AR</td> + <td class="cert">58052</td> + <td class="ai">Southern Bank</td> + <td class="closing">December 17, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">35065</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">December 17, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td> + <td class="city">McCaysville</td> + <td class="state">GA</td> + <td class="cert">58495</td> + <td class="ai">Peoples Bank of East Tennessee</td> + <td class="closing">December 17, 2010</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td> + <td class="city">Dawsonville</td> + <td class="state">GA</td> + <td class="cert">34578</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">December 17, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td> + <td class="city">Coral Gables</td> + <td class="state">FL</td> + <td class="cert">19040</td> + <td class="ai">1st United Bank</td> + <td class="closing">December 17, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td> + <td class="city">Southampton</td> + <td class="state">PA</td> + <td class="cert">35561</td> + <td class="ai">Polonia Bank</td> + <td 
class="closing">December 10, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="paramount.html">Paramount Bank</a></td> + <td class="city">Farmington Hills</td> + <td class="state">MI</td> + <td class="cert">34673</td> + <td class="ai">Level One Bank</td> + <td class="closing">December 10, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstbanking.html">First Banking Center</a></td> + <td class="city">Burlington</td> + <td class="state">WI</td> + <td class="cert">5287</td> + <td class="ai">First Michigan Bank</td> + <td class="closing">November 19, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td> + <td class="city">Bala Cynwyd</td> + <td class="state">PA</td> + <td class="cert">35078</td> + <td class="ai">VIST Bank</td> + <td class="closing">November 19, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td> + <td class="city">Carrabelle</td> + <td class="state">FL</td> + <td class="cert">20340</td> + <td class="ai">Centennial Bank</td> + <td class="closing">November 19, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td> + <td class="city">Scottsdale</td> + <td class="state">AZ</td> + <td class="cert">35463</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">November 12, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="darbybank.html">Darby Bank &amp; Trust Co.</a></td> + <td class="city">Vidalia</td> + <td class="state">GA</td> + <td class="cert">14580</td> + <td class="ai">Ameris Bank</td> + <td class="closing">November 12, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + 
<tr> + <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td> + <td class="city">Tifton</td> + <td class="state">GA</td> + <td class="cert">57831</td> + <td class="ai">Ameris Bank</td> + <td class="closing">November 12, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td> + <td class="city">Westminster</td> + <td class="state">CA</td> + <td class="cert">57885</td> + <td class="ai">Grandpoint Bank</td> + <td class="closing">November 5, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td> + <td class="city">Tacoma</td> + <td class="state">WA</td> + <td class="cert">34411</td> + <td class="ai">Heritage Bank</td> + <td class="closing">November 5, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td> + <td class="city">Woodland Hills</td> + <td class="state">CA</td> + <td class="cert">58087</td> + <td class="ai">First California Bank</td> + <td class="closing">November 5, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="kbank.html">K Bank</a></td> + <td class="city">Randallstown</td> + <td class="state">MD</td> + <td class="cert">31263</td> + <td class="ai">Manufacturers and Traders Trust Company (M&amp;T Bank)</td> + <td class="closing">November 5, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td> + <td class="city">Scottsdale</td> + <td class="state">AZ</td> + <td class="cert">32582</td> + <td class="ai">No Acquirer</td> + <td class="closing">October 22, 2010</td> + <td class="updated">August 20, 
2012</td> + </tr> + <tr> + <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td> + <td class="city">Overland Park</td> + <td class="state">KS</td> + <td class="cert">22173</td> + <td class="ai">Hillcrest Bank, N.A.</td> + <td class="closing">October 22, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td> + <td class="city">Maywood</td> + <td class="state">IL</td> + <td class="cert">16089</td> + <td class="ai">Seaway Bank and Trust Company</td> + <td class="closing">October 22, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td> + <td class="city">Barnesville</td> + <td class="state">GA</td> + <td class="cert">2119</td> + <td class="ai">United Bank</td> + <td class="closing">October 22, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="gordon.html">The Gordon Bank</a></td> + <td class="city">Gordon</td> + <td class="state">GA</td> + <td class="cert">33904</td> + <td class="ai">Morris Bank</td> + <td class="closing">October 22, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td> + <td class="city">Tampa</td> + <td class="state">FL</td> + <td class="cert">32251</td> + <td class="ai">Bay Cities Bank</td> + <td class="closing">October 22, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td> + <td class="city">Jacksonville</td> + <td class="state">FL</td> + <td class="cert">27573</td> + <td class="ai">Ameris Bank</td> + <td class="closing">October 22, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="premier_mo.html">Premier Bank</a></td> + <td class="city">Jefferson City</td> + <td class="state">MO</td> + <td class="cert">34016</td> + <td class="ai">Providence Bank</td> + <td class="closing">October 15, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td> + <td class="city">Chesterfield</td> + <td class="state">MO</td> + <td class="cert">58205</td> + <td class="ai">Midland States Bank</td> + <td class="closing">October 15, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td> + <td class="city">Olathe</td> + <td class="state">KS</td> + <td class="cert">30898</td> + <td class="ai">Simmons First National Bank</td> + <td class="closing">October 15, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td> + <td class="city">Shoreline</td> + <td class="state">WA</td> + <td class="cert">35250</td> + <td class="ai">GBC International Bank</td> + <td class="closing">October 1, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td> + <td class="city">Crawfordville</td> + <td class="state">FL</td> + <td class="cert">21777</td> + <td class="ai">Centennial Bank</td> + <td class="closing">October 1, 2010</td> + <td class="updated">November 2, 2012</td> + </tr> + <tr> + <td class="institution"><a href="northcounty.html">North County Bank</a></td> + <td class="city">Arlington</td> + <td class="state">WA</td> + <td class="cert">35053</td> + <td class="ai">Whidbey Island Bank</td> + <td class="closing">September 24, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td> + <td 
class="city">Ponte Vedra Beach</td> + <td class="state">FL</td> + <td class="cert">58308</td> + <td class="ai">First Southern Bank</td> + <td class="closing">September 24, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td> + <td class="city">West Allis</td> + <td class="state">WI</td> + <td class="cert">28612</td> + <td class="ai">North Shore Bank, FSB</td> + <td class="closing">September 17, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td> + <td class="city">Milford</td> + <td class="state">OH</td> + <td class="cert">27808</td> + <td class="ai">Foundation Bank</td> + <td class="closing">September 17, 2010</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td> + <td class="city">Winder</td> + <td class="state">GA</td> + <td class="cert">182</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">September 17, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td> + <td class="city">Douglasville</td> + <td class="state">GA</td> + <td class="cert">57448</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">September 17, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td> + <td class="city">Ellijay</td> + <td class="state">GA</td> + <td class="cert">58197</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">September 17, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="isnbank.html">ISN Bank</a></td> + <td class="city">Cherry Hill</td> + <td 
class="state">NJ</td> + <td class="cert">57107</td> + <td class="ai">Customers Bank</td> + <td class="closing">September 17, 2010</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td> + <td class="city">Bradenton</td> + <td class="state">FL</td> + <td class="cert">35061</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">September 10, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td> + <td class="city">Sonoma</td> + <td class="state">CA</td> + <td class="cert">27259</td> + <td class="ai">Westamerica Bank</td> + <td class="closing">August 20, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td> + <td class="city">Solvang</td> + <td class="state">CA</td> + <td class="cert">32165</td> + <td class="ai">Pacific Western Bank</td> + <td class="closing">August 20, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="butte.html">Butte Community Bank</a></td> + <td class="city">Chico</td> + <td class="state">CA</td> + <td class="cert">33219</td> + <td class="ai">Rabobank, N.A.</td> + <td class="closing">August 20, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td> + <td class="city">Stockton</td> + <td class="state">CA</td> + <td class="cert">27090</td> + <td class="ai">Rabobank, N.A.</td> + <td class="closing">August 20, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="shorebank.html">ShoreBank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">15640</td> + <td class="ai">Urban Partnership Bank</td> + <td class="closing">August 20, 2010</td> + <td 
class="updated">May 16, 2013</td> + </tr> + <tr> + <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td> + <td class="city">Martinsville</td> + <td class="state">VA</td> + <td class="cert">31623</td> + <td class="ai">River Community Bank, N.A.</td> + <td class="closing">August 20, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="inatbank.html">Independent National Bank</a></td> + <td class="city">Ocala</td> + <td class="state">FL</td> + <td class="cert">27344</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">August 20, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td> + <td class="city">Bartow</td> + <td class="state">FL</td> + <td class="cert">25266</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">August 20, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td> + <td class="city">Palos Heights</td> + <td class="state">IL</td> + <td class="cert">17599</td> + <td class="ai">First Midwest Bank</td> + <td class="closing">August 13, 2010</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">34231</td> + <td class="ai">Northbrook Bank &amp; Trust Company</td> + <td class="closing">August 6, 2010</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="libertyor.html">LibertyBank</a></td> + <td class="city">Eugene</td> + <td class="state">OR</td> + <td class="cert">31964</td> + <td class="ai">Home Federal Bank</td> + <td class="closing">July 30, 2010</td> + <td class="updated">August 22, 
2012</td> + </tr> + <tr> + <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td> + <td class="city">Longview</td> + <td class="state">WA</td> + <td class="cert">22643</td> + <td class="ai">Heritage Bank</td> + <td class="closing">July 30, 2010</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td> + <td class="city">Panama City Beach</td> + <td class="state">FL</td> + <td class="cert">9619</td> + <td class="ai">Centennial Bank</td> + <td class="closing">July 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td> + <td class="city">Port Saint Joe</td> + <td class="state">FL</td> + <td class="cert">57669</td> + <td class="ai">Centennial Bank</td> + <td class="closing">July 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="northwestga.html">Northwest Bank &amp; Trust</a></td> + <td class="city">Acworth</td> + <td class="state">GA</td> + <td class="cert">57658</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td> + <td class="city">Cave Junction</td> + <td class="state">OR</td> + <td class="cert">23181</td> + <td class="ai">South Valley Bank &amp; Trust</td> + <td class="closing">July 23, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td> + <td class="city">Las Vegas</td> + <td class="state">NV</td> + <td class="cert">35434</td> + <td class="ai">Plaza Bank</td> + <td class="closing">July 23, 2010</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="communitysecmn.html">Community Security 
Bank</a></td> + <td class="city">New Prague</td> + <td class="state">MN</td> + <td class="cert">34486</td> + <td class="ai">Roundbank</td> + <td class="closing">July 23, 2010</td> + <td class="updated">September 12, 2012</td> + </tr> + <tr> + <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td> + <td class="city">Sylvan Grove</td> + <td class="state">KS</td> + <td class="cert">10506</td> + <td class="ai">The Bennington State Bank</td> + <td class="closing">July 23, 2010</td> + <td class="updated">September 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td> + <td class="city">Kingstree</td> + <td class="state">SC</td> + <td class="cert">17837</td> + <td class="ai">First Citizens Bank and Trust Company, Inc.</td> + <td class="closing">July 23, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td> + <td class="city">Jasper</td> + <td class="state">GA</td> + <td class="cert">27559</td> + <td class="ai">Renasant Bank</td> + <td class="closing">July 23, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td> + <td class="city">Lantana</td> + <td class="state">FL</td> + <td class="cert">32536</td> + <td class="ai">IBERIABANK</td> + <td class="closing">July 23, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td> + <td class="city">Hastings</td> + <td class="state">MI</td> + <td class="cert">28136</td> + <td class="ai">Commercial Bank</td> + <td class="closing">July 16, 2010</td> + <td class="updated">September 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td> + <td class="city">Clewiston</td> + <td 
class="state">FL</td> + <td class="cert">28864</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">July 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td> + <td class="city">Aventura</td> + <td class="state">FL</td> + <td class="cert">32280</td> + <td class="ai">NAFH National Bank</td> + <td class="closing">July 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td> + <td class="city">Miami</td> + <td class="state">FL</td> + <td class="cert">25172</td> + <td class="ai">NAFH National Bank</td> + <td class="closing">July 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td> + <td class="city">Spartanburg</td> + <td class="state">SC</td> + <td class="cert">35383</td> + <td class="ai">NAFH National Bank</td> + <td class="closing">July 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td> + <td class="city">Bluffton</td> + <td class="state">SC</td> + <td class="cert">32571</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">July 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="homenatlok.html">Home National Bank</a></td> + <td class="city">Blackwell</td> + <td class="state">OK</td> + <td class="cert">11636</td> + <td class="ai">RCB Bank</td> + <td class="closing">July 9, 2010</td> + <td class="updated">December 10, 2012</td> + </tr> + <tr> + <td class="institution"><a href="usabankny.html">USA Bank</a></td> + <td class="city">Port Chester</td> + <td class="state">NY</td> + <td class="cert">58072</td> + <td class="ai">New Century Bank</td> + <td 
class="closing">July 9, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td> + <td class="city">Baltimore</td> + <td class="state">MD</td> + <td class="cert">32456</td> + <td class="ai">No Acquirer</td> + <td class="closing">July 9, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td> + <td class="city">Baltimore</td> + <td class="state">MD</td> + <td class="cert">35462</td> + <td class="ai">Bay Bank, FSB</td> + <td class="closing">July 9, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td> + <td class="city">Albuquerque</td> + <td class="state">NM</td> + <td class="cert">35279</td> + <td class="ai">First American Bank</td> + <td class="closing">June 25, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstnatga.html">First National Bank</a></td> + <td class="city">Savannah</td> + <td class="state">GA</td> + <td class="cert">34152</td> + <td class="ai">The Savannah Bank, N.A.</td> + <td class="closing">June 25, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td> + <td class="city">Englewood</td> + <td class="state">FL</td> + <td class="cert">26563</td> + <td class="ai">Premier American Bank, N.A.</td> + <td class="closing">June 25, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td> + <td class="city">Reno</td> + <td class="state">NV</td> + <td class="cert">57110</td> + <td class="ai">Umpqua Bank</td> + <td class="closing">June 18, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td 
class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td> + <td class="city">Seattle</td> + <td class="state">WA</td> + <td class="cert">32955</td> + <td class="ai">East West Bank</td> + <td class="closing">June 11, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td> + <td class="city">Lincoln</td> + <td class="state">NE</td> + <td class="cert">29341</td> + <td class="ai">Great Western Bank</td> + <td class="closing">June 4, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td> + <td class="city">Arcola</td> + <td class="state">IL</td> + <td class="cert">31813</td> + <td class="ai">No Acquirer</td> + <td class="closing">June 4, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstnatms.html">First National Bank</a></td> + <td class="city">Rosedale</td> + <td class="state">MS</td> + <td class="cert">15814</td> + <td class="ai">The Jefferson Bank</td> + <td class="closing">June 4, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td> + <td class="city">Las Vegas</td> + <td class="state">NV</td> + <td class="cert">34785</td> + <td class="ai">City National Bank</td> + <td class="closing">May 28, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td> + <td class="city">Granite Bay</td> + <td class="state">CA</td> + <td class="cert">57315</td> + <td class="ai">Tri Counties Bank</td> + <td class="closing">May 28, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td> + <td 
class="city">Tampa</td> + <td class="state">FL</td> + <td class="cert">57814</td> + <td class="ai">EverBank</td> + <td class="closing">May 28, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td> + <td class="city">Naples</td> + <td class="state">FL</td> + <td class="cert">35106</td> + <td class="ai">EverBank</td> + <td class="closing">May 28, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td> + <td class="city">Fort Lauderdale</td> + <td class="state">FL</td> + <td class="cert">57360</td> + <td class="ai">EverBank</td> + <td class="closing">May 28, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td> + <td class="city">Saint Paul</td> + <td class="state">MN</td> + <td class="cert">57735</td> + <td class="ai">Coulee Bank</td> + <td class="closing">May 21, 2010</td> + <td class="updated">October 26, 2012</td> + </tr> + <tr> + <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td> + <td class="city">Elmwood Park</td> + <td class="state">IL</td> + <td class="cert">18117</td> + <td class="ai">FirstMerit Bank, N.A.</td> + <td class="closing">May 14, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td> + <td class="city">Springfield</td> + <td class="state">MO</td> + <td class="cert">34255</td> + <td class="ai">Simmons First National Bank</td> + <td class="closing">May 14, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td> + <td class="city">Plymouth</td> + <td class="state">MI</td> + <td class="cert">35586</td> + <td class="ai">Bank 
of Ann Arbor</td> + <td class="closing">May 14, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td> + <td class="city">Saint Marys</td> + <td class="state">GA</td> + <td class="cert">35114</td> + <td class="ai">Ameris Bank</td> + <td class="closing">May 14, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td> + <td class="city">San Diego</td> + <td class="state">CA</td> + <td class="cert">35517</td> + <td class="ai">City National Bank</td> + <td class="closing">May 7, 2010</td> + <td class="updated">December 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td> + <td class="city">Mesa</td> + <td class="state">AZ</td> + <td class="cert">57697</td> + <td class="ai">Commerce Bank of Arizona</td> + <td class="closing">May 7, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="accessbank.html">Access Bank</a></td> + <td class="city">Champlin</td> + <td class="state">MN</td> + <td class="cert">16476</td> + <td class="ai">PrinsBank</td> + <td class="closing">May 7, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td> + <td class="city">Bonifay</td> + <td class="state">FL</td> + <td class="cert">14246</td> + <td class="ai">First Federal Bank of Florida</td> + <td class="closing">May 7, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="frontier.html">Frontier Bank</a></td> + <td class="city">Everett</td> + <td class="state">WA</td> + <td class="cert">22710</td> + <td class="ai">Union Bank, N.A.</td> + <td class="closing">April 30, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td 
class="institution"><a href="bc-natl.html">BC National Banks</a></td> + <td class="city">Butler</td> + <td class="state">MO</td> + <td class="cert">17792</td> + <td class="ai">Community First Bank</td> + <td class="closing">April 30, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="champion.html">Champion Bank</a></td> + <td class="city">Creve Coeur</td> + <td class="state">MO</td> + <td class="cert">58362</td> + <td class="ai">BankLiberty</td> + <td class="closing">April 30, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td> + <td class="city">Port Huron</td> + <td class="state">MI</td> + <td class="cert">30005</td> + <td class="ai">First Michigan Bank</td> + <td class="closing">April 30, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td> + <td class="city">Mayaguez</td> + <td class="state">PR</td> + <td class="cert">31027</td> + <td class="ai">Banco Popular de Puerto Rico</td> + <td class="closing">April 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td> + <td class="city">Hato Rey</td> + <td class="state">PR</td> + <td class="cert">32185</td> + <td class="ai">Scotiabank de Puerto Rico</td> + <td class="closing">April 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td> + <td class="city">San Juan</td> + <td class="state">PR</td> + <td class="cert">27150</td> + <td class="ai">Oriental Bank and Trust</td> + 
<td class="closing">April 30, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td> + <td class="city">Naperville</td> + <td class="state">IL</td> + <td class="cert">58429</td> + <td class="ai">Wheaton Bank &amp; Trust</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td> + <td class="city">Peotone</td> + <td class="state">IL</td> + <td class="cert">10888</td> + <td class="ai">First Midwest Bank</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">30600</td> + <td class="ai">Northbrook Bank &amp; Trust Company</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="new-century-il.html">New Century Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">34821</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">34658</td> + <td class="ai">Republic Bank of Chicago</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="broadway.html">Broadway Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">22853</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">April 23, 2010</td> + <td 
class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td> + <td class="city">Rockford</td> + <td class="state">IL</td> + <td class="cert">3735</td> + <td class="ai">Harris N.A.</td> + <td class="closing">April 23, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citybank.html">City Bank</a></td> + <td class="city">Lynnwood</td> + <td class="state">WA</td> + <td class="cert">21521</td> + <td class="ai">Whidbey Island Bank</td> + <td class="closing">April 16, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td> + <td class="city">San Rafael</td> + <td class="state">CA</td> + <td class="cert">33493</td> + <td class="ai">Union Bank, N.A.</td> + <td class="closing">April 16, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="innovative.html">Innovative Bank</a></td> + <td class="city">Oakland</td> + <td class="state">CA</td> + <td class="cert">23876</td> + <td class="ai">Center Bank</td> + <td class="closing">April 16, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="butlerbank.html">Butler Bank</a></td> + <td class="city">Lowell</td> + <td class="state">MA</td> + <td class="cert">26619</td> + <td class="ai">People's United Bank</td> + <td class="closing">April 16, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td> + <td class="city">Fort Pierce</td> + <td class="state">FL</td> + <td class="cert">24067</td> + <td class="ai">TD Bank, N.A.</td> + <td class="closing">April 16, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td> 
+ <td class="city">Clermont</td> + <td class="state">FL</td> + <td class="cert">57724</td> + <td class="ai">TD Bank, N.A.</td> + <td class="closing">April 16, 2010</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td> + <td class="city">Palatka</td> + <td class="state">FL</td> + <td class="cert">28886</td> + <td class="ai">TD Bank, N.A.</td> + <td class="closing">April 16, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td> + <td class="city">Sterling Heights</td> + <td class="state">MI</td> + <td class="cert">34878</td> + <td class="ai">No Acquirer</td> + <td class="closing">April 16, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td> + <td class="city">Myrtle Beach</td> + <td class="state">SC</td> + <td class="cert">34242</td> + <td class="ai">Bank of North Carolina</td> + <td class="closing">April 9, 2010</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td> + <td class="city">Phoenix</td> + <td class="state">AZ</td> + <td class="cert">57060</td> + <td class="ai">New York Community Bank</td> + <td class="closing">March 26, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td> + <td class="city">Cartersville</td> + <td class="state">GA</td> + <td class="cert">34678</td> + <td class="ai">Bank of the Ozarks</td> + <td class="closing">March 26, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="key-west.html">Key West Bank</a></td> + <td class="city">Key West</td> + <td class="state">FL</td> + <td class="cert">34684</td> + 
<td class="ai">Centennial Bank</td> + <td class="closing">March 26, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td> + <td class="city">Carrollton</td> + <td class="state">GA</td> + <td class="cert">57399</td> + <td class="ai">CharterBank</td> + <td class="closing">March 26, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td> + <td class="city">Aurora</td> + <td class="state">MN</td> + <td class="cert">8221</td> + <td class="ai">Northern State Bank</td> + <td class="closing">March 19, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td> + <td class="city">Fort Deposit</td> + <td class="state">AL</td> + <td class="cert">24957</td> + <td class="ai">First Citizens Bank</td> + <td class="closing">March 19, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td> + <td class="city">Hiawassee</td> + <td class="state">GA</td> + <td class="cert">10054</td> + <td class="ai">Citizens South Bank</td> + <td class="closing">March 19, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td> + <td class="city">Ellijay</td> + <td class="state">GA</td> + <td class="cert">33989</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">March 19, 2010</td> + <td class="updated">October 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td> + <td class="city">Draper</td> + <td class="state">UT</td> + <td class="cert">33535</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 19, 2010</td> + <td 
class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cent-security.html">Century Security Bank</a></td> + <td class="city">Duluth</td> + <td class="state">GA</td> + <td class="cert">58104</td> + <td class="ai">Bank of Upson</td> + <td class="closing">March 19, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td> + <td class="city">Parma</td> + <td class="state">OH</td> + <td class="cert">18806</td> + <td class="ai">The National Bank and Trust Company</td> + <td class="closing">March 19, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="statewide.html">Statewide Bank</a></td> + <td class="city">Covington</td> + <td class="state">LA</td> + <td class="cert">29561</td> + <td class="ai">Home Bank</td> + <td class="closing">March 12, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td> + <td class="city">Orlando</td> + <td class="state">FL</td> + <td class="cert">58182</td> + <td class="ai">Centennial Bank</td> + <td class="closing">March 12, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td> + <td class="city">New York</td> + <td class="state">NY</td> + <td class="cert">27096</td> + <td class="ai">Valley National Bank</td> + <td class="closing">March 12, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td> + <td class="city">New York</td> + <td class="state">NY</td> + <td class="cert">58071</td> + <td class="ai">Valley National Bank</td> + <td class="closing">March 11, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="centennial-ut.html">Centennial Bank</a></td> + <td class="city">Ogden</td> + <td class="state">UT</td> + <td class="cert">34430</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 5, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td> + <td class="city">Germantown</td> + <td class="state">MD</td> + <td class="cert">34976</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 5, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td> + <td class="city">Normal</td> + <td class="state">IL</td> + <td class="cert">9268</td> + <td class="ai">Heartland Bank and Trust Company</td> + <td class="closing">March 5, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td> + <td class="city">Boca Raton</td> + <td class="state">FL</td> + <td class="cert">27126</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">March 5, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td> + <td class="city">Tacoma</td> + <td class="state">WA</td> + <td class="cert">38129</td> + <td class="ai">Umpqua Bank</td> + <td class="closing">February 26, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td> + <td class="city">Carson City</td> + <td class="state">NV</td> + <td class="cert">58352</td> + <td class="ai">Heritage Bank of Nevada</td> + <td class="closing">February 26, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td> + <td class="city">La Jolla</td> + <td 
class="state">CA</td> + <td class="cert">32423</td> + <td class="ai">OneWest Bank, FSB</td> + <td class="closing">February 19, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td> + <td class="city">Orland Park</td> + <td class="state">IL</td> + <td class="cert">29952</td> + <td class="ai">FirstMerit Bank, N.A.</td> + <td class="closing">February 19, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td> + <td class="city">La Coste</td> + <td class="state">TX</td> + <td class="cert">3287</td> + <td class="ai">Community National Bank</td> + <td class="closing">February 19, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="marco.html">Marco Community Bank</a></td> + <td class="city">Marco Island</td> + <td class="state">FL</td> + <td class="cert">57586</td> + <td class="ai">Mutual of Omaha Bank</td> + <td class="closing">February 19, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td> + <td class="city">Hancock</td> + <td class="state">MN</td> + <td class="cert">15448</td> + <td class="ai">Community Development Bank, FSB</td> + <td class="closing">February 5, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td> + <td class="city">Bainbridge Island</td> + <td class="state">WA</td> + <td class="cert">16730</td> + <td class="ai">Columbia State Bank</td> + <td class="closing">January 29, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstregional.html">First Regional Bank</a></td> + <td class="city">Los Angeles</td> + <td 
class="state">CA</td> + <td class="cert">23011</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">January 29, 2010</td> + <td class="updated">August 24, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td> + <td class="city">Cornelia</td> + <td class="state">GA</td> + <td class="cert">5702</td> + <td class="ai">SCBT National Association</td> + <td class="closing">January 29, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td> + <td class="city">Hallock</td> + <td class="state">MN</td> + <td class="cert">16133</td> + <td class="ai">United Valley Bank</td> + <td class="closing">January 29, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td> + <td class="city">Immokalee</td> + <td class="state">FL</td> + <td class="cert">5672</td> + <td class="ai">Premier American Bank, N.A.</td> + <td class="closing">January 29, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td> + <td class="city">Carrollton</td> + <td class="state">GA</td> + <td class="cert">16480</td> + <td class="ai">Community &amp; Southern Bank</td> + <td class="closing">January 29, 2010</td> + <td class="updated">December 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td> + <td class="city">The Dalles</td> + <td class="state">OR</td> + <td class="cert">22469</td> + <td class="ai">Columbia State Bank</td> + <td class="closing">January 22, 2010</td> + <td class="updated">September 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td> + <td class="city">Seattle</td> + <td 
class="state">WA</td> + <td class="cert">20501</td> + <td class="ai">Umpqua Bank</td> + <td class="closing">January 22, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="charter-nm.html">Charter Bank</a></td> + <td class="city">Santa Fe</td> + <td class="state">NM</td> + <td class="cert">32498</td> + <td class="ai">Charter Bank</td> + <td class="closing">January 22, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="leeton.html">Bank of Leeton</a></td> + <td class="city">Leeton</td> + <td class="state">MO</td> + <td class="cert">8265</td> + <td class="ai">Sunflower Bank, N.A.</td> + <td class="closing">January 22, 2010</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td> + <td class="city">Miami</td> + <td class="state">FL</td> + <td class="cert">57147</td> + <td class="ai">Premier American Bank, N.A.</td> + <td class="closing">January 22, 2010</td> + <td class="updated">December 13, 2012</td> + </tr> + <tr> + <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td> + <td class="city">Kaysville</td> + <td class="state">UT</td> + <td class="cert">1252</td> + <td class="ai">No Acquirer</td> + <td class="closing">January 15, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td> + <td class="city">St. Stephen</td> + <td class="state">MN</td> + <td class="cert">17522</td> + <td class="ai">First State Bank of St. 
Joseph</td> + <td class="closing">January 15, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="towncommunity.html">Town Community Bank &amp; Trust</a></td> + <td class="city">Antioch</td> + <td class="state">IL</td> + <td class="cert">34705</td> + <td class="ai">First American Bank</td> + <td class="closing">January 15, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td> + <td class="city">Bellingham</td> + <td class="state">WA</td> + <td class="cert">22977</td> + <td class="ai">Washington Federal Savings and Loan Association</td> + <td class="closing">January 8, 2010</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td> + <td class="city">Santa Monica</td> + <td class="state">CA</td> + <td class="cert">28536</td> + <td class="ai">OneWest Bank, FSB</td> + <td class="closing">December 18, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td> + <td class="city">La Jolla</td> + <td class="state">CA</td> + <td class="cert">26348</td> + <td class="ai">City National Bank</td> + <td class="closing">December 18, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td> + <td class="city">Springfield</td> + <td class="state">IL</td> + <td class="cert">26820</td> + <td class="ai">The Independent BankersBank (TIB)</td> + <td class="closing">December 18, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td> + <td class="city">Irondale</td> + <td class="state">AL</td> + <td class="cert">32276</td> + <td class="ai">Beal Bank</td> + 
<td class="closing">December 18, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td> + <td class="city">New Baltimore</td> + <td class="state">MI</td> + <td class="cert">1006</td> + <td class="ai">No Acquirer</td> + <td class="closing">December 18, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td> + <td class="city">Panama City</td> + <td class="state">FL</td> + <td class="cert">32167</td> + <td class="ai">Hancock Bank</td> + <td class="closing">December 18, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">58315</td> + <td class="ai">No Acquirer</td> + <td class="closing">December 18, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="solutions.html">SolutionsBank</a></td> + <td class="city">Overland Park</td> + <td class="state">KS</td> + <td class="cert">4731</td> + <td class="ai">Arvest Bank</td> + <td class="closing">December 11, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td> + <td class="city">Mesa</td> + <td class="state">AZ</td> + <td class="cert">58399</td> + <td class="ai">Enterprise Bank &amp; Trust</td> + <td class="closing">December 11, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td> + <td class="city">Miami</td> + <td class="state">FL</td> + <td class="cert">22846</td> + <td class="ai">1st United Bank</td> + <td class="closing">December 11, 2009</td> + <td class="updated">November 5, 
2012</td> + </tr> + <tr> + <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td> + <td class="city">Reston</td> + <td class="state">VA</td> + <td class="cert">32583</td> + <td class="ai">Sonabank</td> + <td class="closing">December 4, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td> + <td class="city">Aurora</td> + <td class="state">IL</td> + <td class="cert">10440</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">December 4, 2009</td> + <td class="updated">August 23, 2012</td> + </tr> + <tr> + <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td> + <td class="city">Cleveland</td> + <td class="state">OH</td> + <td class="cert">29776</td> + <td class="ai">New York Community Bank</td> + <td class="closing">December 4, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td> + <td class="city">Reidsville</td> + <td class="state">GA</td> + <td class="cert">12080</td> + <td class="ai">Heritage Bank of the South</td> + <td class="closing">December 4, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td> + <td class="city">Norcross</td> + <td class="state">GA</td> + <td class="cert">26290</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">December 4, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">34663</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">December 4, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td> + <td class="city">Fort Myers</td> + <td class="state">FL</td> + <td class="cert">58016</td> + <td class="ai">Central Bank</td> + <td class="closing">November 20, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td> + <td class="city">San Clemente</td> + <td class="state">CA</td> + <td class="cert">57914</td> + <td class="ai">Sunwest Bank</td> + <td class="closing">November 13, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="orion-fl.html">Orion Bank</a></td> + <td class="city">Naples</td> + <td class="state">FL</td> + <td class="cert">22427</td> + <td class="ai">IBERIABANK</td> + <td class="closing">November 13, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td> + <td class="city">Sarasota</td> + <td class="state">FL</td> + <td class="cert">32267</td> + <td class="ai">IBERIABANK</td> + <td class="closing">November 13, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ucb.html">United Commercial Bank</a></td> + <td class="city">San Francisco</td> + <td class="state">CA</td> + <td class="cert">32469</td> + <td class="ai">East West Bank</td> + <td class="closing">November 6, 2009</td> + <td class="updated">November 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td> + <td class="city">St. 
Louis</td> + <td class="state">MO</td> + <td class="cert">19450</td> + <td class="ai">Central Bank of Kansas City</td> + <td class="closing">November 6, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td> + <td class="city">Oakdale</td> + <td class="state">MN</td> + <td class="cert">35074</td> + <td class="ai">Alerus Financial, N.A.</td> + <td class="closing">November 6, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td> + <td class="city">Detroit</td> + <td class="state">MI</td> + <td class="cert">30329</td> + <td class="ai">Liberty Bank and Trust Company</td> + <td class="closing">November 6, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td> + <td class="city">Sparta</td> + <td class="state">GA</td> + <td class="cert">22286</td> + <td class="ai">Ameris Bank</td> + <td class="closing">November 6, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td> + <td class="city">Houston</td> + <td class="state">TX</td> + <td class="cert">18776</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td> + <td class="city">Madisonville</td> + <td class="state">TX</td> + <td class="cert">33782</td> + <td class="ai">U.S. 
Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td> + <td class="city">Teague</td> + <td class="state">TX</td> + <td class="cert">25222</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="park-il.html">Park National Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">11677</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td> + <td class="city">San Francisco</td> + <td class="state">CA</td> + <td class="cert">30006</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="calnational.html">California National Bank</a></td> + <td class="city">Los Angeles</td> + <td class="state">CA</td> + <td class="cert">34659</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td> + <td class="city">San Diego</td> + <td class="state">CA</td> + <td class="cert">23594</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td> + <td class="city">Lemont</td> + <td class="state">IL</td> + <td class="cert">35291</td> + <td class="ai">U.S. 
Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td> + <td class="city">Phoenix</td> + <td class="state">AZ</td> + <td class="cert">32218</td> + <td class="ai">U.S. Bank N.A.</td> + <td class="closing">October 30, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td> + <td class="city">Westmont</td> + <td class="state">IL</td> + <td class="cert">35038</td> + <td class="ai">First Midwest Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td> + <td class="city">Otsego</td> + <td class="state">MN</td> + <td class="cert">57525</td> + <td class="ai">Central Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td> + <td class="city">Racine</td> + <td class="state">WI</td> + <td class="cert">18321</td> + <td class="ai">Tri City National Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="flagship.html">Flagship National Bank</a></td> + <td class="city">Bradenton</td> + <td class="state">FL</td> + <td class="cert">35044</td> + <td class="ai">First Federal Bank of Florida</td> + <td class="closing">October 23, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td> + <td class="city">Naples</td> + <td class="state">FL</td> + <td class="cert">58336</td> + <td class="ai">Stonegate Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + 
<tr> + <td class="institution"><a href="americanunited.html">American United Bank</a></td> + <td class="city">Lawrenceville</td> + <td class="state">GA</td> + <td class="cert">57794</td> + <td class="ai">Ameris Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="partners-fl.html">Partners Bank</a></td> + <td class="city">Naples</td> + <td class="state">FL</td> + <td class="cert">57959</td> + <td class="ai">Stonegate Bank</td> + <td class="closing">October 23, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td> + <td class="city">Bakersfield</td> + <td class="state">CA</td> + <td class="cert">23266</td> + <td class="ai">Citizens Business Bank</td> + <td class="closing">October 16, 2009</td> + <td class="updated">August 22, 2012</td> + </tr> + <tr> + <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td> + <td class="city">Pueblo</td> + <td class="state">CO</td> + <td class="cert">57263</td> + <td class="ai">Legacy Bank</td> + <td class="closing">October 2, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td> + <td class="city">Spring Grove</td> + <td class="state">MN</td> + <td class="cert">11416</td> + <td class="ai">Central Bank</td> + <td class="closing">October 2, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="warren-mi.html">Warren Bank</a></td> + <td class="city">Warren</td> + <td class="state">MI</td> + <td class="cert">34824</td> + <td class="ai">The Huntington National Bank</td> + <td class="closing">October 2, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="georgian.html">Georgian Bank</a></td> + <td class="city">Atlanta</td> + 
<td class="state">GA</td> + <td class="cert">57151</td> + <td class="ai">First Citizens Bank and Trust Company, Inc.</td> + <td class="closing">September 25, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td> + <td class="city">Louisville</td> + <td class="state">KY</td> + <td class="cert">57068</td> + <td class="ai">First Financial Bank, N.A.</td> + <td class="closing">September 18, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td> + <td class="city">Columbus</td> + <td class="state">IN</td> + <td class="cert">10100</td> + <td class="ai">First Financial Bank, N.A.</td> + <td class="closing">September 18, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="venture-wa.html">Venture Bank</a></td> + <td class="city">Lacey</td> + <td class="state">WA</td> + <td class="cert">22868</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">September 11, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td> + <td class="city">Woodbury</td> + <td class="state">MN</td> + <td class="cert">57736</td> + <td class="ai">CorTrust Bank N.A.</td> + <td class="closing">September 11, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">13693</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">September 11, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firststate-az.html">First State Bank</a></td> + <td class="city">Flagstaff</td> + <td 
class="state">AZ</td> + <td class="cert">34875</td> + <td class="ai">Sunwest Bank</td> + <td class="closing">September 4, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td> + <td class="city">Rolling Meadows</td> + <td class="state">IL</td> + <td class="cert">35030</td> + <td class="ai">No Acquirer</td> + <td class="closing">September 4, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="vantus.html">Vantus Bank</a></td> + <td class="city">Sioux City</td> + <td class="state">IN</td> + <td class="cert">27732</td> + <td class="ai">Great Southern Bank</td> + <td class="closing">September 4, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="inbank.html">InBank</a></td> + <td class="city">Oak Forest</td> + <td class="state">IL</td> + <td class="cert">20203</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">September 4, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td> + <td class="city">Kansas City</td> + <td class="state">MO</td> + <td class="cert">25231</td> + <td class="ai">Great American Bank</td> + <td class="closing">September 4, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td> + <td class="city">Ventura</td> + <td class="state">CA</td> + <td class="cert">27197</td> + <td class="ai">Pacific Western Bank</td> + <td class="closing">August 28, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td> + <td class="city">Forest Lake</td> + <td class="state">MN</td> + <td class="cert">1909</td> + <td class="ai">Central Bank</td> + <td 
class="closing">August 28, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td> + <td class="city">Baltimore</td> + <td class="state">MD</td> + <td class="cert">28312</td> + <td class="ai">Manufacturers and Traders Trust Company (M&amp;T Bank)</td> + <td class="closing">August 28, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td> + <td class="city">Austin</td> + <td class="state">TX</td> + <td class="cert">32618</td> + <td class="ai">BBVA Compass</td> + <td class="closing">August 21, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td> + <td class="city">Birmingham</td> + <td class="state">AL</td> + <td class="cert">22130</td> + <td class="ai">IBERIABANK</td> + <td class="closing">August 21, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="coweta.html">First Coweta Bank</a></td> + <td class="city">Newnan</td> + <td class="state">GA</td> + <td class="cert">57702</td> + <td class="ai">United Bank</td> + <td class="closing">August 21, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="ebank.html">ebank</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">34682</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">August 21, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td> + <td class="city">Las Vegas</td> + <td class="state">NV</td> + <td class="cert">34043</td> + <td class="ai">No Acquirer</td> + <td class="closing">August 14, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="community-az.html">Community Bank of Arizona</a></td> + <td class="city">Phoenix</td> + <td class="state">AZ</td> + <td class="cert">57645</td> + <td class="ai">MidFirst Bank</td> + <td class="closing">August 14, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td> + <td class="city">Gilbert</td> + <td class="state">AZ</td> + <td class="cert">34485</td> + <td class="ai">MidFirst Bank</td> + <td class="closing">August 14, 2009</td> + <td class="updated">August 21, 2012</td> + </tr> + <tr> + <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td> + <td class="city">Montgomery</td> + <td class="state">AL</td> + <td class="cert">9609</td> + <td class="ai">Branch Banking &amp; Trust Company, (BB&amp;T)</td> + <td class="closing">August 14, 2009</td> + <td class="updated">September 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td> + <td class="city">Pittsburgh</td> + <td class="state">PA</td> + <td class="cert">31559</td> + <td class="ai">PNC Bank, N.A.</td> + <td class="closing">August 14, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="community-prineville.html">Community First Bank</a></td> + <td class="city">Prineville</td> + <td class="state">OR</td> + <td class="cert">23268</td> + <td class="ai">Home Federal Bank</td> + <td class="closing">August 7, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td> + <td class="city">Venice</td> + <td class="state">FL</td> + <td class="cert">27183</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">August 7, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="fsb-sarasota.html">First State Bank</a></td> + <td class="city">Sarasota</td> + <td class="state">FL</td> + <td class="cert">27364</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">August 7, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td> + <td class="city">Harvey</td> + <td class="state">IL</td> + <td class="cert">18659</td> + <td class="ai">United Central Bank</td> + <td class="closing">July 31, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americano.html">First BankAmericano</a></td> + <td class="city">Elizabeth</td> + <td class="state">NJ</td> + <td class="cert">34270</td> + <td class="ai">Crown Bank</td> + <td class="closing">July 31, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td> + <td class="city">West Chester</td> + <td class="state">OH</td> + <td class="cert">32288</td> + <td class="ai">First Financial Bank, N.A.</td> + <td class="closing">July 31, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td> + <td class="city">Jupiter</td> + <td class="state">FL</td> + <td class="cert">57604</td> + <td class="ai">Stonegate Bank</td> + <td class="closing">July 31, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td> + <td class="city">Altus</td> + <td class="state">OK</td> + <td class="cert">9873</td> + <td class="ai">Herring Bank</td> + <td class="closing">July 31, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td> + <td class="city">Gray</td> + <td class="state">GA</td> + <td 
class="cert">8486</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td> + <td class="city">Perry</td> + <td class="state">GA</td> + <td class="cert">27048</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td> + <td class="city">Macon</td> + <td class="state">GA</td> + <td class="cert">27367</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td> + <td class="city">Woodstock</td> + <td class="state">GA</td> + <td class="cert">57105</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td> + <td class="city">Alpharetta</td> + <td class="state">GA</td> + <td class="cert">57430</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td> + <td class="city">Suwanee</td> + <td class="state">GA</td> + <td class="cert">57346</td> + <td class="ai">State Bank and Trust Company</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td> + <td class="city">Williamsville</td> + <td class="state">NY</td> + <td 
class="cert">58065</td> + <td class="ai">Evans Bank, N.A.</td> + <td class="closing">July 24, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td> + <td class="city">Temecula</td> + <td class="state">CA</td> + <td class="cert">34341</td> + <td class="ai">First-Citizens Bank &amp; Trust Company</td> + <td class="closing">July 17, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td> + <td class="city">Rancho Cucamonga</td> + <td class="state">CA</td> + <td class="cert">23556</td> + <td class="ai">California Bank &amp; Trust</td> + <td class="closing">July 17, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankfirst.html">BankFirst</a></td> + <td class="city">Sioux Falls</td> + <td class="state">SD</td> + <td class="cert">34103</td> + <td class="ai">Alerus Financial, N.A.</td> + <td class="closing">July 17, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td> + <td class="city">Winder</td> + <td class="state">GA</td> + <td class="cert">34594</td> + <td class="ai">First American Bank and Trust Company</td> + <td class="closing">July 17, 2009</td> + <td class="updated">January 15, 2013</td> + </tr> + <tr> + <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td> + <td class="city">Thermopolis</td> + <td class="state">WY</td> + <td class="cert">22754</td> + <td class="ai">Central Bank &amp; Trust</td> + <td class="closing">July 10, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="founders.html">Founders Bank</a></td> + <td class="city">Worth</td> + <td class="state">IL</td> + <td class="cert">18390</td> + <td class="ai">The PrivateBank and Trust Company</td> + <td 
class="closing">July 2, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td> + <td class="city">Dallas</td> + <td class="state">TX</td> + <td class="cert">57667</td> + <td class="ai">State Bank of Texas</td> + <td class="closing">July 2, 2009</td> + <td class="updated">October 26, 2012</td> + </tr> + <tr> + <td class="institution"><a href="danville.html">First National Bank of Danville</a></td> + <td class="city">Danville</td> + <td class="state">IL</td> + <td class="cert">3644</td> + <td class="ai">First Financial Bank, N.A.</td> + <td class="closing">July 2, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td> + <td class="city">Elizabeth</td> + <td class="state">IL</td> + <td class="cert">9262</td> + <td class="ai">Galena State Bank and Trust Company</td> + <td class="closing">July 2, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="rockriver.html">Rock River Bank</a></td> + <td class="city">Oregon</td> + <td class="state">IL</td> + <td class="cert">15302</td> + <td class="ai">The Harvard State Bank</td> + <td class="closing">July 2, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td> + <td class="city">Winchester</td> + <td class="state">IL</td> + <td class="cert">11710</td> + <td class="ai">The First National Bank of Beardstown</td> + <td class="closing">July 2, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="warner.html">John Warner Bank</a></td> + <td class="city">Clinton</td> + <td class="state">IL</td> + <td class="cert">12093</td> + <td class="ai">State Bank of Lincoln</td> + <td class="closing">July 2, 2009</td> + <td class="updated">August 
20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mirae.html">Mirae Bank</a></td> + <td class="city">Los Angeles</td> + <td class="state">CA</td> + <td class="cert">57332</td> + <td class="ai">Wilshire State Bank</td> + <td class="closing">June 26, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td> + <td class="city">Irvine</td> + <td class="state">CA</td> + <td class="cert">57893</td> + <td class="ai">Sunwest Bank</td> + <td class="closing">June 26, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="horizon.html">Horizon Bank</a></td> + <td class="city">Pine City</td> + <td class="state">MN</td> + <td class="cert">9744</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">June 26, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td> + <td class="city">Newnan</td> + <td class="state">GA</td> + <td class="cert">35285</td> + <td class="ai">CharterBank</td> + <td class="closing">June 26, 2009</td> + <td class="updated">August 20, 2012</td> + </tr> + <tr> + <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td> + <td class="city">Villa Rica</td> + <td class="state">GA</td> + <td class="cert">57436</td> + <td class="ai">No Acquirer</td> + <td class="closing">June 26, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td> + <td class="city">Anthony</td> + <td class="state">KS</td> + <td class="cert">4614</td> + <td class="ai">Bank of Kansas</td> + <td class="closing">June 19, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td> + <td class="city">Wilmington</td> + <td 
class="state">NC</td> + <td class="cert">27837</td> + <td class="ai">First Bank</td> + <td class="closing">June 19, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="scb.html">Southern Community Bank</a></td> + <td class="city">Fayetteville</td> + <td class="state">GA</td> + <td class="cert">35251</td> + <td class="ai">United Community Bank</td> + <td class="closing">June 19, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td> + <td class="city">Lincolnwood</td> + <td class="state">IL</td> + <td class="cert">17309</td> + <td class="ai">Republic Bank of Chicago</td> + <td class="closing">June 5, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td> + <td class="city">Macomb</td> + <td class="state">IL</td> + <td class="cert">5757</td> + <td class="ai">Morton Community Bank</td> + <td class="closing">May 22, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td> + <td class="city">Champaign</td> + <td class="state">IL</td> + <td class="cert">35175</td> + <td class="ai">Midland States Bank</td> + <td class="closing">May 22, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td> + <td class="city">Coral Gables</td> + <td class="state">FL</td> + <td class="cert">32247</td> + <td class="ai">BankUnited</td> + <td class="closing">May 21, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="westsound.html">Westsound Bank</a></td> + <td class="city">Bremerton</td> + <td class="state">WA</td> + <td class="cert">34843</td> + <td class="ai">Kitsap Bank</td> + <td class="closing">May 8, 
2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="americawest.html">America West Bank</a></td> + <td class="city">Layton</td> + <td class="state">UT</td> + <td class="cert">35461</td> + <td class="ai">Cache Valley Bank</td> + <td class="closing">May 1, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td> + <td class="city">Ridgewood</td> + <td class="state">NJ</td> + <td class="cert">57563</td> + <td class="ai">North Jersey Community Bank</td> + <td class="closing">May 1, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">26535</td> + <td class="ai">No Acquirer</td> + <td class="closing">May 1, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td> + <td class="city">Ketchum</td> + <td class="state">ID</td> + <td class="cert">34396</td> + <td class="ai">U.S. 
Bank, N.A.</td> + <td class="closing">April 24, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td> + <td class="city">Calabasas</td> + <td class="state">CA</td> + <td class="cert">32069</td> + <td class="ai">No Acquirer</td> + <td class="closing">April 24, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td> + <td class="city">Farmington Hills</td> + <td class="state">MI</td> + <td class="cert">34369</td> + <td class="ai">Level One Bank</td> + <td class="closing">April 24, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td> + <td class="city">Kennesaw</td> + <td class="state">GA</td> + <td class="cert">57943</td> + <td class="ai">Bank of North Georgia</td> + <td class="closing">April 24, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td> + <td class="city">Elko</td> + <td class="state">NV</td> + <td class="cert">33824</td> + <td class="ai">Nevada State Bank</td> + <td class="closing">April 17, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td> + <td class="city">Sugar Creek</td> + <td class="state">MO</td> + <td class="cert">8266</td> + <td class="ai">Metcalf Bank</td> + <td class="closing">April 17, 2009</td> + <td class="updated">August 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td> + <td class="city">Greeley</td> + <td class="state">CO</td> + <td class="cert">34881</td> + <td class="ai">No Acquirer</td> + <td class="closing">April 10, 2009</td> + <td class="updated">September 4, 2012</td> 
+ </tr> + <tr> + <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td> + <td class="city">Wilmington</td> + <td class="state">NC</td> + <td class="cert">34639</td> + <td class="ai">First Federal Savings and Loan Association</td> + <td class="closing">April 10, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="omni.html">Omni National Bank</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">22238</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 27, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="teambank.html">TeamBank, NA</a></td> + <td class="city">Paola</td> + <td class="state">KS</td> + <td class="cert">4754</td> + <td class="ai">Great Southern Bank</td> + <td class="closing">March 20, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td> + <td class="city">Colorado Springs</td> + <td class="state">CO</td> + <td class="cert">18896</td> + <td class="ai">Herring Bank</td> + <td class="closing">March 20, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td> + <td class="city">Stockbridge</td> + <td class="state">GA</td> + <td class="cert">18243</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 20, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td> + <td class="city">Commerce</td> + <td class="state">GA</td> + <td class="cert">57558</td> + <td class="ai">Northeast Georgia Bank</td> + <td class="closing">March 6, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td> + <td 
class="city">Henderson</td> + <td class="state">NV</td> + <td class="cert">34820</td> + <td class="ai">Bank of Nevada</td> + <td class="closing">February 27, 2009</td> + <td class="updated">September 7, 2012</td> + </tr> + <tr> + <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td> + <td class="city">Glenwood</td> + <td class="state">IL</td> + <td class="cert">20078</td> + <td class="ai">MB Financial Bank, N.A.</td> + <td class="closing">February 27, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td> + <td class="city">Silverton</td> + <td class="state">OR</td> + <td class="cert">35399</td> + <td class="ai">Citizens Bank</td> + <td class="closing">February 20, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td> + <td class="city">Beaverton</td> + <td class="state">OR</td> + <td class="cert">57342</td> + <td class="ai">Washington Trust Bank of Spokane</td> + <td class="closing">February 13, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="cornbelt.html">Corn Belt Bank &amp; Trust Co.</a></td> + <td class="city">Pittsfield</td> + <td class="state">IL</td> + <td class="cert">16500</td> + <td class="ai">The Carlinville National Bank</td> + <td class="closing">February 13, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td> + <td class="city">Cape Coral</td> + <td class="state">FL</td> + <td class="cert">34563</td> + <td class="ai">TIB Bank</td> + <td class="closing">February 13, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sherman.html">Sherman County Bank</a></td> + <td class="city">Loup City</td> + <td class="state">NE</td> + 
<td class="cert">5431</td> + <td class="ai">Heritage Bank</td> + <td class="closing">February 13, 2009</td> + <td class="updated">August 17, 2012</td> + </tr> + <tr> + <td class="institution"><a href="county.html">County Bank</a></td> + <td class="city">Merced</td> + <td class="state">CA</td> + <td class="cert">22574</td> + <td class="ai">Westamerica Bank</td> + <td class="closing">February 6, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="alliance.html">Alliance Bank</a></td> + <td class="city">Culver City</td> + <td class="state">CA</td> + <td class="cert">23124</td> + <td class="ai">California Bank &amp; Trust</td> + <td class="closing">February 6, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td> + <td class="city">McDonough</td> + <td class="state">GA</td> + <td class="cert">57017</td> + <td class="ai">Regions Bank</td> + <td class="closing">February 6, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ocala.html">Ocala National Bank</a></td> + <td class="city">Ocala</td> + <td class="state">FL</td> + <td class="cert">26538</td> + <td class="ai">CenterState Bank of Florida, N.A.</td> + <td class="closing">January 30, 2009</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="suburban.html">Suburban FSB</a></td> + <td class="city">Crofton</td> + <td class="state">MD</td> + <td class="cert">30763</td> + <td class="ai">Bank of Essex</td> + <td class="closing">January 30, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="magnet.html">MagnetBank</a></td> + <td class="city">Salt Lake City</td> + <td class="state">UT</td> + <td class="cert">58001</td> + <td class="ai">No Acquirer</td> + <td class="closing">January 30, 2009</td> + <td class="updated">August 
16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td> + <td class="city">Redlands</td> + <td class="state">CA</td> + <td class="cert">33025</td> + <td class="ai">First California Bank</td> + <td class="closing">January 23, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="clark.html">Bank of Clark County</a></td> + <td class="city">Vancouver</td> + <td class="state">WA</td> + <td class="cert">34959</td> + <td class="ai">Umpqua Bank</td> + <td class="closing">January 16, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td> + <td class="city">Berkeley</td> + <td class="state">IL</td> + <td class="cert">19733</td> + <td class="ai">Republic Bank of Chicago</td> + <td class="closing">January 16, 2009</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td> + <td class="city">Sanderson</td> + <td class="state">TX</td> + <td class="cert">11568</td> + <td class="ai">The Pecos County State Bank</td> + <td class="closing">December 12, 2008</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td> + <td class="city">Duluth</td> + <td class="state">GA</td> + <td class="cert">35379</td> + <td class="ai">Branch Banking &amp; Trust Company, (BB&amp;T)</td> + <td class="closing">December 12, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td> + <td class="city">Jackson</td> + <td class="state">GA</td> + <td class="cert">34301</td> + <td class="ai">United Bank</td> + <td class="closing">December 5, 2008</td> + <td class="updated">August 16, 2012</td> + 
</tr> + <tr> + <td class="institution"><a href="pff.html">PFF Bank &amp; Trust</a></td> + <td class="city">Pomona</td> + <td class="state">CA</td> + <td class="cert">28344</td> + <td class="ai">U.S. Bank, N.A.</td> + <td class="closing">November 21, 2008</td> + <td class="updated">January 4, 2013</td> + </tr> + <tr> + <td class="institution"><a href="downey.html">Downey Savings &amp; Loan</a></td> + <td class="city">Newport Beach</td> + <td class="state">CA</td> + <td class="cert">30968</td> + <td class="ai">U.S. Bank, N.A.</td> + <td class="closing">November 21, 2008</td> + <td class="updated">January 4, 2013</td> + </tr> + <tr> + <td class="institution"><a href="community.html">Community Bank</a></td> + <td class="city">Loganville</td> + <td class="state">GA</td> + <td class="cert">16490</td> + <td class="ai">Bank of Essex</td> + <td class="closing">November 21, 2008</td> + <td class="updated">September 4, 2012</td> + </tr> + <tr> + <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td> + <td class="city">Los Angeles</td> + <td class="state">CA</td> + <td class="cert">23595</td> + <td class="ai">Pacific Western Bank</td> + <td class="closing">November 7, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td> + <td class="city">Houston</td> + <td class="state">TX</td> + <td class="cert">26870</td> + <td class="ai">Prosperity Bank</td> + <td class="closing">November 7, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="freedom.html">Freedom Bank</a></td> + <td class="city">Bradenton</td> + <td class="state">FL</td> + <td class="cert">57930</td> + <td class="ai">Fifth Third Bank</td> + <td class="closing">October 31, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="alpha.html">Alpha Bank &amp; Trust</a></td> + <td 
class="city">Alpharetta</td> + <td class="state">GA</td> + <td class="cert">58241</td> + <td class="ai">Stearns Bank, N.A.</td> + <td class="closing">October 24, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="meridian.html">Meridian Bank</a></td> + <td class="city">Eldred</td> + <td class="state">IL</td> + <td class="cert">13789</td> + <td class="ai">National Bank</td> + <td class="closing">October 10, 2008</td> + <td class="updated">May 31, 2012</td> + </tr> + <tr> + <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td> + <td class="city">Northville</td> + <td class="state">MI</td> + <td class="cert">57654</td> + <td class="ai">Monroe Bank &amp; Trust</td> + <td class="closing">October 10, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td> + <td class="city">Henderson</td> + <td class="state">NV</td> + <td class="cert">32633</td> + <td class="ai">JP Morgan Chase Bank</td> + <td class="closing">September 25, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="ameribank.html">Ameribank</a></td> + <td class="city">Northfork</td> + <td class="state">WV</td> + <td class="cert">6782</td> + <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td> + <td class="closing">September 19, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td> + <td class="city">Henderson</td> + <td class="state">NV</td> + <td class="cert">34194</td> + <td class="ai">Nevada State Bank</td> + <td class="closing">September 5, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="integrity.html">Integrity 
Bank</a></td> + <td class="city">Alpharetta</td> + <td class="state">GA</td> + <td class="cert">35469</td> + <td class="ai">Regions Bank</td> + <td class="closing">August 29, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="columbian.html">Columbian Bank &amp; Trust</a></td> + <td class="city">Topeka</td> + <td class="state">KS</td> + <td class="cert">22728</td> + <td class="ai">Citizens Bank &amp; Trust</td> + <td class="closing">August 22, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td> + <td class="city">Bradenton</td> + <td class="state">FL</td> + <td class="cert">57523</td> + <td class="ai">SunTrust Bank</td> + <td class="closing">August 1, 2008</td> + <td class="updated">August 16, 2012</td> + </tr> + <tr> + <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td> + <td class="city">Newport Beach</td> + <td class="state">CA</td> + <td class="cert">57961</td> + <td class="ai">Mutual of Omaha Bank</td> + <td class="closing">July 25, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td> + <td class="city">Reno</td> + <td class="state">NV</td> + <td class="cert">27011</td> + <td class="ai">Mutual of Omaha Bank</td> + <td class="closing">July 25, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td> + <td class="city">Pasadena</td> + <td class="state">CA</td> + <td class="cert">29730</td> + <td class="ai">OneWest Bank, FSB</td> + <td class="closing">July 11, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td> + <td class="city">Staples</td> + <td class="state">MN</td> + <td 
class="cert">12736</td> + <td class="ai">First International Bank and Trust</td> + <td class="closing">May 30, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="anb.html">ANB Financial, NA</a></td> + <td class="city">Bentonville</td> + <td class="state">AR</td> + <td class="cert">33901</td> + <td class="ai">Pulaski Bank and Trust Company</td> + <td class="closing">May 9, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="Hume.html">Hume Bank</a></td> + <td class="city">Hume</td> + <td class="state">MO</td> + <td class="cert">1971</td> + <td class="ai">Security Bank</td> + <td class="closing">March 7, 2008</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td> + <td class="city">Kansas City</td> + <td class="state">MO</td> + <td class="cert">24660</td> + <td class="ai">Liberty Bank and Trust Company</td> + <td class="closing">January 25, 2008</td> + <td class="updated">October 26, 2012</td> + </tr> + <tr> + <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td> + <td class="city">Lakeview</td> + <td class="state">OH</td> + <td class="cert">16848</td> + <td class="ai">The Citizens Banking Company</td> + <td class="closing">October 4, 2007</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="NetBank.html">NetBank</a></td> + <td class="city">Alpharetta</td> + <td class="state">GA</td> + <td class="cert">32575</td> + <td class="ai">ING DIRECT</td> + <td class="closing">September 28, 2007</td> + <td class="updated">August 28, 2012</td> + </tr> + <tr> + <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td> + <td class="city">Pittsburgh</td> + <td class="state">PA</td> + <td class="cert">35353</td> + <td class="ai">Allegheny Valley Bank of Pittsburgh</td> + <td 
class="closing">February 2, 2007</td> + <td class="updated">October 27, 2010</td> + </tr> + <tr> + <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td> + <td class="city">Ephraim</td> + <td class="state">UT</td> + <td class="cert">1249</td> + <td class="ai">Far West Bank</td> + <td class="closing">June 25, 2004</td> + <td class="updated">April 9, 2008</td> + </tr> + <tr> + <td class="institution"><a href="reliance.html">Reliance Bank</a></td> + <td class="city">White Plains</td> + <td class="state">NY</td> + <td class="cert">26778</td> + <td class="ai">Union State Bank</td> + <td class="closing">March 19, 2004</td> + <td class="updated">April 9, 2008</td> + </tr> + <tr> + <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td> + <td class="city">Tallahassee</td> + <td class="state">FL</td> + <td class="cert">26838</td> + <td class="ai">Hancock Bank of Florida</td> + <td class="closing">March 12, 2004</td> + <td class="updated">June 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td> + <td class="city">Newark</td> + <td class="state">NJ</td> + <td class="cert">31330</td> + <td class="ai">No Acquirer</td> + <td class="closing">February 14, 2004</td> + <td class="updated">April 9, 2008</td> + </tr> + <tr> + <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td> + <td class="city">Philadelphia</td> + <td class="state">PA</td> + <td class="cert">27203</td> + <td class="ai">Earthstar Bank</td> + <td class="closing">November 14, 2003</td> + <td class="updated">July 22, 2005</td> + </tr> + <tr> + <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td> + <td class="city">Blanchardville</td> + <td class="state">WI</td> + <td class="cert">11639</td> + <td class="ai">The Park Bank</td> + <td class="closing">May 9, 2003</td> + <td class="updated">June 5, 2012</td> + </tr> + <tr> + <td class="institution"><a 
href="spbank.html">Southern Pacific Bank</a></td> + <td class="city">Torrance</td> + <td class="state">CA</td> + <td class="cert">27094</td> + <td class="ai">Beal Bank</td> + <td class="closing">February 7, 2003</td> + <td class="updated">October 20, 2008</td> + </tr> + <tr> + <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td> + <td class="city">Cheneyville</td> + <td class="state">LA</td> + <td class="cert">16445</td> + <td class="ai">Sabine State Bank &amp; Trust</td> + <td class="closing">December 17, 2002</td> + <td class="updated">October 20, 2004</td> + </tr> + <tr> + <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td> + <td class="city">Alamo</td> + <td class="state">TN</td> + <td class="cert">9961</td> + <td class="ai">No Acquirer</td> + <td class="closing">November 8, 2002</td> + <td class="updated">March 18, 2005</td> + </tr> + <tr> + <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td> + <td class="city">Atlanta</td> + <td class="state">GA</td> + <td class="cert">33784</td> + <td class="ai">No Acquirer</td> + <td class="closing">September 30, 2002</td> + <td class="updated">September 11, 2006</td> + </tr> + <tr> + <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td> + <td class="city">Chicago</td> + <td class="state">IL</td> + <td class="cert">29355</td> + <td class="ai">Chicago Community Bank</td> + <td class="closing">June 27, 2002</td> + <td class="updated">April 9, 2008</td> + </tr> + <tr> + <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td> + <td class="city">Stamford</td> + <td class="state">CT</td> + <td class="cert">19183</td> + <td class="ai">Hudson United Bank</td> + <td class="closing">June 26, 2002</td> + <td class="updated">February 14, 2012</td> + </tr> + <tr> + <td class="institution"><a href="newcentury.html">New Century Bank</a></td> + <td 
class="city">Shelby Township</td> + <td class="state">MI</td> + <td class="cert">34979</td> + <td class="ai">No Acquirer</td> + <td class="closing">March 28, 2002</td> + <td class="updated">March 18, 2005</td> + </tr> + <tr> + <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td> + <td class="city">Boca Raton</td> + <td class="state">FL</td> + <td class="cert">26652</td> + <td class="ai">Bank Leumi USA</td> + <td class="closing">March 1, 2002</td> + <td class="updated">April 9, 2008</td> + </tr> + <tr> + <td class="institution"><a href="nextbank.html">NextBank, NA</a></td> + <td class="city">Phoenix</td> + <td class="state">AZ</td> + <td class="cert">22314</td> + <td class="ai">No Acquirer</td> + <td class="closing">February 7, 2002</td> + <td class="updated">August 27, 2010</td> + </tr> + <tr> + <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td> + <td class="city">Oakwood</td> + <td class="state">OH</td> + <td class="cert">8966</td> + <td class="ai">The State Bank &amp; Trust Company</td> + <td class="closing">February 1, 2002</td> + <td class="updated">October 25, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td> + <td class="city">Sierra Blanca</td> + <td class="state">TX</td> + <td class="cert">22002</td> + <td class="ai">The Security State Bank of Pecos</td> + <td class="closing">January 18, 2002</td> + <td class="updated">November 6, 2003</td> + </tr> + <tr> + <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td> + <td class="city">Miami</td> + <td class="state">FL</td> + <td class="cert">24382</td> + <td class="ai">Israel Discount Bank of New York</td> + <td class="closing">January 11, 2002</td> + <td class="updated">June 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td> + <td class="city">Gravette</td> + <td 
class="state">AR</td> + <td class="cert">34248</td> + <td class="ai">Delta Trust &amp; Bank</td> + <td class="closing">September 7, 2001</td> + <td class="updated">February 10, 2004</td> + </tr> + <tr> + <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td> + <td class="city">Hinsdale</td> + <td class="state">IL</td> + <td class="cert">32646</td> + <td class="ai">Superior Federal, FSB</td> + <td class="closing">July 27, 2001</td> + <td class="updated">June 5, 2012</td> + </tr> + <tr> + <td class="institution"><a href="Malta.html">Malta National Bank</a></td> + <td class="city">Malta</td> + <td class="state">OH</td> + <td class="cert">6629</td> + <td class="ai">North Valley Bank</td> + <td class="closing">May 3, 2001</td> + <td class="updated">November 18, 2002</td> + </tr> + <tr> + <td class="institution"><a href="firstalliance.html">First Alliance Bank &amp; Trust Co.</a></td> + <td class="city">Manchester</td> + <td class="state">NH</td> + <td class="cert">34264</td> + <td class="ai">Southern New Hampshire Bank &amp; Trust</td> + <td class="closing">February 2, 2001</td> + <td class="updated">February 18, 2003</td> + </tr> + <tr> + <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td> + <td class="city">Metropolis</td> + <td class="state">IL</td> + <td class="cert">3815</td> + <td class="ai">Banterra Bank of Marion</td> + <td class="closing">December 14, 2000</td> + <td class="updated">March 17, 2005</td> + </tr> + <tr> + <td class="institution"><a href="boh.html">Bank of Honolulu</a></td> + <td class="city">Honolulu</td> + <td class="state">HI</td> + <td class="cert">21029</td> + <td class="ai">Bank of the Orient</td> + <td class="closing">October 13, 2000</td> + <td class="updated">March 17, 2005</td> + </tr> + </tbody> + </table> + </div> + +</div> +<div id="page_foot"> + <div class="date">Last Updated 05/31/2013</div> + <div class="email"><a 
href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div> + <div class="clear"></div> +</div> + +<!-- START of Footer --> +<footer> +<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" /> +<div id="responsive_footer"> + <div id="responsive_footer-full"> + <ul> + <li><a href="/" title="Home">Home</a></li> + <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li> + <li><a href="/search/" title="Search">Search</a></li> + <li><a href="/help/" title="Help">Help</a></li> + <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li> + <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li> + <li><a href="/quicklinks/spanish.html" title="En Espa&ntilde;ol">En Espa&ntilde;ol</a></li> + </ul> + <hr> + <ul> + <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li> + <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li> + <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li> + <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li> + <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li> + </ul> + <hr> + <ul> + <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li> + <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li> + <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> + </ul> + </div> + <div id="responsive_footer-small"> + <ul> + <li><a href="/" title="Home">Home</a></li> + <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li> + <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li> + <li><a href="/search/" title="Search">Search</a></li> + </ul> + </div> +</div> +</footer> +<!-- START Omniture SiteCatalyst Code --> +<script 
language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script> +<script type="text/javascript"> +/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/ +var s_code=s.t();if(s_code)document.write(s_code)</script> +<script type="text/javascript"> +if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-') +</script> +<noscript> +<a href="http://www.omniture.com" title="Web Analytics"> +<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li> +</noscript> +<!--/DO NOT REMOVE/--> +<!-- END Omniture SiteCatalyst Code --> +<!-- END of Footer --> + +<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script> +<script type="text/javascript" src="banklist.js"></script> + +</body> +</html> diff --git a/pandas/io/tests/data/spam.html b/pandas/io/tests/data/spam.html index 9f6ac2d74e0c9..935b39f6d6011 100644 --- a/pandas/io/tests/data/spam.html +++ b/pandas/io/tests/data/spam.html @@ -204,574 +204,574 @@ <h1>Nutrient data for 07908, Luncheon meat, pork with ham, minced, canned, inclu <p style="font-style:italic;font-size:.8em">Nutrient values and weights are for edible portion</p> - - <table> - <thead> - - <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" 
alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr> - <th style="vertical-align:middle">Nutrient</th> - <th style="vertical-align:middle" >Unit</th> - <th style="vertical-align:middle"><input type="text" name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th> - - - - - <th style="width:130px;line-height:1.2em;text-align:center"> - <input type="text" name="Q3483" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="2.0" id="Q3483" /> - <br> - - oz 1 NLEA serving - <br>56g - <!-- - --> - </th> - - </thead> - <tbody> - - <tr class="even" > - <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Proximates</td> - </tr> - - - <tr class="odd"> - <td >Water - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">51.70</td> - - - <td style="text-align:right;">28.95</td> - - - </tr> - - - <tr class="even"> - <td >Energy - - - </td> - - <td style="text-align:center;">kcal</td> - <td style="text-align:right;">315</td> - - - <td style="text-align:right;">176</td> - - - </tr> - - - <tr class="odd"> - <td >Protein - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">13.40</td> - - - <td style="text-align:right;">7.50</td> - - - </tr> - - - <tr class="even"> - <td >Total lipid (fat) - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">26.60</td> - - - <td style="text-align:right;">14.90</td> - - - </tr> - - - <tr class="odd"> - <td >Carbohydrate, by difference - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">4.60</td> - - - <td style="text-align:right;">2.58</td> - - - </tr> - - - <tr class="even"> - <td >Fiber, total dietary - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">0.0</td> - - - <td style="text-align:right;">0.0</td> - - - </tr> - - - <tr class="odd"> - <td 
>Sugars, total - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">0.00</td> - - - <td style="text-align:right;">0.00</td> - - - </tr> - - - - <tr class="even" > - <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Minerals</td> - </tr> - - - <tr class="odd"> - <td >Calcium, Ca - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0</td> - - - <td style="text-align:right;">0</td> - - - </tr> - - - <tr class="even"> - <td >Iron, Fe - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.64</td> - - - <td style="text-align:right;">0.36</td> - - - </tr> - - - <tr class="odd"> - <td >Magnesium, Mg - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">14</td> - - - <td style="text-align:right;">8</td> - - - </tr> - - - <tr class="even"> - <td >Phosphorus, P - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">151</td> - - - <td style="text-align:right;">85</td> - - - </tr> - - - <tr class="odd"> - <td >Potassium, K - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">409</td> - - - <td style="text-align:right;">229</td> - - - </tr> - - - <tr class="even"> - <td >Sodium, Na - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">1411</td> - - - <td style="text-align:right;">790</td> - - - </tr> - - - <tr class="odd"> - <td >Zinc, Zn - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">1.59</td> - - - <td style="text-align:right;">0.89</td> - - - </tr> - - - - <tr class="even" > - <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Vitamins</td> - </tr> - - - <tr class="odd"> - <td >Vitamin C, total ascorbic acid - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.0</td> - - - <td style="text-align:right;">0.0</td> - - - </tr> - - - <tr class="even"> - <td 
>Thiamin - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.317</td> - - - <td style="text-align:right;">0.178</td> - - - </tr> - - - <tr class="odd"> - <td >Riboflavin - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.176</td> - - - <td style="text-align:right;">0.099</td> - - - </tr> - - - <tr class="even"> - <td >Niacin - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">3.530</td> - - - <td style="text-align:right;">1.977</td> - - - </tr> - - - <tr class="odd"> - <td >Vitamin B-6 - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.218</td> - - - <td style="text-align:right;">0.122</td> - - - </tr> - - - <tr class="even"> - <td >Folate, DFE - - - </td> - - <td style="text-align:center;">µg</td> - <td style="text-align:right;">3</td> - - - <td style="text-align:right;">2</td> - - - </tr> - - - <tr class="odd"> - <td >Vitamin B-12 - - - </td> - - <td style="text-align:center;">µg</td> - <td style="text-align:right;">0.45</td> - - - <td style="text-align:right;">0.25</td> - - - </tr> - - - <tr class="even"> - <td >Vitamin A, RAE - - - </td> - - <td style="text-align:center;">µg</td> - <td style="text-align:right;">0</td> - - - <td style="text-align:right;">0</td> - - - </tr> - - - <tr class="odd"> - <td >Vitamin A, IU - - - </td> - - <td style="text-align:center;">IU</td> - <td style="text-align:right;">0</td> - - - <td style="text-align:right;">0</td> - - - </tr> - - - <tr class="even"> - <td >Vitamin E (alpha-tocopherol) - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0.42</td> - - - <td style="text-align:right;">0.24</td> - - - </tr> - - - <tr class="odd"> - <td >Vitamin D (D2 + D3) - - - </td> - - <td style="text-align:center;">µg</td> - <td style="text-align:right;">0.6</td> - - - <td style="text-align:right;">0.3</td> - - - </tr> - - - <tr class="even"> - <td >Vitamin D - - 
- </td> - - <td style="text-align:center;">IU</td> - <td style="text-align:right;">26</td> - - - <td style="text-align:right;">15</td> - - - </tr> - - - <tr class="odd"> - <td >Vitamin K (phylloquinone) - - - </td> - - <td style="text-align:center;">µg</td> - <td style="text-align:right;">0.0</td> - - - <td style="text-align:right;">0.0</td> - - - </tr> - - - - <tr class="even" > - <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Lipids</td> - </tr> - - - <tr class="odd"> - <td >Fatty acids, total saturated - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">9.987</td> - - - <td style="text-align:right;">5.593</td> - - - </tr> - - - <tr class="even"> - <td >Fatty acids, total monounsaturated - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">13.505</td> - - - <td style="text-align:right;">7.563</td> - - - </tr> - - - <tr class="odd"> - <td >Fatty acids, total polyunsaturated - - - </td> - - <td style="text-align:center;">g</td> - <td style="text-align:right;">2.019</td> - - - <td style="text-align:right;">1.131</td> - - - </tr> - - - <tr class="even"> - <td >Cholesterol - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">71</td> - - - <td style="text-align:right;">40</td> - - - </tr> - - - - <tr class="even" > - <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Other</td> - </tr> - - - <tr class="odd"> - <td >Caffeine - - - </td> - - <td style="text-align:center;">mg</td> - <td style="text-align:right;">0</td> - - - <td style="text-align:right;">0</td> - - - </tr> - - - - </tbody> + + <table> + <thead> + + <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', 
url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr> + <th style="vertical-align:middle">Nutrient</th> + <th style="vertical-align:middle" >Unit</th> + <th style="vertical-align:middle"><input type="text" name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th> + + + + + <th style="width:130px;line-height:1.2em;text-align:center"> + <input type="text" name="Q3483" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="2.0" id="Q3483" /> + <br> + + oz 1 NLEA serving + <br>56g + <!-- + --> + </th> + + </thead> + <tbody> + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Proximates</td> + </tr> + + + <tr class="odd"> + <td >Water + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">51.70</td> + + + <td style="text-align:right;">28.95</td> + + + </tr> + + + <tr class="even"> + <td >Energy + + + </td> + + <td style="text-align:center;">kcal</td> + <td style="text-align:right;">315</td> + + + <td style="text-align:right;">176</td> + + + </tr> + + + <tr class="odd"> + <td >Protein + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">13.40</td> + + + <td style="text-align:right;">7.50</td> + + + </tr> + + + <tr class="even"> + <td >Total lipid (fat) + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">26.60</td> + + + <td style="text-align:right;">14.90</td> + + + </tr> + + + <tr class="odd"> 
+ <td >Carbohydrate, by difference + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">4.60</td> + + + <td style="text-align:right;">2.58</td> + + + </tr> + + + <tr class="even"> + <td >Fiber, total dietary + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + <tr class="odd"> + <td >Sugars, total + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">0.00</td> + + + <td style="text-align:right;">0.00</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Minerals</td> + </tr> + + + <tr class="odd"> + <td >Calcium, Ca + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr class="even"> + <td >Iron, Fe + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.64</td> + + + <td style="text-align:right;">0.36</td> + + + </tr> + + + <tr class="odd"> + <td >Magnesium, Mg + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">14</td> + + + <td style="text-align:right;">8</td> + + + </tr> + + + <tr class="even"> + <td >Phosphorus, P + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">151</td> + + + <td style="text-align:right;">85</td> + + + </tr> + + + <tr class="odd"> + <td >Potassium, K + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">409</td> + + + <td style="text-align:right;">229</td> + + + </tr> + + + <tr class="even"> + <td >Sodium, Na + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">1411</td> + + + <td style="text-align:right;">790</td> + + + </tr> + + + <tr class="odd"> + <td >Zinc, Zn + + + </td> + + <td style="text-align:center;">mg</td> + <td 
style="text-align:right;">1.59</td> + + + <td style="text-align:right;">0.89</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Vitamins</td> + </tr> + + + <tr class="odd"> + <td >Vitamin C, total ascorbic acid + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + <tr class="even"> + <td >Thiamin + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.317</td> + + + <td style="text-align:right;">0.178</td> + + + </tr> + + + <tr class="odd"> + <td >Riboflavin + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.176</td> + + + <td style="text-align:right;">0.099</td> + + + </tr> + + + <tr class="even"> + <td >Niacin + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">3.530</td> + + + <td style="text-align:right;">1.977</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin B-6 + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.218</td> + + + <td style="text-align:right;">0.122</td> + + + </tr> + + + <tr class="even"> + <td >Folate, DFE + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">3</td> + + + <td style="text-align:right;">2</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin B-12 + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0.45</td> + + + <td style="text-align:right;">0.25</td> + + + </tr> + + + <tr class="even"> + <td >Vitamin A, RAE + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin A, IU + + + </td> + + <td style="text-align:center;">IU</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr 
class="even"> + <td >Vitamin E (alpha-tocopherol) + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.42</td> + + + <td style="text-align:right;">0.24</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin D (D2 + D3) + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0.6</td> + + + <td style="text-align:right;">0.3</td> + + + </tr> + + + <tr class="even"> + <td >Vitamin D + + + </td> + + <td style="text-align:center;">IU</td> + <td style="text-align:right;">26</td> + + + <td style="text-align:right;">15</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin K (phylloquinone) + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Lipids</td> + </tr> + + + <tr class="odd"> + <td >Fatty acids, total saturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">9.987</td> + + + <td style="text-align:right;">5.593</td> + + + </tr> + + + <tr class="even"> + <td >Fatty acids, total monounsaturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">13.505</td> + + + <td style="text-align:right;">7.563</td> + + + </tr> + + + <tr class="odd"> + <td >Fatty acids, total polyunsaturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">2.019</td> + + + <td style="text-align:right;">1.131</td> + + + </tr> + + + <tr class="even"> + <td >Cholesterol + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">71</td> + + + <td style="text-align:right;">40</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Other</td> + </tr> + + + <tr class="odd"> + <td >Caffeine + + + </td> + + <td style="text-align:center;">mg</td> + <td 
style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + + </tbody> </table> </div>
View this pull request with [`?w=t`](https://github.com/pydata/pandas/pull/3915?w=t) added to the end of the url to see that there are not differences [aside from the .gitattributes files] - this should prevent more commits with CRLF entering the repo. I think it'd be good to merge this (especially because we can avoid this happening in the future.), though (I think) it'll mess up the history for 10 minutes to pandas and `pandas/core/expressions.py`. After this, will never need to deal with CRLF again! Yay!
https://api.github.com/repos/pandas-dev/pandas/pulls/3915
2013-06-15T15:28:08Z
2013-06-17T23:46:16Z
2013-06-17T23:46:16Z
2014-06-14T06:24:42Z
TST: Move explicit connectivity checks to decorator.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 882826765d057..61cccaba44be7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -91,6 +91,11 @@ pandas 0.11.1 integers or floats that are in an epoch unit of ``D, s, ms, us, ns``, thanks @mtkini (:issue:`3969`) (e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (:issue:`3540`) - DataFrame corr method (spearman) is now cythonized. + - Improved ``network`` test decorator to catch ``IOError`` (and therefore + ``URLError`` as well). Added ``with_connectivity_check`` decorator to allow + explicitly checking a website as a proxy for seeing if there is network + connectivity. Plus, new ``optional_args`` decorator factory for decorators. + (:issue:`3910`, :issue:`3914`) **API Changes** diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 76c439afc452c..3202efbcef83a 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -386,6 +386,11 @@ Bug Fixes - ``read_html`` now correctly skips tests (:issue:`3741`) - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression in the ``to_replace`` argument wasn't working (:issue:`3907`) + - Improved ``network`` test decorator to catch ``IOError`` (and therefore + ``URLError`` as well). Added ``with_connectivity_check`` decorator to allow + explicitly checking a website as a proxy for seeing if there is network + connectivity. Plus, new ``optional_args`` decorator factory for decorators. 
+ (:issue:`3910`, :issue:`3914`) See the :ref:`full release notes <release>` or issue tracker diff --git a/pandas/io/tests/test_fred.py b/pandas/io/tests/test_fred.py index 00a90ec3da402..cd52dca507841 100644 --- a/pandas/io/tests/test_fred.py +++ b/pandas/io/tests/test_fred.py @@ -8,7 +8,7 @@ import pandas.io.data as web from pandas.util.testing import (network, assert_frame_equal, assert_series_equal, - assert_almost_equal) + assert_almost_equal, with_connectivity_check) from numpy.testing.decorators import slow import urllib2 @@ -17,7 +17,7 @@ class TestFred(unittest.TestCase): @slow - @network + @with_connectivity_check("http://www.google.com") def test_fred(self): """ Throws an exception when DataReader can't get a 200 response from @@ -26,22 +26,14 @@ def test_fred(self): start = datetime(2010, 1, 1) end = datetime(2013, 01, 27) - try: - self.assertEquals( - web.DataReader("GDP", "fred", start, end)['GDP'].tail(1), - 16004.5) + self.assertEquals( + web.DataReader("GDP", "fred", start, end)['GDP'].tail(1), + 16004.5) - self.assertRaises( - Exception, - lambda: web.DataReader("NON EXISTENT SERIES", 'fred', - start, end)) - except urllib2.URLError: - try: - urllib2.urlopen('http://google.com') - except urllib2.URLError: - raise nose.SkipTest - else: - raise + self.assertRaises( + Exception, + lambda: web.DataReader("NON EXISTENT SERIES", 'fred', + start, end)) @slow @network diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py index 5fa2120090025..d2061a6d0b57a 100644 --- a/pandas/io/tests/test_ga.py +++ b/pandas/io/tests/test_ga.py @@ -1,26 +1,26 @@ +import os import unittest -import nose from datetime import datetime +import nose import pandas as pd -import pandas.core.common as com from pandas import DataFrame -from pandas.util.testing import network, assert_frame_equal +from pandas.util.testing import network, assert_frame_equal, with_connectivity_check from numpy.testing.decorators import slow +try: + import httplib2 + from pandas.io.ga 
import GAnalytics, read_ga + from pandas.io.auth import AuthenticationConfigError, reset_token_store + from pandas.io import auth +except ImportError: + raise nose.SkipTest class TestGoogle(unittest.TestCase): _multiprocess_can_split_ = True def test_remove_token_store(self): - import os - try: - import pandas.io.auth as auth - from pandas.io.ga import reset_token_store - except ImportError: - raise nose.SkipTest - auth.DEFAULT_TOKEN_FILE = 'test.dat' with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh: fh.write('test') @@ -31,13 +31,6 @@ def test_remove_token_store(self): @slow @network def test_getdata(self): - try: - import httplib2 - from pandas.io.ga import GAnalytics, read_ga - from pandas.io.auth import AuthenticationConfigError - except ImportError: - raise nose.SkipTest - try: end_date = datetime.now() start_date = end_date - pd.offsets.Day() * 5 @@ -76,24 +69,10 @@ def test_getdata(self): except AuthenticationConfigError: raise nose.SkipTest - except httplib2.ServerNotFoundError: - try: - h = httplib2.Http() - response, content = h.request("http://www.google.com") - raise - except httplib2.ServerNotFoundError: - raise nose.SkipTest @slow - @network + @with_connectivity_check("http://www.google.com") def test_iterator(self): - try: - import httplib2 - from pandas.io.ga import GAnalytics, read_ga - from pandas.io.auth import AuthenticationConfigError - except ImportError: - raise nose.SkipTest - try: reader = GAnalytics() @@ -118,24 +97,10 @@ def test_iterator(self): except AuthenticationConfigError: raise nose.SkipTest - except httplib2.ServerNotFoundError: - try: - h = httplib2.Http() - response, content = h.request("http://www.google.com") - raise - except httplib2.ServerNotFoundError: - raise nose.SkipTest @slow - @network + @with_connectivity_check("http://www.google.com") def test_segment(self): - try: - import httplib2 - from pandas.io.ga import GAnalytics, read_ga - from pandas.io.auth import AuthenticationConfigError - except ImportError: - raise 
nose.SkipTest - try: end_date = datetime.now() start_date = end_date - pd.offsets.Day() * 5 @@ -186,16 +151,7 @@ def test_segment(self): except AuthenticationConfigError: raise nose.SkipTest - except httplib2.ServerNotFoundError: - try: - h = httplib2.Http() - response, content = h.request("http://www.google.com") - raise - except httplib2.ServerNotFoundError: - raise nose.SkipTest - if __name__ == '__main__': - import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py index 8c16c60ac8b87..8ceda94f07a52 100644 --- a/pandas/io/tests/test_google.py +++ b/pandas/io/tests/test_google.py @@ -2,57 +2,30 @@ import nose from datetime import datetime +import numpy as np import pandas as pd import pandas.io.data as web -from pandas.util.testing import (network, assert_series_equal) -from numpy.testing.decorators import slow -import numpy as np - -import urllib2 +from pandas.util.testing import network, with_connectivity_check class TestGoogle(unittest.TestCase): - @network + @with_connectivity_check("http://www.google.com") def test_google(self): # asserts that google is minimally working and that it throws - # an excecption when DataReader can't get a 200 response from + # an exception when DataReader can't get a 200 response from # google start = datetime(2010, 1, 1) end = datetime(2013, 01, 27) - try: - self.assertEquals( - web.DataReader("F", 'google', start, end)['Close'][-1], - 13.68) - except urllib2.URLError: - try: - urllib2.urlopen('http://www.google.com') - except urllib2.URLError: - raise nose.SkipTest - else: - raise - - @network - def test_google_non_existent(self): - # asserts that google is minimally working and that it throws - # an excecption when DataReader can't get a 200 response from - # google - start = datetime(2010, 1, 1) - end = datetime(2013, 01, 27) + self.assertEquals( + web.DataReader("F", 'google', start, end)['Close'][-1], + 
13.68) - try: - self.assertRaises( - Exception, - lambda: web.DataReader("NON EXISTENT TICKER", 'google', - start, end)) - except urllib2.URLError: - try: - urllib2.urlopen('http://www.google.com') - except urllib2.URLError: - raise nose.SkipTest - else: - raise + self.assertRaises( + Exception, + lambda: web.DataReader("NON EXISTENT TICKER", 'google', + start, end)) @network @@ -60,64 +33,40 @@ def test_get_quote(self): self.assertRaises(NotImplementedError, lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG']))) - @network + @with_connectivity_check('http://www.google.com') def test_get_goog_volume(self): - try: - df = web.get_data_google('GOOG') - assert df.Volume.ix['OCT-08-2010'] == 2863473 - except IOError: - try: - urllib2.urlopen('http://www.google.com') - except IOError: - raise nose.SkipTest - else: - raise + df = web.get_data_google('GOOG') + assert df.Volume.ix['OCT-08-2010'] == 2863473 - @network + @with_connectivity_check('http://www.google.com') def test_get_multi1(self): - try: - sl = ['AAPL', 'AMZN', 'GOOG'] - pan = web.get_data_google(sl, '2012') - ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] - assert ts[0].dayofyear == 96 - except IOError: - try: - urllib2.urlopen('http://www.google.com') - except IOError: - raise nose.SkipTest - else: - raise + sl = ['AAPL', 'AMZN', 'GOOG'] + pan = web.get_data_google(sl, '2012') + ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] + assert ts[0].dayofyear == 96 - @network + @with_connectivity_check('http://www.google.com') def test_get_multi2(self): - try: - pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') - expected = [19.02, 28.23, 25.39] - result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist() - assert result == expected - - # sanity checking - t= np.array(result) - assert np.issubdtype(t.dtype, np.floating) - assert t.shape == (3,) - - expected = [[ 18.99, 28.4 , 25.18], - [ 18.58, 28.31, 25.13], - [ 19.03, 28.16, 25.52], - [ 18.81, 
28.82, 25.87]] - result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values - assert (result == expected).all() - - # sanity checking - t= np.array(pan) - assert np.issubdtype(t.dtype, np.floating) - except IOError: - try: - urllib2.urlopen('http://www.google.com') - except IOError: - raise nose.SkipTest - else: - raise + pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') + expected = [19.02, 28.23, 25.39] + result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist() + assert result == expected + + # sanity checking + t= np.array(result) + assert np.issubdtype(t.dtype, np.floating) + assert t.shape == (3,) + + expected = [[ 18.99, 28.4 , 25.18], + [ 18.58, 28.31, 25.13], + [ 19.03, 28.16, 25.52], + [ 18.81, 28.82, 25.87]] + result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values + assert (result == expected).all() + + # sanity checking + t= np.array(pan) + assert np.issubdtype(t.dtype, np.floating) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_yahoo.py index 0e2c2022af422..712475f76f5ed 100644 --- a/pandas/io/tests/test_yahoo.py +++ b/pandas/io/tests/test_yahoo.py @@ -4,41 +4,27 @@ import pandas as pd import pandas.io.data as web -from pandas.util.testing import (network, assert_frame_equal, - assert_series_equal, - assert_almost_equal) -from numpy.testing.decorators import slow - -import urllib2 +from pandas.util.testing import network, assert_series_equal, with_connectivity_check class TestYahoo(unittest.TestCase): - @network + @with_connectivity_check("http://www.google.com") def test_yahoo(self): # asserts that yahoo is minimally working and that it throws - # an excecption when DataReader can't get a 200 response from + # an exception when DataReader can't get a 200 response from # yahoo start = datetime(2010, 1, 1) end = datetime(2013, 01, 27) - try: - self.assertEquals( - 
web.DataReader("F", 'yahoo', start, end)['Close'][-1], - 13.68) + self.assertEquals( + web.DataReader("F", 'yahoo', start, end)['Close'][-1], + 13.68) - self.assertRaises( - Exception, - lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo', + self.assertRaises( + Exception, + lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo', start, end)) - except urllib2.URLError: - try: - urllib2.urlopen('http://www.google.com') - except urllib2.URLError: - raise nose.SkipTest - else: - raise - @network def test_get_quote(self): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 5e1ab59305bab..5a583ca3ae7d9 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -3,6 +3,7 @@ # pylint: disable-msg=W0402 from datetime import datetime +from functools import wraps import random import string import sys @@ -12,6 +13,8 @@ from contextlib import contextmanager # contextlib is available since 2.5 from distutils.version import LooseVersion +import urllib2 +import nose from numpy.random import randn import numpy as np @@ -36,6 +39,7 @@ N = 30 K = 4 +_RAISE_NETWORK_ERROR_DEFAULT = False def rands(n): @@ -663,18 +667,55 @@ def skip_if_no_package(*args, **kwargs): # Additional tags decorators for nose # +def optional_args(decorator): + """allows a decorator to take optional positional and keyword arguments. + Assumes that taking a single, callable, positional argument means that + it is decorating a function, i.e. 
something like this:: -def network(t): + @my_decorator + def function(): pass + + Calls decorator with decorator(f, *args, **kwargs)""" + + @wraps(decorator) + def wrapper(*args, **kwargs): + def dec(f): + return decorator(f, *args, **kwargs) + + is_decorating = not kwargs and len(args) == 1 and callable(args[0]) + if is_decorating: + f = args[0] + args = [] + return dec(f) + else: + return dec + + return wrapper + + +@optional_args +def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, + error_classes=(IOError,)): """ - Label a test as requiring network connection. + Label a test as requiring network connection and skip test if it encounters a ``URLError``. In some cases it is not possible to assume network presence (e.g. Debian build hosts). + You can pass an optional ``raise_on_error`` argument to the decorator, in + which case it will always raise an error even if it's not a subclass of + ``error_classes``. + Parameters ---------- t : callable The test requiring network connectivity. + raise_on_error : bool + If True, never catches errors. + error_classes : iterable + error classes to ignore. If not in ``error_classes``, raises the error. + defaults to URLError. Be careful about changing the error classes here, + it may result in undefined behavior. Returns ------- @@ -685,18 +726,136 @@ def network(t): -------- A test can be decorated as requiring network like this:: - from pandas.util.testing import * - - @network - def test_network(self): - print 'Fetch the stars from http://' + >>> from pandas.util.testing import network + >>> import urllib2 + >>> @network + ... def test_network(): + ... urllib2.urlopen("rabbit://bonanza.com") + ... + >>> try: + ... test_network() + ... except nose.SkipTest: + ... print "SKIPPING!" + ... + SKIPPING! + + Alternatively, you can use set ``raise_on_error`` in order to get + the error to bubble up, e.g.:: + + >>> @network(raise_on_error=True) + ... def test_network(): + ... 
urllib2.urlopen("complaint://deadparrot.com") + ... + >>> test_network() + Traceback (most recent call last): + ... + URLError: <urlopen error unknown url type: complaint> And use ``nosetests -a '!network'`` to exclude running tests requiring - network connectivity. + network connectivity. ``_RAISE_NETWORK_ERROR_DEFAULT`` in + ``pandas/util/testing.py`` sets the default behavior (currently False). + """ + t.network = True + + @wraps(t) + def network_wrapper(*args, **kwargs): + if raise_on_error: + return t(*args, **kwargs) + else: + try: + return t(*args, **kwargs) + except error_classes as e: + raise nose.SkipTest("Skipping test %s" % e) + + return network_wrapper + + +def can_connect(url): + """tries to connect to the given url. True if succeeds, False if IOError raised""" + try: + urllib2.urlopen(url) + except IOError: + return False + else: + return True + + +@optional_args +def with_connectivity_check(t, url="http://www.google.com", + raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False, + error_classes=IOError): """ + Label a test as requiring network connection and, if an error is + encountered, only raise if it does not find a network connection. + + In comparison to ``network``, this assumes an added contract to your test: + you must assert that, under normal conditions, your test will ONLY fail if + it does not have network connectivity. + + You can call this in 3 ways: as a standard decorator, with keyword + arguments, or with a positional argument that is the url to check. + + Parameters + ---------- + t : callable + The test requiring network connectivity. + url : path + The url to test via ``urllib2.urlopen`` to check for connectivity. + Defaults to 'http://www.google.com'. + raise_on_error : bool + If True, never catches errors. + check_before_test : bool + If True, checks connectivity before running the test case. + error_classes : tuple or Exception + error classes to ignore. If not in ``error_classes``, raises the error. 
+ defaults to IOError. Be careful about changing the error classes here. + + NOTE: ``raise_on_error`` supercedes ``check_before_test`` + Returns + ------- + t : callable + The decorated test ``t``, with checks for connectivity errors. + Example + ------- + + In this example, you see how it will raise the error if it can connect to + the url:: + >>> @with_connectivity_check("http://www.yahoo.com") + ... def test_something_with_yahoo(): + ... raise IOError("Failure Message") + >>> test_something_with_yahoo() + Traceback (most recent call last): + ... + IOError: Failure Message + + I you set check_before_test, it will check the url first and not run the test on failure:: + >>> @with_connectivity_check("failing://url.blaher", check_before_test=True) + ... def test_something(): + ... print("I ran!") + ... raise ValueError("Failure") + >>> test_something() + Traceback (most recent call last): + ... + SkipTest + """ t.network = True - return t + + @wraps(t) + def wrapper(*args, **kwargs): + if check_before_test and not raise_on_error: + if not can_connect(url): + raise nose.SkipTest + try: + return t(*args, **kwargs) + except error_classes as e: + if raise_on_error or can_connect(url): + raise + else: + raise nose.SkipTest("Skipping test due to lack of connectivity" + " and error %s" % e) + + return wrapper class SimpleMock(object): @@ -743,11 +902,13 @@ def stdin_encoding(encoding=None): """ import sys + _stdin = sys.stdin sys.stdin = SimpleMock(sys.stdin, "encoding", encoding) yield sys.stdin = _stdin + def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs): """ Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement. 
@@ -779,6 +940,7 @@ def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs): """ import re + try: callable(*args, **kwargs) except Exception as e: @@ -792,7 +954,7 @@ def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs): expected_regexp = re.compile(regexp) if not expected_regexp.search(str(e)): raise AssertionError('"%s" does not match "%s"' % - (expected_regexp.pattern, str(e))) + (expected_regexp.pattern, str(e))) else: # Apparently some exceptions don't have a __name__ attribute? Just aping unittest library here name = getattr(exception, "__name__", str(exception))
Instead, `network` decorator in pandas.util.testing catches `IOError` instead. You have to opt into failing on tests by setting `pandas.util.testing._RAISE_NETWORK_ERROR_DEFAULT` to `True`. Also adds a `with_network_connectivity_check` that can automatically check for a connection. Fixes #3910. This version of the fix ignores all IOErrors and assumes there are connectivity problems with any URLError.
https://api.github.com/repos/pandas-dev/pandas/pulls/3914
2013-06-15T12:12:57Z
2013-06-21T01:48:25Z
2013-06-21T01:48:25Z
2014-06-12T07:13:41Z
BUG: (GH3911) groupby appying with custom function not converting dtypes of result
diff --git a/RELEASE.rst b/RELEASE.rst index 285bbb2095488..8fb9406a3ba0e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -234,8 +234,11 @@ pandas 0.11.1 - ``read_html`` now correctly skips tests (GH3741_) - PandasObjects raise TypeError when trying to hash (GH3882_) - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_) - - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_) + - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) + in ``read_csv`` (GH3795_) - Fix index name not propogating when using ``loc/ix`` (GH3880_) + - Fix groupby when applying a custom function resulting in a returned DataFrame was + not converting dtypes (GH3911_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -331,6 +334,7 @@ pandas 0.11.1 .. _GH3873: https://github.com/pydata/pandas/issues/3873 .. _GH3877: https://github.com/pydata/pandas/issues/3877 .. _GH3880: https://github.com/pydata/pandas/issues/3880 +.. 
_GH3911: https://github.com/pydata/pandas/issues/3911 pandas 0.11.0 diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 0be5d438e5e7c..168615c060c2b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1928,7 +1928,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return Series(values, index=key_index) return DataFrame(stacked_values, index=index, - columns=columns) + columns=columns).convert_objects() else: return Series(values, index=key_index) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index f3a608b82e756..6989d3bcae42b 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -261,6 +261,20 @@ def test_groupby_nonobject_dtype(self): expected = self.mframe.groupby(key.astype('O')).sum() assert_frame_equal(result, expected) + # GH 3911, mixed frame non-conversion + df = self.df_mixed_floats.copy() + df['value'] = range(len(df)) + + def max_value(group): + return group.ix[group['value'].idxmax()] + + applied = df.groupby('A').apply(max_value) + result = applied.get_dtype_counts() + result.sort() + expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 }) + expected.sort() + assert_series_equal(result,expected) + def test_groupby_return_type(self): # GH2893, return a reduced type
closes #3911
https://api.github.com/repos/pandas-dev/pandas/pulls/3913
2013-06-15T11:42:08Z
2013-06-15T12:25:21Z
2013-06-15T12:25:21Z
2014-07-07T14:54:54Z
FIX/ENH: attempt soft conversion of object series before raising a TypeError when plotting
diff --git a/RELEASE.rst b/RELEASE.rst index 9d862c687bcac..f03e10df1b460 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -77,8 +77,10 @@ pandas 0.11.1 dependencies offered for Linux) (GH3837_). - Plotting functions now raise a ``TypeError`` before trying to plot anything if the associated objects have have a dtype of ``object`` (GH1818_, - GH3572_). This happens before any drawing takes place which elimnates any - spurious plots from showing up. + GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to + numeric arrays if possible so that you can still plot, for example, an + object array with floats. This happens before any drawing takes place which + elimnates any spurious plots from showing up. - Added Faq section on repr display options, to help users customize their setup. - ``where`` operations that result in block splitting are much faster (GH3733_) - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_) @@ -341,13 +343,13 @@ pandas 0.11.1 .. _GH3834: https://github.com/pydata/pandas/issues/3834 .. _GH3873: https://github.com/pydata/pandas/issues/3873 .. _GH3877: https://github.com/pydata/pandas/issues/3877 +.. _GH3659: https://github.com/pydata/pandas/issues/3659 +.. _GH3679: https://github.com/pydata/pandas/issues/3679 .. _GH3880: https://github.com/pydata/pandas/issues/3880 -<<<<<<< HEAD .. _GH3911: https://github.com/pydata/pandas/issues/3911 -======= .. _GH3907: https://github.com/pydata/pandas/issues/3907 ->>>>>>> 7b5933247b80174de4ba571e95a1add809dd9d09 - +.. _GH3911: https://github.com/pydata/pandas/issues/3911 +.. 
_GH3912: https://github.com/pydata/pandas/issues/3912 pandas 0.11.0 ============= diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index dd87c5ea827c3..76ae85a53102b 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -300,9 +300,11 @@ Bug Fixes ~~~~~~~~~ - Plotting functions now raise a ``TypeError`` before trying to plot anything - if the associated objects have have a ``dtype`` of ``object`` (GH1818_, - GH3572_). This happens before any drawing takes place which elimnates any - spurious plots from showing up. + if the associated objects have have a dtype of ``object`` (GH1818_, + GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to + numeric arrays if possible so that you can still plot, for example, an + object array with floats. This happens before any drawing takes place which + elimnates any spurious plots from showing up. - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is a list or tuple. @@ -416,3 +418,5 @@ on GitHub for a complete list. .. _GH3659: https://github.com/pydata/pandas/issues/3659 .. _GH3679: https://github.com/pydata/pandas/issues/3679 .. _GH3907: https://github.com/pydata/pandas/issues/3907 +.. _GH3911: https://github.com/pydata/pandas/issues/3911 +.. _GH3912: https://github.com/pydata/pandas/issues/3912 diff --git a/pandas/io/common.py b/pandas/io/common.py index 353930482c8b8..3bd6dd5d74ba8 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -9,6 +9,10 @@ _VALID_URLS.discard('') +class PerformanceWarning(Warning): + pass + + def _is_url(url): """Check to see if a URL has a valid protocol. 
@@ -26,27 +30,29 @@ def _is_url(url): except: return False + def _is_s3_url(url): - """ Check for an s3 url """ + """Check for an s3 url""" try: return urlparse.urlparse(url).scheme == 's3' except: return False + def get_filepath_or_buffer(filepath_or_buffer, encoding=None): - """ if the filepath_or_buffer is a url, translate and return the buffer - passthru otherwise - - Parameters - ---------- - filepath_or_buffer : a url, filepath, or buffer - encoding : the encoding to use to decode py3 bytes, default is 'utf-8' - - Returns - ------- - a filepath_or_buffer, the encoding - - """ + """ + If the filepath_or_buffer is a url, translate and return the buffer + passthru otherwise. + + Parameters + ---------- + filepath_or_buffer : a url, filepath, or buffer + encoding : the encoding to use to decode py3 bytes, default is 'utf-8' + + Returns + ------- + a filepath_or_buffer, the encoding + """ if _is_url(filepath_or_buffer): from urllib2 import urlopen diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b1b7b80e5fd23..62aa1b99dfac0 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -12,23 +12,22 @@ import warnings import numpy as np -from pandas import ( - Series, TimeSeries, DataFrame, Panel, Panel4D, Index, - MultiIndex, Int64Index, Timestamp -) +from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index, + MultiIndex, Int64Index, Timestamp) from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel from pandas.sparse.array import BlockIndex, IntIndex from pandas.tseries.api import PeriodIndex, DatetimeIndex -from pandas.core.common import adjoin, isnull, is_list_like -from pandas.core.algorithms import match, unique, factorize +from pandas.core.common import adjoin, is_list_like +from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical -from pandas.core.common import _asarray_tuplesafe, _try_sort +from pandas.core.common import _asarray_tuplesafe from 
pandas.core.internals import BlockManager, make_block from pandas.core.reshape import block2d_to_blocknd, factor_indexer -from pandas.core.index import Int64Index, _ensure_index +from pandas.core.index import _ensure_index import pandas.core.common as com from pandas.tools.merge import concat from pandas.util import py3compat +from pandas.io.common import PerformanceWarning import pandas.lib as lib import pandas.algos as algos @@ -42,11 +41,14 @@ # PY3 encoding if we don't specify _default_encoding = 'UTF-8' + def _ensure_decoded(s): """ if we have bytes, decode them to unicde """ if isinstance(s, np.bytes_): s = s.decode('UTF-8') return s + + def _ensure_encoding(encoding): # set the encoding if we need if encoding is None: @@ -54,20 +56,31 @@ def _ensure_encoding(encoding): encoding = _default_encoding return encoding -class IncompatibilityWarning(Warning): pass + +class IncompatibilityWarning(Warning): + pass + + incompatibility_doc = """ -where criteria is being ignored as this version [%s] is too old (or not-defined), -read the file in and write it out to a new file to upgrade (with the copy_to method) +where criteria is being ignored as this version [%s] is too old (or +not-defined), read the file in and write it out to a new file to upgrade (with +the copy_to method) """ -class AttributeConflictWarning(Warning): pass + + +class AttributeConflictWarning(Warning): + pass + + attribute_conflict_doc = """ -the [%s] attribute of the existing index is [%s] which conflicts with the new [%s], -resetting the attribute to None +the [%s] attribute of the existing index is [%s] which conflicts with the new +[%s], resetting the attribute to None """ -class PerformanceWarning(Warning): pass + + performance_doc = """ -your performance may suffer as PyTables will pickle object types that it cannot map -directly to c-types [inferred_type->%s,key->%s] [items->%s] +your performance may suffer as PyTables will pickle object types that it cannot +map directly to c-types 
[inferred_type->%s,key->%s] [items->%s] """ # map object types diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 0755caf45d336..e57e5a9af2fc0 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -10,6 +10,7 @@ from pandas.util.testing import ensure_clean from pandas.core.config import set_option + import numpy as np from numpy.testing import assert_array_equal @@ -189,8 +190,7 @@ def test_bootstrap_plot(self): from pandas.tools.plotting import bootstrap_plot _check_plot_works(bootstrap_plot, self.ts, size=10) - @slow - def test_all_invalid_plot_data(self): + def test_invalid_plot_data(self): s = Series(list('abcd')) kinds = 'line', 'bar', 'barh', 'kde', 'density' @@ -198,6 +198,14 @@ def test_all_invalid_plot_data(self): self.assertRaises(TypeError, s.plot, kind=kind) @slow + def test_valid_object_plot(self): + from pandas.io.common import PerformanceWarning + s = Series(range(10), dtype=object) + kinds = 'line', 'bar', 'barh', 'kde', 'density' + + for kind in kinds: + _check_plot_works(s.plot, kind=kind) + def test_partially_invalid_plot_data(self): s = Series(['a', 'b', 1.0, 2]) kinds = 'line', 'bar', 'barh', 'kde', 'density' diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 83ad58c1eb41c..4e85d742e352c 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -878,15 +878,20 @@ def _get_layout(self): def _compute_plot_data(self): try: - # might be a frame + # might be an ndframe numeric_data = self.data._get_numeric_data() - except AttributeError: - # a series, but no object dtypes allowed! 
- if self.data.dtype == np.object_: - raise TypeError('invalid dtype for plotting, please cast to a ' - 'numeric dtype explicitly if you want to plot') - + except AttributeError: # TODO: rm in 0.12 (series-inherit-ndframe) numeric_data = self.data + orig_dtype = numeric_data.dtype + + # possible object array of numeric data + if orig_dtype == np.object_: + numeric_data = numeric_data.convert_objects() # soft convert + + # still an object dtype so we can't plot it + if numeric_data.dtype == np.object_: + raise TypeError('Series has object dtype and cannot be' + ' converted: no numeric data to plot') try: is_empty = numeric_data.empty @@ -895,7 +900,8 @@ def _compute_plot_data(self): # no empty frames or series allowed if is_empty: - raise TypeError('No numeric data to plot') + raise TypeError('Empty {0!r}: no numeric data to ' + 'plot'.format(numeric_data.__class__.__name__)) self.data = numeric_data diff --git a/pandas/util/testing.py b/pandas/util/testing.py index dd86862a2d551..20e59b6d3342a 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -7,6 +7,7 @@ import string import sys import tempfile +import warnings from contextlib import contextmanager # contextlib is available since 2.5 @@ -39,7 +40,7 @@ def rands(n): choices = string.ascii_letters + string.digits - return ''.join([random.choice(choices) for _ in xrange(n)]) + return ''.join(random.choice(choices) for _ in xrange(n)) def randu(n): @@ -746,3 +747,48 @@ def stdin_encoding(encoding=None): sys.stdin = SimpleMock(sys.stdin, "encoding", encoding) yield sys.stdin = _stdin + + +@contextmanager +def assert_produces_warning(expected_warning=Warning, filter_level="always"): + """ + Context manager for running code that expects to raise (or not raise) + warnings. Checks that code raises the expected warning and only the + expected warning. Pass ``False`` or ``None`` to check that it does *not* + raise a warning. Defaults to ``exception.Warning``, baseclass of all + Warnings. 
(basically a wrapper around ``warnings.catch_warnings``). + + >>> import warnings + >>> with assert_produces_warning(): + ... warnings.warn(UserWarning()) + ... + >>> with assert_produces_warning(False): + ... warnings.warn(RuntimeWarning()) + ... + Traceback (most recent call last): + ... + AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. + >>> with assert_produces_warning(UserWarning): + ... warnings.warn(RuntimeWarning()) + Traceback (most recent call last): + ... + AssertionError: Did not see expected warning of class 'UserWarning'. + + ..warn:: This is *not* thread-safe. + """ + with warnings.catch_warnings(record=True) as w: + saw_warning = False + warnings.simplefilter(filter_level) + yield w + extra_warnings = [] + for actual_warning in w: + if (expected_warning and issubclass(actual_warning.category, + expected_warning)): + saw_warning = True + else: + extra_warnings.append(actual_warning.category.__name__) + if expected_warning: + assert saw_warning, ("Did not see expected warning of class %r." + % expected_warning.__name__) + assert not extra_warnings, ("Caused unexpected warning(s): %r." + % extra_warnings)
https://api.github.com/repos/pandas-dev/pandas/pulls/3912
2013-06-15T09:58:54Z
2013-06-16T16:16:06Z
2013-06-16T16:16:06Z
2014-06-14T16:36:03Z
BUG/API: remove infer_types from replace and fix compiled regex bug
diff --git a/RELEASE.rst b/RELEASE.rst index 285bbb2095488..ce0823d72296b 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -127,6 +127,10 @@ pandas 0.11.1 - ``DataFrame.interpolate()`` is now deprecated. Please use ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_, GH3675_, GH3676_). + - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are + deprecated + - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now + performs conversion by default. (GH3907_) - Deprecated display.height, display.width is now only a formatting option does not control triggering of summary, similar to < 0.11.0. - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column @@ -141,6 +145,8 @@ pandas 0.11.1 ``to_pickle`` instance method, ``save`` and ``load`` will give deprecation warning. - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are deprecated + - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are + deprecated - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_) - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned as an int, maxing with ``int64``, to avoid precision issues (GH3733_) @@ -236,6 +242,8 @@ pandas 0.11.1 - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_) - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_) - Fix index name not propogating when using ``loc/ix`` (GH3880_) + - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression + in the ``to_replace`` argument wasn't working (GH3907_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -331,6 +339,7 @@ pandas 0.11.1 .. _GH3873: https://github.com/pydata/pandas/issues/3873 .. _GH3877: https://github.com/pydata/pandas/issues/3877 .. 
_GH3880: https://github.com/pydata/pandas/issues/3880 +.. _GH3907: https://github.com/pydata/pandas/issues/3907 pandas 0.11.0 diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index dfc36258a680f..dd87c5ea827c3 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -98,6 +98,9 @@ API changes - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are deprecated + - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now + performs conversion by default. (GH3907_) + - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_) - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_) @@ -356,6 +359,8 @@ Bug Fixes - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_) - ``read_html`` now correctly skips tests (GH3741_) + - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression + in the ``to_replace`` argument wasn't working (GH3907_) See the `full release notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker @@ -410,3 +415,4 @@ on GitHub for a complete list. .. _GH3877: https://github.com/pydata/pandas/issues/3877 .. _GH3659: https://github.com/pydata/pandas/issues/3659 .. _GH3679: https://github.com/pydata/pandas/issues/3679 +.. 
_GH3907: https://github.com/pydata/pandas/issues/3907 diff --git a/pandas/core/common.py b/pandas/core/common.py index d0dcb0b9770b8..a31c92caf4343 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -3,6 +3,7 @@ """ import itertools +import re from datetime import datetime from numpy.lib.format import read_array, write_array @@ -1585,8 +1586,21 @@ def is_complex_dtype(arr_or_dtype): return issubclass(tipo, np.complexfloating) +def is_re(obj): + return isinstance(obj, re._pattern_type) + + +def is_re_compilable(obj): + try: + re.compile(obj) + except TypeError: + return False + else: + return True + + def is_list_like(arg): - return hasattr(arg, '__iter__') and not isinstance(arg, basestring) or hasattr(arg,'len') + return hasattr(arg, '__iter__') and not isinstance(arg, basestring) def _is_sequence(x): try: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f9f8a424f8d96..5e3d3e95d8e56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -33,8 +33,7 @@ _maybe_convert_indices) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, - create_block_manager_from_blocks, - _re_compilable) + create_block_manager_from_blocks) from pandas.core.series import Series, _radd_compat import pandas.core.expressions as expressions from pandas.compat.scipy import scoreatpercentile as _quantile @@ -3483,7 +3482,7 @@ def bfill(self, axis=0, inplace=False, limit=None): limit=limit) def replace(self, to_replace=None, value=None, inplace=False, limit=None, - regex=False, infer_types=False, method=None, axis=None): + regex=False, method=None, axis=None): """ Replace values given in 'to_replace' with 'value'. @@ -3545,8 +3544,6 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, string. Otherwise, `to_replace` must be ``None`` because this parameter will be interpreted as a regular expression or a list, dict, or array of regular expressions. 
- infer_types : bool, default True - If ``True`` attempt to convert object blocks to a better dtype. See also -------- @@ -3582,7 +3579,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, and play with this method to gain intuition about how it works. """ - if not isinstance(regex, bool) and to_replace is not None: + if not com.is_bool(regex) and to_replace is not None: raise AssertionError("'to_replace' must be 'None' if 'regex' is " "not a bool") if method is not None: @@ -3628,8 +3625,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, to_replace, value = keys, values return self.replace(to_replace, value, inplace=inplace, - limit=limit, regex=regex, - infer_types=infer_types) + limit=limit, regex=regex) else: if not len(self.columns): return self @@ -3673,14 +3669,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, new_data = self._data.replace(to_replace, value, inplace=inplace, regex=regex) elif to_replace is None: - if not (_re_compilable(regex) or + if not (com.is_re_compilable(regex) or isinstance(regex, (list, dict, np.ndarray, Series))): raise TypeError("'regex' must be a string or a compiled " "regular expression or a list or dict of " "strings or regular expressions, you " "passed a {0}".format(type(regex))) return self.replace(regex, value, inplace=inplace, limit=limit, - regex=True, infer_types=infer_types) + regex=True) else: # dest iterable dict-like @@ -3701,8 +3697,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, raise TypeError('Invalid "to_replace" type: ' '{0}'.format(type(to_replace))) # pragma: no cover - if infer_types: - new_data = new_data.convert() + new_data = new_data.convert(copy=not inplace, convert_numeric=False) if inplace: self._data = new_data diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 49d92afc46848..01e976e397111 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,13 
+1,14 @@ import itertools import re from datetime import datetime -import collections from numpy import nan import numpy as np -from pandas.core.common import _possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE -from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes +from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE, + _TD_DTYPE) +from pandas.core.index import (Index, MultiIndex, _ensure_index, + _handle_legacy_indexes) from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices import pandas.core.common as com import pandas.lib as lib @@ -18,10 +19,6 @@ from pandas.util import py3compat -def _re_compilable(ex): - return isinstance(ex, (basestring, re._pattern_type)) - - class Block(object): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas @@ -744,14 +741,16 @@ def should_store(self, value): def replace(self, to_replace, value, inplace=False, filter=None, regex=False): blk = [self] - to_rep_is_list = (isinstance(to_replace, collections.Iterable) and not - isinstance(to_replace, basestring)) - value_is_list = (isinstance(value, collections.Iterable) and not - isinstance(to_replace, basestring)) + to_rep_is_list = com.is_list_like(to_replace) + value_is_list = com.is_list_like(value) both_lists = to_rep_is_list and value_is_list either_list = to_rep_is_list or value_is_list - if not either_list and not regex: + if not either_list and com.is_re(to_replace): + blk[0], = blk[0]._replace_single(to_replace, value, + inplace=inplace, filter=filter, + regex=True) + elif not (either_list or regex): blk = super(ObjectBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) @@ -773,15 +772,18 @@ def replace(self, to_replace, value, inplace=False, filter=None, def _replace_single(self, to_replace, value, inplace=False, filter=None, regex=False): # to_replace is regex compilable - to_rep_re = _re_compilable(to_replace) + to_rep_re = 
com.is_re_compilable(to_replace) # regex is regex compilable - regex_re = _re_compilable(regex) + regex_re = com.is_re_compilable(regex) + # only one will survive if to_rep_re and regex_re: raise AssertionError('only one of to_replace and regex can be ' 'regex compilable') + # if regex was passed as something that can be a regex (rather than a + # boolean) if regex_re: to_replace = regex @@ -1668,7 +1670,6 @@ def get(self, item): mgr._consolidate_inplace() return mgr - def iget(self, i): item = self.items[i] if self.items.is_unique: @@ -1970,7 +1971,6 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): def _reindex_indexer_items(self, new_items, indexer, fill_value): # TODO: less efficient than I'd like - is_unique = self.items.is_unique item_order = com.take_1d(self.items.values, indexer) # keep track of what items aren't found anywhere @@ -2141,7 +2141,6 @@ def rename_axis(self, mapper, axis=1): def rename_items(self, mapper, copydata=True): new_items = Index([mapper(x) for x in self.items]) - is_unique = new_items.is_unique new_blocks = [] for block in self.blocks: diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index d42d950bd2e7b..db01545fb3c9d 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,5 +1,6 @@ from datetime import datetime import sys +import re import nose import unittest @@ -244,6 +245,18 @@ def test_groupby(): assert v == expected[k] +def test_is_list_like(): + passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]), + Series([]), Series(['a']).str) + fails = (1, '2', object()) + + for p in passes: + assert com.is_list_like(p) + + for f in fails: + assert not com.is_list_like(f) + + def test_ensure_int32(): values = np.arange(10, dtype=np.int32) result = com._ensure_int32(values) @@ -288,6 +301,30 @@ def test_ensure_platform_int(): # expected = u"\u05d0".encode('utf-8') # assert (result == expected) + +def test_is_re(): + passes = re.compile('ad'), + fails = 
'x', 2, 3, object() + + for p in passes: + assert com.is_re(p) + + for f in fails: + assert not com.is_re(f) + + +def test_is_recompilable(): + passes = (r'a', u'x', r'asdf', re.compile('adsf'), ur'\u2233\s*', + re.compile(r'')) + fails = 1, [], object() + + for p in passes: + assert com.is_re_compilable(p) + + for f in fails: + assert not com.is_re_compilable(f) + + class TestTake(unittest.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 5b4d582e5e42e..8342d218e76bb 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6696,7 +6696,7 @@ def test_regex_replace_list_to_scalar(self): res3 = df.copy() res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True) res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True) - expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4, object), + expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4), 'c': [nan, nan, nan, 'd']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) @@ -6772,6 +6772,30 @@ def test_replace(self): df = DataFrame(index=['a', 'b']) assert_frame_equal(df, df.replace(5, 7)) + def test_replace_list(self): + obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')} + dfobj = DataFrame(obj) + + ## lists of regexes and values + # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN] + to_replace_res = [r'.', r'e'] + values = [nan, 'crap'] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame({'a': ['a', 'b', nan, nan], + 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap', + 'l', 'o']}) + assert_frame_equal(res, expec) + + # list of [v1, v2, ..., vN] -> [v1, v2, .., vN] + to_replace_res = [r'.', r'f'] + values = [r'..', r'crap'] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g', + 'h'], + 'c': ['h', 'e', 'l', 'o']}) + + assert_frame_equal(res, expec) + def test_replace_series_dict(self): # from GH 3064 df = 
DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}}) @@ -6792,10 +6816,24 @@ def test_replace_series_dict(self): result = df.replace(s, df.mean()) assert_frame_equal(result, expected) + def test_replace_convert(self): + # gh 3907 + df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']]) + m = {'foo': 1, 'bar': 2, 'bah': 3} + rep = df.replace(m) + expec = Series([np.int_, np.int_, np.int_]) + res = rep.dtypes + assert_series_equal(expec, res) + def test_replace_mixed(self): self.mixed_frame['foo'][5:20] = nan self.mixed_frame['A'][-10:] = nan + result = self.mixed_frame.replace(np.nan, -18) + expected = self.mixed_frame.fillna(value=-18) + assert_frame_equal(result, expected) + assert_frame_equal(result.replace(-18, nan), self.mixed_frame) + result = self.mixed_frame.replace(np.nan, -1e8) expected = self.mixed_frame.fillna(value=-1e8) assert_frame_equal(result, expected)
closes #3907.
https://api.github.com/repos/pandas-dev/pandas/pulls/3909
2013-06-14T22:26:30Z
2013-06-15T12:34:13Z
2013-06-15T12:34:13Z
2014-07-04T21:31:30Z
TST: convert knowntestfailures to skip tests
diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index a7f0e3d3e37b1..5c79c57c1e020 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -15,7 +15,6 @@ from pandas import Index from pandas.sparse.tests import test_sparse from pandas.util import py3compat -from pandas.util.decorators import knownfailureif from pandas.util.misc import is_little_endian class TestPickle(unittest.TestCase): @@ -58,16 +57,18 @@ def compare(self, vf): comparator = getattr(tm,"assert_%s_equal" % typ) comparator(result,expected) - @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_10_1 on non-little endian") def test_read_pickles_0_10_1(self): + if not is_little_endian(): + raise nose.SkipTest("known failure of test_read_pickles_0_10_1 on non-little endian") pth = tm.get_data_path('legacy_pickle/0.10.1') for f in os.listdir(pth): vf = os.path.join(pth,f) self.compare(vf) - @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_11_0 on non-little endian") def test_read_pickles_0_11_0(self): + if not is_little_endian(): + raise nose.SkipTest("known failure of test_read_pickles_0_11_0 on non-little endian") pth = tm.get_data_path('legacy_pickle/0.11.0') for f in os.listdir(pth): diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 794d303a68d79..4584976c41383 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -14,7 +14,6 @@ from pandas.io.stata import read_stata, StataReader, StataWriter import pandas.util.testing as tm from pandas.util.testing import ensure_clean -from pandas.util.decorators import knownfailureif from pandas.util.misc import is_little_endian class StataTests(unittest.TestCase): @@ -129,8 +128,10 @@ def test_read_dta4(self): tm.assert_frame_equal(parsed, expected) - @knownfailureif(not is_little_endian(), "known failure of test_write_dta5 on non-little endian") def test_write_dta5(self): + if not 
is_little_endian(): + raise nose.SkipTest("known failure of test_write_dta5 on non-little endian") + original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) original.index.name = 'index' @@ -140,8 +141,10 @@ def test_write_dta5(self): written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) - @knownfailureif(not is_little_endian(), "known failure of test_write_dta6 on non-little endian") def test_write_dta6(self): + if not is_little_endian(): + raise nose.SkipTest("known failure of test_write_dta6 on non-little endian") + original = self.read_csv(self.csv3) original.index.name = 'index'
in test_pickle, test_stats closes #3894, #3896
https://api.github.com/repos/pandas-dev/pandas/pulls/3906
2013-06-14T18:41:53Z
2013-06-14T19:07:45Z
2013-06-14T19:07:45Z
2014-06-18T17:41:16Z
BLD: add setup_requires in setup.py so pandas can be used with buildout
diff --git a/setup.py b/setup.py index 030584ba509d3..3f6c945f796ea 100755 --- a/setup.py +++ b/setup.py @@ -34,9 +34,9 @@ _have_setuptools = False setuptools_kwargs = {} +min_numpy_ver = '1.6' if sys.version_info[0] >= 3: - min_numpy_ver = 1.6 if sys.version_info[1] >= 3: # 3.3 needs numpy 1.7+ min_numpy_ver = "1.7.0b2" @@ -45,6 +45,7 @@ 'install_requires': ['python-dateutil >= 2', 'pytz', 'numpy >= %s' % min_numpy_ver], + 'setup_requires': ['numpy >= %s' % min_numpy_ver], 'use_2to3_exclude_fixers': ['lib2to3.fixes.fix_next', ], } @@ -53,10 +54,12 @@ "\n$ pip install distribute") else: + min_numpy_ver = '1.6.1' setuptools_kwargs = { 'install_requires': ['python-dateutil', 'pytz', - 'numpy >= 1.6.1'], + 'numpy >= %s' % min_numpy_ver], + 'setup_requires': ['numpy >= %s' % min_numpy_ver], 'zip_safe': False, }
Numpy is a setup-time dependency due to some .h files, but is not declared as such. This causes tools like buildout ( http://buildout.org/ ) to fail. Adding the `setup_requires` kwarg to the setuptools config shoud fix this: buildout will correctly assemble any declared setup-time dependencies, before executing the pandas build. closes #3861
https://api.github.com/repos/pandas-dev/pandas/pulls/3903
2013-06-14T10:30:26Z
2013-06-18T23:29:05Z
2013-06-18T23:29:04Z
2014-07-16T08:14:08Z
ENH: (GH3863) Timestamp.min and Timestamp.max return a valid Timestamp
diff --git a/RELEASE.rst b/RELEASE.rst index 4f82f7b458737..977491b554cd8 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -69,6 +69,8 @@ pandas 0.11.1 - support python3 (via ``PyTables 3.0.0``) (GH3750_) - Add modulo operator to Series, DataFrame - Add ``date`` method to DatetimeIndex + - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead + of the default datetime.min and datetime.max (respectively). - Simplified the API and added a describe method to Categorical - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name`` to specify custom column names of the returned DataFrame (GH3649_), diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 45369cb7ddb08..0b736d8ddbe11 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -271,9 +271,10 @@ can be represented using a 64-bit integer is limited to approximately 584 years: .. ipython:: python - begin = Timestamp(-9223285636854775809L) + begin = Timestamp.min begin - end = Timestamp(np.iinfo(np.int64).max) + + end = Timestamp.max end If you need to represent time series data outside the nanosecond timespan, use diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index dfc36258a680f..d5357da16d2bb 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -289,8 +289,12 @@ Enhancements dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False) - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_) + - DatetimeIndexes no longer try to convert mixed-integer indexes during join operations (GH3877_) + + - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead + of the default datetime.min and datetime.max (respectively). 
Bug Fixes diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index eaeb3325685ec..54c00e798f08a 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -2,7 +2,7 @@ from numpy import nan import numpy as np -from pandas import Index, isnull +from pandas import Index, isnull, Timestamp from pandas.util.testing import assert_almost_equal import pandas.util.testing as common import pandas.lib as lib @@ -683,6 +683,22 @@ def test_int_index(self): expected = arr.sum(1) assert_almost_equal(result, expected) + +class TestTsUtil(unittest.TestCase): + def test_min_valid(self): + # Ensure that Timestamp.min is a valid Timestamp + Timestamp(Timestamp.min) + + def test_max_valid(self): + # Ensure that Timestamp.max is a valid Timestamp + Timestamp(Timestamp.max) + + def test_to_datetime_bijective(self): + # Ensure that converting to datetime and back only loses precision + # by going from nanoseconds to microseconds. + self.assertEqual(Timestamp(Timestamp.max.to_pydatetime()).value/1000, Timestamp.max.value/1000) + self.assertEqual(Timestamp(Timestamp.min.to_pydatetime()).value/1000, Timestamp.min.value/1000) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index ec11de7392680..3a0f7d9264174 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -388,6 +388,15 @@ cpdef object get_value_box(ndarray arr, object loc): return util.get_value_1d(arr, i) +# Add the min and max fields at the class level +# These are defined as magic numbers due to strange +# wraparound behavior when using the true int64 lower boundary +cdef int64_t _NS_LOWER_BOUND = -9223285636854775000LL +cdef int64_t _NS_UPPER_BOUND = 9223372036854775807LL +Timestamp.min = Timestamp(_NS_LOWER_BOUND) +Timestamp.max = Timestamp(_NS_UPPER_BOUND) + + #---------------------------------------------------------------------- # Frequency inference @@ -769,8 +778,6 @@ 
cdef inline object _get_zone(object tz): except AttributeError: return tz -# cdef int64_t _NS_LOWER_BOUND = -9223285636854775809LL -# cdef int64_t _NS_UPPER_BOUND = -9223372036854775807LL cdef inline _check_dts_bounds(int64_t value, pandas_datetimestruct *dts): cdef pandas_datetimestruct dts2 @@ -2868,4 +2875,4 @@ def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon): return 1 + days_to_week + day_of_week # def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): -# return _strptime(data_string, format)[0] +# return _strptime(data_string, format)[0] \ No newline at end of file
closes https://github.com/pydata/pandas/issues/3863 by adding valid min and max fields to the Timestamp class.
https://api.github.com/repos/pandas-dev/pandas/pulls/3902
2013-06-14T10:17:04Z
2013-07-02T14:39:10Z
2013-07-02T14:39:10Z
2014-06-14T06:16:44Z
BUG: fix unicode -> str cast in tslib
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index ec11de7392680..9b611032455ae 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -465,7 +465,8 @@ cdef class _Timestamp(datetime): elif op == 3: return True else: - raise TypeError('Cannot compare Timestamp with %s' % str(other)) + raise TypeError('Cannot compare Timestamp with ' + '{0!r}'.format(other.__class__.__name__)) self._assert_tzawareness_compat(other)
This should use format since calling str on a unicode string is a _bad_ idea because it may or may not repr correctly. closes #3875. another error is created from fixing this issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/3900
2013-06-14T00:11:29Z
2013-06-15T00:14:09Z
2013-06-15T00:14:09Z
2014-07-16T08:14:07Z
TST: test fixes for various builds (debian)
diff --git a/pandas/io/json.py b/pandas/io/json.py index fcecb31bb77a7..ce95c3394ce2c 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -268,6 +268,15 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): except: pass + if data.dtype == 'float': + + # coerce floats to 64 + try: + data = data.astype('float64') + result = True + except: + pass + # do't coerce 0-len data if len(data) and (data.dtype == 'float' or data.dtype == 'object'): @@ -280,6 +289,16 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): except: pass + # coerce ints to 64 + if data.dtype == 'int': + + # coerce floats to 64 + try: + data = data.astype('int64') + result = True + except: + pass + return data, result def _try_convert_to_date(self, data): diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 39e1042d125a2..baa4f6b64ec0e 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -808,11 +808,7 @@ def test_to_excel_styleconverter(self): # self.assertTrue(ws.cell(maddr).merged) # os.remove(filename) def test_excel_010_hemstring(self): - try: - import xlwt - import openpyxl - except ImportError: - raise nose.SkipTest + _skip_if_no_excelsuite() from pandas.util.testing import makeCustomDataframe as mkdf # ensure limited functionality in 0.10 diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index bdd700bdbcec3..fe717f56e6bea 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -26,7 +26,7 @@ _frame = DataFrame(_seriesd) _frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A']) -_intframe = DataFrame(dict((k, v.astype(int)) +_intframe = DataFrame(dict((k, v.astype(np.int64)) for k, v in _seriesd.iteritems())) _tsframe = DataFrame(_tsd) @@ -71,6 +71,9 @@ def _check_orient(df, orient, dtype=None, numpy=False, convert_axes=True, check_ unser = unser.sort() + if dtype is False: + 
check_dtype=False + if not convert_axes and df.index.dtype.type == np.datetime64: unser.index = DatetimeIndex(unser.index.values.astype('i8')) if orient == "records": @@ -288,7 +291,7 @@ def test_series_to_json_except(self): def test_typ(self): - s = Series(range(6), index=['a','b','c','d','e','f']) + s = Series(range(6), index=['a','b','c','d','e','f'], dtype='int64') result = read_json(s.to_json(),typ=None) assert_series_equal(result,s) diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index d49597860cd16..a7f0e3d3e37b1 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -15,6 +15,8 @@ from pandas import Index from pandas.sparse.tests import test_sparse from pandas.util import py3compat +from pandas.util.decorators import knownfailureif +from pandas.util.misc import is_little_endian class TestPickle(unittest.TestCase): _multiprocess_can_split_ = True @@ -56,6 +58,7 @@ def compare(self, vf): comparator = getattr(tm,"assert_%s_equal" % typ) comparator(result,expected) + @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_10_1 on non-little endian") def test_read_pickles_0_10_1(self): pth = tm.get_data_path('legacy_pickle/0.10.1') @@ -63,6 +66,7 @@ def test_read_pickles_0_10_1(self): vf = os.path.join(pth,f) self.compare(vf) + @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_11_0 on non-little endian") def test_read_pickles_0_11_0(self): pth = tm.get_data_path('legacy_pickle/0.11.0') diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 8b3d4a475d952..3266a906dcfae 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -477,6 +477,9 @@ def test_append(self): def test_encoding(self): + if sys.byteorder != 'little': + raise nose.SkipTest('system byteorder is not little, skipping test_encoding!') + with ensure_clean(self.path) as store: df = DataFrame(dict(A='foo',B='bar'),index=range(5)) 
df.loc[2,'A'] = np.nan diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index d512b0267ed13..794d303a68d79 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -3,7 +3,7 @@ from datetime import datetime import os import unittest - +import sys import warnings import nose @@ -14,6 +14,8 @@ from pandas.io.stata import read_stata, StataReader, StataWriter import pandas.util.testing as tm from pandas.util.testing import ensure_clean +from pandas.util.decorators import knownfailureif +from pandas.util.misc import is_little_endian class StataTests(unittest.TestCase): @@ -127,6 +129,7 @@ def test_read_dta4(self): tm.assert_frame_equal(parsed, expected) + @knownfailureif(not is_little_endian(), "known failure of test_write_dta5 on non-little endian") def test_write_dta5(self): original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) @@ -137,6 +140,7 @@ def test_write_dta5(self): written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) + @knownfailureif(not is_little_endian(), "known failure of test_write_dta6 on non-little endian") def test_write_dta6(self): original = self.read_csv(self.csv3) original.index.name = 'index' diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 380604b0de32e..4e57977a787f2 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1381,7 +1381,11 @@ def test_to_excel(self): path = '__tmp__.' 
+ ext with ensure_clean(path) as path: self.panel.to_excel(path) - reader = ExcelFile(path) + try: + reader = ExcelFile(path) + except ImportError: + raise nose.SkipTest + for item, df in self.panel.iterkv(): recdf = reader.parse(str(item), index_col=0) assert_frame_equal(df, recdf) diff --git a/pandas/util/misc.py b/pandas/util/misc.py index 8372ba56d00cd..15492cde5a9f7 100644 --- a/pandas/util/misc.py +++ b/pandas/util/misc.py @@ -1,3 +1,10 @@ +""" various miscellaneous utilities """ + +def is_little_endian(): + """ am I little endian """ + import sys + return sys.byteorder == 'little' + def exclusive(*args): count = sum([arg is not None for arg in args]) return count == 1
TST: json tests to int64 to avoid dtype issues, closes #3895 TST: skip tests if xlrd has lower than needed version, closes #3897 TST: skip pickle tests on not-little endianess , closes #3894 TST: skip test_encoding on non-little endian in test_pytables , closes #3892 TST: skip some stata tests on non-little endian, closes #3896
https://api.github.com/repos/pandas-dev/pandas/pulls/3898
2013-06-13T22:58:30Z
2013-06-14T13:13:16Z
2013-06-14T13:13:15Z
2014-07-01T15:50:54Z
Skip tests on network error
diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py index 7f4ca13c27e58..8c16c60ac8b87 100644 --- a/pandas/io/tests/test_google.py +++ b/pandas/io/tests/test_google.py @@ -6,6 +6,7 @@ import pandas.io.data as web from pandas.util.testing import (network, assert_series_equal) from numpy.testing.decorators import slow +import numpy as np import urllib2 @@ -24,7 +25,23 @@ def test_google(self): self.assertEquals( web.DataReader("F", 'google', start, end)['Close'][-1], 13.68) + except urllib2.URLError: + try: + urllib2.urlopen('http://www.google.com') + except urllib2.URLError: + raise nose.SkipTest + else: + raise + + @network + def test_google_non_existent(self): + # asserts that google is minimally working and that it throws + # an excecption when DataReader can't get a 200 response from + # google + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + try: self.assertRaises( Exception, lambda: web.DataReader("NON EXISTENT TICKER", 'google', @@ -44,38 +61,63 @@ def test_get_quote(self): lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG']))) @network - def test_get_data(self): - import numpy as np - df = web.get_data_google('GOOG') - print(df.Volume.ix['OCT-08-2010']) - assert df.Volume.ix['OCT-08-2010'] == 2863473 - - sl = ['AAPL', 'AMZN', 'GOOG'] - pan = web.get_data_google(sl, '2012') - ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] - assert ts[0].dayofyear == 96 - - pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') - expected = [19.02, 28.23, 25.39] - result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist() - assert result == expected - - # sanity checking - t= np.array(result) - assert np.issubdtype(t.dtype, np.floating) - assert t.shape == (3,) - - expected = [[ 18.99, 28.4 , 25.18], - [ 18.58, 28.31, 25.13], - [ 19.03, 28.16, 25.52], - [ 18.81, 28.82, 25.87]] - result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values - assert (result == 
expected).all() - - # sanity checking - t= np.array(pan) - assert np.issubdtype(t.dtype, np.floating) + def test_get_goog_volume(self): + try: + df = web.get_data_google('GOOG') + assert df.Volume.ix['OCT-08-2010'] == 2863473 + except IOError: + try: + urllib2.urlopen('http://www.google.com') + except IOError: + raise nose.SkipTest + else: + raise + + @network + def test_get_multi1(self): + try: + sl = ['AAPL', 'AMZN', 'GOOG'] + pan = web.get_data_google(sl, '2012') + ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] + assert ts[0].dayofyear == 96 + except IOError: + try: + urllib2.urlopen('http://www.google.com') + except IOError: + raise nose.SkipTest + else: + raise + @network + def test_get_multi2(self): + try: + pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') + expected = [19.02, 28.23, 25.39] + result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist() + assert result == expected + + # sanity checking + t= np.array(result) + assert np.issubdtype(t.dtype, np.floating) + assert t.shape == (3,) + + expected = [[ 18.99, 28.4 , 25.18], + [ 18.58, 28.31, 25.13], + [ 19.03, 28.16, 25.52], + [ 18.81, 28.82, 25.87]] + result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values + assert (result == expected).all() + + # sanity checking + t= np.array(pan) + assert np.issubdtype(t.dtype, np.floating) + except IOError: + try: + urllib2.urlopen('http://www.google.com') + except IOError: + raise nose.SkipTest + else: + raise if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
https://api.github.com/repos/pandas-dev/pandas/pulls/3893
2013-06-13T22:02:03Z
2013-06-19T11:24:56Z
2013-06-19T11:24:56Z
2016-05-10T01:09:03Z
BLD: install older versions of numexpr/pytables on fulldeps/2 build
diff --git a/ci/install.sh b/ci/install.sh index c9b76b88721e9..294db286a1001 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -67,14 +67,17 @@ if ( ! $VENV_FILE_AVAILABLE ); then if [ x"$FULL_DEPS" == x"true" ]; then echo "Installing FULL_DEPS" pip install $PIP_ARGS cython - pip install $PIP_ARGS numexpr if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then pip install $PIP_ARGS xlwt pip install $PIP_ARGS bottleneck + pip install $PIP_ARGS numexpr==2.0.1 + pip install $PIP_ARGS tables==2.3.1 + else + pip install $PIP_ARGS numexpr + pip install $PIP_ARGS tables fi - pip install $PIP_ARGS tables pip install $PIP_ARGS matplotlib pip install $PIP_ARGS openpyxl pip install $PIP_ARGS xlrd>=0.9.0
https://api.github.com/repos/pandas-dev/pandas/pulls/3891
2013-06-13T20:37:33Z
2013-06-13T21:23:30Z
2013-06-13T21:23:30Z
2014-07-16T08:14:03Z
API: (GH3888) more consistency in the to_datetime return types (given string/array of string inputs)
diff --git a/RELEASE.rst b/RELEASE.rst index 8e4bdd3cba268..500ba2df1ed47 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -159,6 +159,7 @@ pandas 0.11.1 - ``read_html`` now defaults to ``None`` when reading, and falls back on ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try until success is also valid + - more consistency in the to_datetime return types (give string/array of string inputs) (GH3888_) **Bug Fixes** @@ -355,6 +356,8 @@ pandas 0.11.1 .. _GH3911: https://github.com/pydata/pandas/issues/3911 .. _GH3912: https://github.com/pydata/pandas/issues/3912 .. _GH3764: https://github.com/pydata/pandas/issues/3764 +.. _GH3888: https://github.com/pydata/pandas/issues/3888 + pandas 0.11.0 ============= diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 16b3176521e28..2bbb0da9af658 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1159,9 +1159,13 @@ def truncate(self, before=None, after=None, copy=True): ------- truncated : type of caller """ - from pandas.tseries.tools import to_datetime - before = to_datetime(before) - after = to_datetime(after) + + # if we have a date index, convert to dates, otherwise + # treat like a slice + if self.index.is_all_dates: + from pandas.tseries.tools import to_datetime + before = to_datetime(before) + after = to_datetime(after) if before is not None and after is not None: if before > after: diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index bdc603dfdea31..51097cd157b99 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -825,13 +825,36 @@ def test_nat_scalar_field_access(self): self.assertEquals(NaT.weekday(), -1) - def test_to_datetime_empty_string(self): + def test_to_datetime_types(self): + + # empty string result = to_datetime('') - self.assert_(result == '') + self.assert_(result is NaT) result = to_datetime(['', '']) self.assert_(isnull(result).all()) + # ints + result = 
Timestamp(0) + expected = to_datetime(0) + self.assert_(result == expected) + + # GH 3888 (strings) + expected = to_datetime(['2012'])[0] + result = to_datetime('2012') + self.assert_(result == expected) + + ### array = ['2012','20120101','20120101 12:01:01'] + array = ['20120101','20120101 12:01:01'] + expected = list(to_datetime(array)) + result = map(Timestamp,array) + tm.assert_almost_equal(result,expected) + + ### currently fails ### + ### result = Timestamp('2012') + ### expected = to_datetime('2012') + ### self.assert_(result == expected) + def test_to_datetime_other_datetime64_units(self): # 5/25/2012 scalar = np.int64(1337904000000000).view('M8[us]') diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 90bc0beb8eb84..c80d2ef5d4e1c 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -99,16 +99,7 @@ def _convert_f(arg): except (ValueError, TypeError): raise e - if arg is None: - return arg - elif isinstance(arg, datetime): - return arg - elif isinstance(arg, Series): - values = arg.values - if not com.is_datetime64_dtype(values): - values = _convert_f(values) - return Series(values, index=arg.index, name=arg.name) - elif isinstance(arg, (np.ndarray, list)): + def _convert_listlike(arg): if isinstance(arg, list): arg = np.array(arg, dtype='O') @@ -122,24 +113,23 @@ def _convert_f(arg): return DatetimeIndex._simple_new(values, None, tz=tz) except (ValueError, TypeError): raise e - return arg + return arg - try: - return _convert_f(arg) - except ValueError: - raise - return arg + return _convert_f(arg) - try: - if not arg: - return arg - default = datetime(1, 1, 1) - return parse(arg, dayfirst=dayfirst, default=default) - except Exception: - if errors == 'raise': - raise + if arg is None: return arg + elif isinstance(arg, datetime): + return arg + elif isinstance(arg, Series): + values = arg.values + if not com.is_datetime64_dtype(values): + values = _convert_f(values) + return Series(values, index=arg.index, name=arg.name) + 
elif isinstance(arg, (np.ndarray, list)): + return _convert_listlike(arg) + return _convert_listlike(np.array([ arg ], dtype='O'))[0] class DateParseError(ValueError): pass
closes #3888 resolves the following inconsistencies in the Timestamp/to_datetime interface Things that do the same thing will now _do the same thing_! ``` In [1]: to_datetime = pd.to_datetime In [2]: to_datetime('') Out[2]: NaT In [3]: to_datetime(['', '']) Out[3]: <class 'pandas.tseries.index.DatetimeIndex'> [NaT, NaT] Length: 2, Freq: None, Timezone: None In [4]: Timestamp(0) Out[4]: Timestamp('1970-01-01 00:00:00', tz=None) In [5]: to_datetime(0) Out[5]: Timestamp('1970-01-01 00:00:00', tz=None) In [6]: to_datetime(['2012'])[0] Out[6]: Timestamp('2012-01-01 00:00:00', tz=None) In [8]: to_datetime('2012') Out[8]: Timestamp('2012-01-01 00:00:00', tz=None) In [9]: array = ['20120101','20120101 12:01:01'] In [11]: list(to_datetime(array)) Out[11]: [Timestamp('2012-01-01 00:00:00', tz=None), Timestamp('2012-01-01 12:01:01', tz=None)] In [13]: map(Timestamp,array) Out[13]: [Timestamp('2012-01-01 00:00:00', tz=None), Timestamp('2012-01-01 12:01:01', tz=None)] ``` Note that the following is still inconsisten and will be fixed in a future PR ``` In [14]: Timestamp('2012') Out[14]: Timestamp('2012-06-18 00:00:00', tz=None) In [15]: to_datetime('2012') Out[15]: Timestamp('2012-01-01 00:00:00', tz=None) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3890
2013-06-13T19:45:50Z
2013-06-19T00:56:27Z
2013-06-19T00:56:27Z
2014-06-17T13:26:30Z
BUG: (GH3880) index names are now propogated with loc/ix
diff --git a/RELEASE.rst b/RELEASE.rst index 03cfc4f6bcafc..839c472da1610 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -177,6 +177,8 @@ pandas 0.11.1 - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_) - Allow insert/delete to non-unique columns (GH3679_) - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_) + - ``DataFrame.itertuples()`` now works with frames with duplicate column + names (GH3873_) - Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_) - Fixed bug in mixed-frame assignment with aligned series (GH3492_) - Fixed bug in selecting month/quarter/year from a series would not select the time element @@ -228,8 +230,7 @@ pandas 0.11.1 - PandasObjects raise TypeError when trying to hash (GH3882_) - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_) - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_) - - ``DataFrame.itertuples()`` now works with frames with duplicate column - names (GH3873_) + - Fix index name not propogating when using ``loc/ix`` (GH3880_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -323,6 +324,7 @@ pandas 0.11.1 .. _GH3834: https://github.com/pydata/pandas/issues/3834 .. _GH3873: https://github.com/pydata/pandas/issues/3873 .. _GH3877: https://github.com/pydata/pandas/issues/3877 +.. 
_GH3880: https://github.com/pydata/pandas/issues/3880 pandas 0.11.0 diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 1a43e9e6a49e0..dfc36258a680f 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -348,11 +348,14 @@ Bug Fixes - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_) - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_) - Allow insert/delete to non-unique columns (GH3679_) + - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_) + - Allow insert/delete to non-unique columns (GH3679_) + - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_) + - ``DataFrame.itertuples()`` now works with frames with duplicate column + names (GH3873_) - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_) - ``read_html`` now correctly skips tests (GH3741_) - - ``DataFrame.itertuples()`` now works with frames with duplicate column - names (GH3873_) See the `full release notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker @@ -405,3 +408,5 @@ on GitHub for a complete list. .. _GH3834: https://github.com/pydata/pandas/issues/3834 .. _GH3873: https://github.com/pydata/pandas/issues/3873 .. _GH3877: https://github.com/pydata/pandas/issues/3877 +.. _GH3659: https://github.com/pydata/pandas/issues/3659 +.. 
_GH3679: https://github.com/pydata/pandas/issues/3679 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b6e29204fc0d8..f9f8a424f8d96 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2712,14 +2712,14 @@ def _reindex_multi(self, new_index, new_columns, copy, fill_value): def _reindex_index(self, new_index, method, copy, level, fill_value=NA, limit=None): new_index, indexer = self.index.reindex(new_index, method, level, - limit=limit) + limit=limit, copy_if_needed=True) return self._reindex_with_indexers(new_index, indexer, None, None, copy, fill_value) def _reindex_columns(self, new_columns, copy, level, fill_value=NA, limit=None): new_columns, indexer = self.columns.reindex(new_columns, level=level, - limit=limit) + limit=limit, copy_if_needed=True) return self._reindex_with_indexers(None, None, new_columns, indexer, copy, fill_value) diff --git a/pandas/core/index.py b/pandas/core/index.py index 51ebd58c33343..a5880b9f18670 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -920,7 +920,7 @@ def _get_method(self, method): } return aliases.get(method, method) - def reindex(self, target, method=None, level=None, limit=None): + def reindex(self, target, method=None, level=None, limit=None, copy_if_needed=False): """ For Index, simply returns the new index and the results of get_indexer. 
Provided here to enable an interface that is amenable for @@ -939,6 +939,12 @@ def reindex(self, target, method=None, level=None, limit=None): else: if self.equals(target): indexer = None + + # to avoid aliasing an existing index + if copy_if_needed and target.name != self.name and self.name is not None: + if target.name is None: + target = self.copy() + else: if self.is_unique: indexer = self.get_indexer(target, method=method, @@ -2196,7 +2202,7 @@ def get_indexer(self, target, method=None, limit=None): return com._ensure_platform_int(indexer) - def reindex(self, target, method=None, level=None, limit=None): + def reindex(self, target, method=None, level=None, limit=None, copy_if_needed=False): """ Performs any necessary conversion on the input index and calls get_indexer. This method is here so MultiIndex and an Index of diff --git a/pandas/core/internals.py b/pandas/core/internals.py index af1543dad0314..49d92afc46848 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1948,7 +1948,7 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True): 'axis == 0') return self.reindex_items(new_axis) - new_axis, indexer = cur_axis.reindex(new_axis, method) + new_axis, indexer = cur_axis.reindex(new_axis, method, copy_if_needed=True) return self.reindex_indexer(new_axis, indexer, axis=axis) def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): @@ -2014,7 +2014,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): return data.reindex_items(new_items) # TODO: this part could be faster (!) 
- new_items, indexer = self.items.reindex(new_items) + new_items, indexer = self.items.reindex(new_items, copy_if_needed=True) new_axes = [new_items] + self.axes[1:] # could have so me pathological (MultiIndex) issues here diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2b2d59306da6e..5b4d582e5e42e 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7207,6 +7207,7 @@ def test_reindex_name_remains(self): s = Series(random.rand(10)) df = DataFrame(s, index=np.arange(len(s))) i = Series(np.arange(10), name='iname') + df = df.reindex(i) self.assert_(df.index.name == 'iname') diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 295eaede443b1..0719d9c9a87db 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1024,6 +1024,19 @@ def test_non_unique_loc(self): expected = DataFrame({'A' : [2,4,5], 'B' : [4,6,7]}, index = [1,1,2]) assert_frame_equal(result,expected) + def test_loc_name(self): + # GH 3880 + df = DataFrame([[1, 1], [1, 1]]) + df.index.name = 'index_name' + result = df.iloc[[0, 1]].index.name + self.assert_(result == 'index_name') + + result = df.ix[[0, 1]].index.name + self.assert_(result == 'index_name') + + result = df.loc[[0, 1]].index.name + self.assert_(result == 'index_name') + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #3880
https://api.github.com/repos/pandas-dev/pandas/pulls/3887
2013-06-13T18:57:18Z
2013-06-13T19:46:37Z
2013-06-13T19:46:37Z
2014-07-03T09:04:12Z
FIX hash of DataFrame raises Typerror
diff --git a/RELEASE.rst b/RELEASE.rst index 307986ab81681..072f40d927108 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -220,6 +220,7 @@ pandas 0.11.1 - Groupby transform with item-by-item not upcasting correctly (GH3740_) - Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_) - ``read_html`` now correctly skips tests (GH3741_) + - PandasObjects raise TypeError when trying to hash (GH3882_) - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_) - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0d2612d7aed7a..3a3ce49d50c5a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -31,6 +31,11 @@ def save(self, path): def load(cls, path): return com.load(path) + def __hash__(self): + raise TypeError('{0!r} objects are mutable, thus they cannot be' + ' hashed'.format(self.__class__.__name__)) + + #---------------------------------------------------------------------- # Axis name business diff --git a/pandas/core/series.py b/pandas/core/series.py index 3a7a7d0f49b66..2621c64afc205 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -527,9 +527,6 @@ def _constructor(self): def _can_hold_na(self): return not is_integer_dtype(self.dtype) - def __hash__(self): - raise TypeError('unhashable type') - _index = None index = lib.SeriesIndex() diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2c6d3b221c6ff..1e8fa91548145 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3109,6 +3109,11 @@ def test_constructor_for_list_with_dtypes(self): expected.sort() assert_series_equal(result, expected) + def test_not_hashable(self): + df = pd.DataFrame([1]) + self.assertRaises(TypeError, hash, df) + self.assertRaises(TypeError, hash, self.empty) + def test_timedeltas(self): df = DataFrame(dict(A = 
Series(date_range('2012-1-1', periods=3, freq='D')), diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 58b7ac272401f..380604b0de32e 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -46,6 +46,12 @@ def test_cumsum(self): cumsum = self.panel.cumsum() assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum()) + def not_hashable(self): + c_empty = Panel() + c = Panel(pd.Panel([[[1]]])) + self.assertRaises(TypeError, hash, c_empty) + self.assertRaises(TypeError, hash, c) + class SafeForLongAndSparse(object): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index a2e08bc744ab0..9c3a66c32c501 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -785,6 +785,11 @@ def test_reindex(self): major=self.panel4d.major_axis, copy=False) self.assert_(result is self.panel4d) + def test_not_hashable(self): + p4D_empty = Panel4D() + self.assertRaises(TypeError, hash, p4D_empty) + self.assertRaises(TypeError, hash, self.panel4d) + def test_reindex_like(self): # reindex_like smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1], diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 88990bdde98b8..d04da38f0e526 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -579,6 +579,12 @@ def test_setindex(self): def test_array_finalize(self): pass + def test_not_hashable(self): + s_empty = Series() + s = Series([1]) + self.assertRaises(TypeError, hash, s_empty) + self.assertRaises(TypeError, hash, s) + def test_fromValue(self): nans = Series(np.NaN, index=self.ts.index)
fixes #3882 raise TypeError if trying to hash a DataFrame (or Panel etc.).
https://api.github.com/repos/pandas-dev/pandas/pulls/3884
2013-06-13T13:29:45Z
2013-06-13T18:49:52Z
2013-06-13T18:49:52Z
2014-06-21T16:50:42Z