title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
BUG: Fixed bug in selecting month/quarter/year from a series would not select correctly (GH3546)
diff --git a/RELEASE.rst b/RELEASE.rst index 69cfd1eb99d7e..487c18cdb679b 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -79,6 +79,8 @@ pandas 0.11.1 - Fix to_csv to handle non-unique columns (GH3495_) - Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_) - Fixed bug in mixed-frame assignment with aligned series (GH3492_) + - Fixed bug in selecting month/quarter/year from a series would not select the time element + on the last day (GH3546_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -100,6 +102,7 @@ pandas 0.11.1 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3477: https://github.com/pydata/pandas/issues/3457 .. _GH3461: https://github.com/pydata/pandas/issues/3461 +.. _GH3546: https://github.com/pydata/pandas/issues/3546 .. _GH3468: https://github.com/pydata/pandas/issues/3468 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index d9625a3d5e549..6bccf323f8654 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1075,16 +1075,16 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): if reso == 'year': t1 = Timestamp(datetime(parsed.year, 1, 1), tz=self.tz) - t2 = Timestamp(datetime(parsed.year, 12, 31), tz=self.tz) + t2 = Timestamp(datetime(parsed.year, 12, 31, 23, 59, 59, 999999), tz=self.tz) elif reso == 'month': d = tslib.monthrange(parsed.year, parsed.month)[1] t1 = Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz) - t2 = Timestamp(datetime(parsed.year, parsed.month, d), tz=self.tz) + t2 = Timestamp(datetime(parsed.year, parsed.month, d, 23, 59, 59, 999999), tz=self.tz) elif reso == 'quarter': qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead d = tslib.monthrange(parsed.year, qe)[1] # at end of month t1 = Timestamp(datetime(parsed.year, 
parsed.month, 1), tz=self.tz) - t2 = Timestamp(datetime(parsed.year, qe, d), tz=self.tz) + t2 = Timestamp(datetime(parsed.year, qe, d, 23, 59, 59, 999999), tz=self.tz) elif (reso == 'day' and (self._resolution < Resolution.RESO_DAY or not is_monotonic)): st = datetime(parsed.year, parsed.month, parsed.day) t1 = Timestamp(st, tz=self.tz) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index c83d4ba131a42..e52d9c9c8b777 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -236,6 +236,22 @@ def test_indexing(self): result = df['2001']['A'] assert_series_equal(expected,result) + # GH3546 (not including times on the last day) + idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H') + ts = Series(range(len(idx)), index=idx) + expected = ts['2013-05'] + assert_series_equal(expected,ts) + + idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S') + ts = Series(range(len(idx)), index=idx) + expected = ts['2013-05'] + assert_series_equal(expected,ts) + + idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))] + ts = Series(range(len(idx)), index=idx) + expected = ts['2013'] + assert_series_equal(expected,ts) + def assert_range_equal(left, right): assert(left.equals(right)) assert(left.freq == right.freq)
closes #3546
https://api.github.com/repos/pandas-dev/pandas/pulls/3548
2013-05-08T17:40:05Z
2013-05-08T17:59:47Z
2013-05-08T17:59:47Z
2014-07-16T08:07:40Z
document read_csv's usecols argument.
diff --git a/doc/source/io.rst b/doc/source/io.rst index 39f860c63e0e6..097ce7aea5d3a 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -110,6 +110,8 @@ They can take a number of arguments: - ``verbose``: show number of NA values inserted in non-numeric columns - ``squeeze``: if True then output with only one column is turned into Series - ``error_bad_lines``: if False then any lines causing an error will be skipped :ref:`bad lines <io.bad_lines>` + - ``usecols``: a subset of columns to return, results in much faster parsing + time and lower memory usage. .. ipython:: python :suppress: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 60b6d6c81fdd3..7676529fd97e9 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -122,6 +122,9 @@ class DateConversionError(Exception): Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file +usecols : array-like + Return a subset of the columns. + Results in much faster parsing time and lower memory usage. Returns -------
read_csv's usecols is currently not documented here: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.parsers.read_csv.html This patch fixes that.
https://api.github.com/repos/pandas-dev/pandas/pulls/3544
2013-05-08T01:00:50Z
2013-05-15T23:31:24Z
2013-05-15T23:31:24Z
2014-07-16T08:07:34Z
Document read_csv's usecols parameter.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c6a904b931c98..b9b2bc33144a2 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -114,6 +114,9 @@ class DateConversionError(Exception): Encoding to use for UTF when reading/writing (ex. 'utf-8') squeeze : boolean, default False If the parsed data only contains one column then return a Series +usecols : array-like, default None + Specify a subset of columns to return. + This can result in much faster parsing time and lower memory usage. Returns -------
read_csv's usecols argument is currently undocumented. This patch fixes this: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
https://api.github.com/repos/pandas-dev/pandas/pulls/3543
2013-05-08T00:49:15Z
2013-05-08T00:50:59Z
null
2013-05-15T23:13:42Z
PERF: HDFStore table writing performance improvements
diff --git a/RELEASE.rst b/RELEASE.rst index f3d9c72db8bc5..f80a688c3657e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -44,6 +44,7 @@ pandas 0.11.1 - will warn with a FrequencyWarning if you are attempting to append an index with a different frequency than the existing - support datelike columns with a timezone as data_columns (GH2852_) + - table writing performance improvements. **API Changes** diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 1661080b11799..834a94a139ee5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -913,7 +913,7 @@ def __init__(self, func, nrows, start=None, stop=None, chunksize=None): self.stop = min(self.nrows,stop) if chunksize is None: - chunksize = 50000 + chunksize = 100000 self.chunksize = chunksize @@ -2232,6 +2232,10 @@ def table(self): """ return the table group (this is my storable) """ return self.storable + @property + def dtype(self): + return self.table.dtype + @property def description(self): return self.table.description @@ -2848,7 +2852,7 @@ class AppendableTable(LegacyTable): table_type = 'appendable' def write(self, obj, axes=None, append=False, complib=None, - complevel=None, fletcher32=None, min_itemsize=None, chunksize=50000, + complevel=None, fletcher32=None, min_itemsize=None, chunksize=None, expectedrows=None, **kwargs): if not append and self.is_exists: @@ -2905,18 +2909,26 @@ def write_data(self, chunksize): [a.is_searchable for a in self.values_axes]).astype('u1') values = [a.take_data() for a in self.values_axes] + # transpose the values so first dimension is last + values = [ v.transpose(np.roll(np.arange(v.ndim),v.ndim-1)) for v in values ] + # write the chunks + if chunksize is None: + chunksize = 100000 + rows = self.nrows_expected chunks = int(rows / chunksize) + 1 for i in xrange(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, rows) + if start_i >= end_i: + break self.write_data_chunk( indexes=[a[start_i:end_i] for a in indexes], mask=mask[start_i:end_i], 
search=search, - values=[v[:, start_i:end_i] for v in values]) + values=[v[start_i:end_i] for v in values]) def write_data_chunk(self, indexes, mask, search, values): @@ -2929,7 +2941,7 @@ def write_data_chunk(self, indexes, mask, search, values): try: func = getattr(lib, "create_hdf_rows_%sd" % self.ndim) args = list(indexes) - args.extend([mask, search, values]) + args.extend([self.dtype, mask, search, values]) rows = func(*args) except (Exception), detail: raise Exception("cannot create row-data -> %s" % str(detail)) @@ -2939,9 +2951,8 @@ def write_data_chunk(self, indexes, mask, search, values): self.table.append(rows) self.table.flush() except (Exception), detail: - raise Exception( - "tables cannot write this data -> %s" % str(detail)) - + raise Exception("tables cannot write this data -> %s" % str(detail)) + def delete(self, where=None, **kwargs): # delete all rows (and return the nrows) diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 05171523764c8..d043691bc061e 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -837,61 +837,70 @@ def write_csv_rows(list data, list data_index, int nlevels, list cols, object wr @cython.boundscheck(False) @cython.wraparound(False) -def create_hdf_rows_2d(ndarray indexer0, +def create_hdf_rows_2d(ndarray indexer0, + object dtype, ndarray[np.uint8_t, ndim=1] mask, ndarray[np.uint8_t, ndim=1] searchable, - list values): + list values): """ return a list of objects ready to be converted to rec-array format """ cdef: - int i, b, n_indexer0, n_blocks, tup_size - list l - object tup, val, v + int i, l, b, n_indexer0, n_blocks, tup_size + ndarray result + tuple tup + object v n_indexer0 = indexer0.shape[0] n_blocks = len(values) tup_size = n_blocks+1 - l = [] - for i from 0 <= i < n_indexer0: + result = np.empty(n_indexer0,dtype=dtype) + l = 0 + for i in range(n_indexer0): if not mask[i]: - + tup = PyTuple_New(tup_size) - val = indexer0[i] - PyTuple_SET_ITEM(tup, 0, val) - Py_INCREF(val) - for b from 0 <= b < n_blocks: + v = 
indexer0[i] + PyTuple_SET_ITEM(tup, 0, v) + Py_INCREF(v) + + for b in range(n_blocks): - v = values[b][:, i] + v = values[b][i] if searchable[b]: v = v[0] + PyTuple_SET_ITEM(tup, b+1, v) Py_INCREF(v) - l.append(tup) + result[l] = tup + l += 1 - return l + return result[0:l] @cython.boundscheck(False) @cython.wraparound(False) def create_hdf_rows_3d(ndarray indexer0, ndarray indexer1, + object dtype, ndarray[np.uint8_t, ndim=2] mask, ndarray[np.uint8_t, ndim=1] searchable, list values): """ return a list of objects ready to be converted to rec-array format """ cdef: - int i, j, b, n_indexer0, n_indexer1, n_blocks, tup_size - list l - object tup, val, v + int i, j, l, b, n_indexer0, n_indexer1, n_blocks, tup_size + tuple tup + object v + ndarray result n_indexer0 = indexer0.shape[0] n_indexer1 = indexer1.shape[0] n_blocks = len(values) tup_size = n_blocks+2 - l = [] + result = np.empty(n_indexer0*n_indexer1,dtype=dtype) + l = 0 for i from 0 <= i < n_indexer0: for j from 0 <= j < n_indexer1: @@ -900,45 +909,49 @@ def create_hdf_rows_3d(ndarray indexer0, ndarray indexer1, tup = PyTuple_New(tup_size) - val = indexer0[i] - PyTuple_SET_ITEM(tup, 0, val) - Py_INCREF(val) - - val = indexer1[j] - PyTuple_SET_ITEM(tup, 1, val) - Py_INCREF(val) + v = indexer0[i] + PyTuple_SET_ITEM(tup, 0, v) + Py_INCREF(v) + v = indexer1[j] + PyTuple_SET_ITEM(tup, 1, v) + Py_INCREF(v) for b from 0 <= b < n_blocks: - v = values[b][:, i, j] + v = values[b][i, j] if searchable[b]: v = v[0] + PyTuple_SET_ITEM(tup, b+2, v) Py_INCREF(v) - l.append(tup) + result[l] = tup + l += 1 - return l + return result[0:l] @cython.boundscheck(False) @cython.wraparound(False) def create_hdf_rows_4d(ndarray indexer0, ndarray indexer1, ndarray indexer2, + object dtype, ndarray[np.uint8_t, ndim=3] mask, ndarray[np.uint8_t, ndim=1] searchable, list values): """ return a list of objects ready to be converted to rec-array format """ cdef: - int i, j, k, b, n_indexer0, n_indexer1, n_indexer2, n_blocks, tup_size - list l 
- object tup, val, v + int i, j, k, l, b, n_indexer0, n_indexer1, n_indexer2, n_blocks, tup_size + tuple tup + object v + ndarray result n_indexer0 = indexer0.shape[0] n_indexer1 = indexer1.shape[0] n_indexer2 = indexer2.shape[0] n_blocks = len(values) tup_size = n_blocks+3 - l = [] + result = np.empty(n_indexer0*n_indexer1*n_indexer2,dtype=dtype) + l = 0 for i from 0 <= i < n_indexer0: for j from 0 <= j < n_indexer1: @@ -949,29 +962,28 @@ def create_hdf_rows_4d(ndarray indexer0, ndarray indexer1, ndarray indexer2, tup = PyTuple_New(tup_size) - val = indexer0[i] - PyTuple_SET_ITEM(tup, 0, val) - Py_INCREF(val) - - val = indexer1[j] - PyTuple_SET_ITEM(tup, 1, val) - Py_INCREF(val) - - val = indexer2[k] - PyTuple_SET_ITEM(tup, 2, val) - Py_INCREF(val) + v = indexer0[i] + PyTuple_SET_ITEM(tup, 0, v) + Py_INCREF(v) + v = indexer1[j] + PyTuple_SET_ITEM(tup, 1, v) + Py_INCREF(v) + v = indexer2[k] + PyTuple_SET_ITEM(tup, 2, v) + Py_INCREF(v) for b from 0 <= b < n_blocks: - v = values[b][:, i, j, k] + v = values[b][i, j, k] if searchable[b]: v = v[0] PyTuple_SET_ITEM(tup, b+3, v) Py_INCREF(v) - l.append(tup) + result[l] = tup + l += 1 - return l + return result[0:l] #------------------------------------------------------------------------------- # Groupby-related functions
PERF: hdf performance by doing recarray translation in cython ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- write_store_table_dc | 155.4217 | 269.1494 | 0.5775 | write_store_table_panel | 93.9604 | 135.8333 | 0.6917 | write_store_table_mixed | 116.6536 | 151.5377 | 0.7698 | write_store_table | 66.0146 | 80.5717 | 0.8193 | write_store_table_wide | 100.0113 | 121.9817 | 0.8199 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3537
2013-05-06T22:38:17Z
2013-05-08T21:44:20Z
2013-05-08T21:44:20Z
2014-07-16T08:07:27Z
Update io.rst
diff --git a/doc/source/io.rst b/doc/source/io.rst index 9001ae393d552..a0d30f0710e71 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -873,6 +873,14 @@ The Series object also has a ``to_string`` method, but with only the ``buf``, which, if set to ``True``, will additionally output the length of the Series. +Reading HTML format +~~~~~~~~~~~~~~~~~~~~~~ + +.. _io.read_html: + +There is a :func:`~pandas.io.parsers.read_html` reading an HTML file and parsing the contained table(s) into a list of DataFrames. + + Writing to HTML format ~~~~~~~~~~~~~~~~~~~~~~
added io.read_html
https://api.github.com/repos/pandas-dev/pandas/pulls/3536
2013-05-06T20:40:46Z
2013-05-10T10:02:36Z
2013-05-10T10:02:36Z
2013-05-10T10:03:07Z
BUG: Fixed bug in mixed frame assignment with aligned series (GH3492)
diff --git a/RELEASE.rst b/RELEASE.rst index c1d40dc76f598..69cfd1eb99d7e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -78,6 +78,7 @@ pandas 0.11.1 (removed warning) (GH2786_), and fix (GH3230_) - Fix to_csv to handle non-unique columns (GH3495_) - Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_) + - Fixed bug in mixed-frame assignment with aligned series (GH3492_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 @@ -103,6 +104,7 @@ pandas 0.11.1 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 .. _GH3495: https://github.com/pydata/pandas/issues/3495 +.. _GH3492: https://github.com/pydata/pandas/issues/3492 .. _GH3493: https://github.com/pydata/pandas/issues/3493 diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 5cfb24d71e19b..4be34cdbf84eb 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -21,3 +21,4 @@ on GitHub for a complete list. .. _GH2437: https://github.com/pydata/pandas/issues/2437 .. _GH3477: https://github.com/pydata/pandas/issues/3477 +.. 
_GH3492: https://github.com/pydata/pandas/issues/3492 diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7562d20363027..8b6acd8c7c53e 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -124,8 +124,7 @@ def setter(item, v): values = data.values if np.prod(values.shape): result, changed = com._maybe_upcast_indexer(values,plane_indexer,v,dtype=getattr(data,'dtype',None)) - if changed: - self.obj[item] = result + self.obj[item] = result labels = item_labels[het_idx] diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index ae71ec8b35422..f70c781847cc7 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -784,6 +784,21 @@ def test_dups_fancy_indexing(self): assert_frame_equal(df,result) + def test_indexing_mixed_frame_bug(self): + + # GH3492 + df=DataFrame({'a':{1:'aaa',2:'bbb',3:'ccc'},'b':{1:111,2:222,3:333}}) + + # this works, new column is created correctly + df['test']=df['a'].apply(lambda x: '_' if x=='aaa' else x) + + # this does not work, ie column test is not changed + idx=df['test']=='_' + temp=df.ix[idx,'a'].apply(lambda x: '-----' if x=='aaa' else x) + df.ix[idx,'test']=temp + self.assert_(df.iloc[0,2] == '-----') + + #if I look at df, then element [0,2] equals '_'. If instead I type df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I get '_'. if __name__ == '__main__': import nose
closes #3492
https://api.github.com/repos/pandas-dev/pandas/pulls/3533
2013-05-06T19:09:03Z
2013-05-06T19:34:37Z
2013-05-06T19:34:37Z
2014-07-16T08:07:24Z
ENH: HDFStore enhancements
diff --git a/RELEASE.rst b/RELEASE.rst index 69cfd1eb99d7e..f3f4d7c895931 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -38,6 +38,12 @@ pandas 0.11.1 - Fixed various issues with internal pprinting code, the repr() for various objects including TimeStamp and *Index now produces valid python code strings and can be used to recreate the object, (GH3038_), (GH3379_), (GH3251_) + - ``HDFStore`` + + - will retain index attributes (freq,tz,name) on recreation (GH3499_) + - will warn with a FrequencyWarning if you are attempting to append + an index with a different frequency than the existing + - support datelike columns with a timezone as data_columns (GH2852_) **API Changes** @@ -87,6 +93,7 @@ pandas 0.11.1 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3480: https://github.com/pydata/pandas/issues/3480 +.. _GH2852: https://github.com/pydata/pandas/issues/2852 .. _GH3454: https://github.com/pydata/pandas/issues/3454 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3491: https://github.com/pydata/pandas/issues/3491 @@ -102,7 +109,7 @@ pandas 0.11.1 .. _GH3461: https://github.com/pydata/pandas/issues/3461 .. _GH3468: https://github.com/pydata/pandas/issues/3468 .. _GH3448: https://github.com/pydata/pandas/issues/3448 -.. _GH3449: https://github.com/pydata/pandas/issues/3449 +.. _GH3499: https://github.com/pydata/pandas/issues/3499 .. _GH3495: https://github.com/pydata/pandas/issues/3495 .. _GH3492: https://github.com/pydata/pandas/issues/3492 .. _GH3493: https://github.com/pydata/pandas/issues/3493 diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 4be34cdbf84eb..2e3a67ead65e0 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -1,6 +1,6 @@ -.. _whatsnew_0120: +.. _whatsnew_0111: -v0.12.0 (??) +v0.11.1 (??) 
------------------------ This is a major release from 0.11.0 and includes many new features and @@ -12,13 +12,21 @@ API changes Enhancements ~~~~~~~~~~~~ - - pd.read_html() can now parse HTML string, files or urls and return dataframes + - ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes courtesy of @cpcloud. (GH3477_) + - ``HDFStore`` + + - will retain index attributes (freq,tz,name) on recreation (GH3499_) + - will warn with a FrequencyWarning if you are attempting to append + an index with a different frequency than the existing + - support datelike columns with a timezone as data_columns (GH2852_) See the `full release notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker on GitHub for a complete list. .. _GH2437: https://github.com/pydata/pandas/issues/2437 +.. _GH2852: https://github.com/pydata/pandas/issues/2852 .. _GH3477: https://github.com/pydata/pandas/issues/3477 .. _GH3492: https://github.com/pydata/pandas/issues/3492 +.. _GH3499: https://github.com/pydata/pandas/issues/3499 diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 81bd39dd0e70f..a02e41176ced1 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,8 @@ These are new features and improvements of note in each release. .. include:: v0.12.0.txt +.. include:: v0.11.1.txt + .. include:: v0.11.0.txt .. 
include:: v0.10.1.txt diff --git a/pandas/core/index.py b/pandas/core/index.py index 101b69ffc3c7e..4a7981e57c622 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -83,12 +83,12 @@ class Index(np.ndarray): _engine_type = _index.ObjectEngine - def __new__(cls, data, dtype=None, copy=False, name=None): + def __new__(cls, data, dtype=None, copy=False, name=None, **kwargs): from pandas.tseries.period import PeriodIndex if isinstance(data, np.ndarray): if issubclass(data.dtype.type, np.datetime64): from pandas.tseries.index import DatetimeIndex - result = DatetimeIndex(data, copy=copy, name=name) + result = DatetimeIndex(data, copy=copy, name=name, **kwargs) if dtype is not None and _o_dtype == dtype: return Index(result.to_pydatetime(), dtype=_o_dtype) else: @@ -102,7 +102,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None): except TypeError: pass elif isinstance(data, PeriodIndex): - return PeriodIndex(data, copy=copy, name=name) + return PeriodIndex(data, copy=copy, name=name, **kwargs) if issubclass(data.dtype.type, np.integer): return Int64Index(data, copy=copy, dtype=dtype, name=name) @@ -123,10 +123,10 @@ def __new__(cls, data, dtype=None, copy=False, name=None): if (inferred.startswith('datetime') or tslib.is_timestamp_array(subarr)): from pandas.tseries.index import DatetimeIndex - return DatetimeIndex(subarr, copy=copy, name=name) + return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == 'period': - return PeriodIndex(subarr, name=name) + return PeriodIndex(subarr, name=name, **kwargs) subarr = subarr.view(cls) subarr.name = name diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 06ae9a7f7f11f..1661080b11799 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -42,6 +42,11 @@ class IncompatibilityWarning(Warning): pass where criteria is being ignored as this version [%s] is too old (or not-defined), read the file in and write it out to a new file to upgrade (with the copy_to method) """ 
+class FrequencyWarning(Warning): pass +frequency_doc = """ +the frequency of the existing index is [%s] which conflicts with the new freq [%s], +resetting the frequency to None +""" class PerformanceWarning(Warning): pass performance_doc = """ your performance may suffer as PyTables will pickle object types that it cannot map @@ -149,9 +154,12 @@ def get_store(path, mode='a', complevel=None, complib=None, ### interface to/from ### -def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, **kwargs): +def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): """ store this object, close it if we opened it """ - f = lambda store: store.put(key, value, **kwargs) + if append: + f = lambda store: store.append(key, value, **kwargs) + else: + f = lambda store: store.put(key, value, **kwargs) if isinstance(path_or_buf, basestring): with get_store(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: @@ -941,9 +949,11 @@ class IndexCol(object): is_an_indexable = True is_data_indexable = True is_searchable = False + _info_fields = ['freq','tz','name'] def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None, - name=None, axis=None, kind_attr=None, pos=None, **kwargs): + name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None, + index_name=None, **kwargs): self.values = values self.kind = kind self.typ = typ @@ -953,6 +963,9 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None, self.kind_attr = kind_attr self.axis = axis self.pos = pos + self.freq = freq + self.tz = tz + self.index_name = None self.table = None if name is not None: @@ -1023,7 +1036,22 @@ def convert(self, values, nan_rep): values = values[self.cname] except: pass - self.values = Index(_maybe_convert(values, self.kind)) + + kwargs = dict() + if self.freq is not None: + kwargs['freq'] = self.freq + if self.tz is not None: + kwargs['tz'] = self.tz + if self.name is not 
None: + kwargs['name'] = self.index_name + try: + self.values = Index(_maybe_convert(values, self.kind), **kwargs) + except: + + # if the output freq is different that what we recorded, then infer it + if 'freq' in kwargs: + kwargs['freq'] = 'infer' + self.values = Index(_maybe_convert(values, self.kind), **kwargs) return self def take_data(self): @@ -1098,6 +1126,46 @@ def validate_attr(self, append): raise TypeError("incompatible kind in col [%s - %s]" % (existing_kind, self.kind)) + def update_info(self, info): + """ set/update the info for this indexable with the key/value + if validate is True, then raise if an existing value does not match the value """ + + for key in self._info_fields: + + value = getattr(self,key,None) + + try: + idx = info[self.name] + except: + idx = info[self.name] = dict() + + existing_value = idx.get(key) + if key in idx and existing_value != value: + + # frequency just warn + if key == 'freq': + ws = frequency_doc % (existing_value,value) + warnings.warn(ws, FrequencyWarning) + + # reset + idx[key] = None + + else: + raise ValueError("invalid info for [%s] for [%s]""" + ", existing_value [%s] conflicts with new value [%s]" % (self.name, + key,existing_value,value)) + else: + if value is not None or existing_value is not None: + idx[key] = value + + return self + + def set_info(self, info): + """ set my state from the passed info """ + idx = info.get(self.name) + if idx is not None: + self.__dict__.update(idx) + def get_attr(self): """ set the kind for this colummn """ self.kind = getattr(self.attrs, self.kind_attr, None) @@ -1137,6 +1205,7 @@ class DataCol(IndexCol): is_an_indexable = False is_data_indexable = False is_searchable = False + _info_fields = ['tz'] @classmethod def create_for_block(cls, i=None, name=None, cname=None, version=None, **kwargs): @@ -1206,7 +1275,7 @@ def set_kind(self): if self.typ is None: self.typ = getattr(self.description,self.cname,None) - def set_atom(self, block, existing_col, min_itemsize, nan_rep, 
**kwargs): + def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs): """ create and setup my atom from the block b """ self.values = list(block.items) @@ -1221,10 +1290,27 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, **kwargs): "[date] is not implemented as a table column") elif inferred_type == 'datetime': if getattr(rvalues[0],'tzinfo',None) is not None: + + # if this block has more than one timezone, raise + if len(set([r.tzinfo for r in rvalues])) != 1: + raise TypeError( + "too many timezones in this block, create separate data columns") + + # convert this column to datetime64[ns] utc, and save the tz + index = DatetimeIndex(rvalues) + tz = getattr(index,'tz',None) + if tz is None: + raise TypeError( + "invalid timezone specification") + + values = index.tz_convert('UTC').values.view('i8') + self.tz = tz + self.update_info(info) + self.set_atom_datetime64(block, values.reshape(block.values.shape)) + + else: raise TypeError( - "timezone support on datetimes is not yet implemented as a table column") - raise TypeError( - "[datetime] is not implemented as a table column") + "[datetime] is not implemented as a table column") elif inferred_type == 'unicode': raise TypeError( "[unicode] is not implemented as a table column") @@ -1304,10 +1390,12 @@ def set_atom_data(self, block): def get_atom_datetime64(self, block): return _tables().Int64Col(shape=block.shape[0]) - def set_atom_datetime64(self, block): + def set_atom_datetime64(self, block, values = None): self.kind = 'datetime64' self.typ = self.get_atom_datetime64(block) - self.set_data(block.values.view('i8'), 'datetime64') + if values is None: + values = block.values.view('i8') + self.set_data(values, 'datetime64') @property def shape(self): @@ -1346,7 +1434,18 @@ def convert(self, values, nan_rep): # reverse converts if self.dtype == 'datetime64': - self.data = np.asarray(self.data, dtype='M8[ns]') + # recreate the timezone + if self.tz is not None: + + # data 
should be 2-dim here + # we stored as utc, so just set the tz + + index = DatetimeIndex(self.data.ravel(),tz='UTC').tz_convert(self.tz) + self.data = np.array(index.tolist(),dtype=object).reshape(self.data.shape) + + else: + self.data = np.asarray(self.data, dtype='M8[ns]') + elif self.dtype == 'date': self.data = np.array( [date.fromtimestamp(v) for v in self.data], dtype=object) @@ -2060,6 +2159,7 @@ def __init__(self, *args, **kwargs): self.non_index_axes = [] self.values_axes = [] self.data_columns = [] + self.info = dict() self.nan_rep = None self.selection = None @@ -2173,18 +2273,20 @@ def values_cols(self): def set_attrs(self): """ set our table type & indexables """ - self.attrs.table_type = self.table_type - self.attrs.index_cols = self.index_cols() - self.attrs.values_cols = self.values_cols() + self.attrs.table_type = self.table_type + self.attrs.index_cols = self.index_cols() + self.attrs.values_cols = self.values_cols() self.attrs.non_index_axes = self.non_index_axes self.attrs.data_columns = self.data_columns - self.attrs.nan_rep = self.nan_rep - self.attrs.levels = self.levels + self.attrs.info = self.info + self.attrs.nan_rep = self.nan_rep + self.attrs.levels = self.levels def get_attrs(self): """ retrieve our attributes """ self.non_index_axes = getattr(self.attrs,'non_index_axes',None) or [] self.data_columns = getattr(self.attrs,'data_columns',None) or [] + self.info = getattr(self.attrs,'info',None) or dict() self.nan_rep = getattr(self.attrs,'nan_rep',None) self.levels = getattr(self.attrs,'levels',None) or [] t = self.table @@ -2222,7 +2324,7 @@ def indexables(self): self._indexables = [] # index columns - self._indexables.extend([IndexCol(name=name, axis=axis, pos=i) for i, (axis, name) in enumerate(self.attrs.index_cols)]) + self._indexables.extend([ IndexCol(name=name,axis=axis,pos=i) for i, (axis, name) in enumerate(self.attrs.index_cols)]) # values columns dc = set(self.data_columns) @@ -2315,6 +2417,7 @@ def read_axes(self, where, 
**kwargs): # convert the data for a in self.axes: + a.set_info(self.info) a.convert(values, nan_rep=self.nan_rep) return True @@ -2379,7 +2482,8 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, existing_table.infer_axes() axes = [ a.axis for a in existing_table.index_axes] data_columns = existing_table.data_columns - nan_rep = existing_table.nan_rep + nan_rep = existing_table.nan_rep + self.info = existing_table.info else: existing_table = None @@ -2421,7 +2525,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, self.non_index_axes.append((i, append_axis)) # set axis positions (based on the axes) - self.index_axes = [index_axes_map[a].set_pos(j) for j, + self.index_axes = [index_axes_map[a].set_pos(j).update_info(self.info) for j, a in enumerate(axes)] j = len(self.index_axes) @@ -2479,6 +2583,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, existing_col=existing_col, min_itemsize=min_itemsize, nan_rep=nan_rep, + info=self.info, **kwargs) col.set_pos(j) @@ -2598,6 +2703,7 @@ def read_column(self, column, where = None, **kwargs): # column must be an indexable or a data column c = getattr(self.table.cols, column) + a.set_info(self.info) return Series(a.convert(c[:], nan_rep=self.nan_rep).take_data()) raise KeyError("column [%s] not found in the table" % column) @@ -3042,10 +3148,10 @@ class AppendableNDimTable(AppendablePanelTable): def _convert_index(index): if isinstance(index, DatetimeIndex): converted = index.asi8 - return IndexCol(converted, 'datetime64', _tables().Int64Col()) + return IndexCol(converted, 'datetime64', _tables().Int64Col(), freq=getattr(index,'freq',None), tz=getattr(index,'tz',None)) elif isinstance(index, (Int64Index, PeriodIndex)): atom = _tables().Int64Col() - return IndexCol(index.values, 'integer', atom) + return IndexCol(index.values, 'integer', atom, freq=getattr(index,'freq',None)) if isinstance(index, MultiIndex): raise 
Exception('MultiIndex not supported here!') @@ -3309,6 +3415,8 @@ def convert_value(self, v): if self.kind == 'datetime64' or self.kind == 'datetime' : v = lib.Timestamp(v) + if v.tz is not None: + v = v.tz_convert('UTC') return [v.value, v] elif isinstance(v, datetime) or hasattr(v, 'timetuple') or self.kind == 'date': v = time.mktime(v.timetuple()) diff --git a/pandas/io/tests/data/legacy_hdf/legacy_table_0.11.h5 b/pandas/io/tests/data/legacy_hdf/legacy_table_0.11.h5 new file mode 100644 index 0000000000000..958effc2ce6f8 Binary files /dev/null and b/pandas/io/tests/data/legacy_hdf/legacy_table_0.11.h5 differ diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index d7f497648236a..3daa08a0d591a 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -7,9 +7,12 @@ import datetime import numpy as np +import pandas from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range, date_range, Index) -from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning, PerformanceWarning +from pandas.io.pytables import (HDFStore, get_store, Term, + IncompatibilityWarning, PerformanceWarning, + FrequencyWarning) import pandas.util.testing as tm from pandas.tests.test_series import assert_series_equal from pandas.tests.test_frame import assert_frame_equal @@ -1259,16 +1262,48 @@ def test_unimplemented_dtypes_table_columns(self): self.assertRaises(TypeError, store.append, 'df_unimplemented', df) def test_table_append_with_timezones(self): - # not implemented yet with ensure_clean(self.path) as store: - # check with mixed dtypes - df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern')),index=range(5)) - - # timezones not yet supported + def compare(a,b): + tm.assert_frame_equal(a,b) + + # compare the zones on each element + for c in a.columns: + for i in a.index: + a_e = a[c][i] + b_e = b[c][i] + if not (a_e == b_e and a_e.tz == b_e.tz): + raise AssertionError("invalid tz comparsion [%s] 
[%s]" % (a_e,b_e)) + + from datetime import timedelta + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ])) + store.append('df_tz',df,data_columns=['A']) + compare(store['df_tz'],df) + + # select with tz aware + compare(store.select('df_tz',where=Term('A','>=',df.A[3])),df[df.A>=df.A[3]]) + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5)) + store.append('df_tz',df) + compare(store['df_tz'],df) + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5)) self.assertRaises(TypeError, store.append, 'df_tz', df) + # this is ok + _maybe_remove(store, 'df_tz') + store.append('df_tz',df,data_columns=['A','B']) + compare(store['df_tz'],df) + + # can't append with diff timezone + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5)) + self.assertRaises(ValueError, store.append, 'df_tz', df) + def test_remove(self): with ensure_clean(self.path) as store: @@ -2041,6 +2076,51 @@ def test_select_iterator(self): result = concat(results) tm.assert_frame_equal(expected, result) + def test_retain_index_attributes(self): + + # GH 3499, losing frequency info on index recreation + df = DataFrame(dict(A = Series(xrange(3), + index=date_range('2000-1-1',periods=3,freq='H')))) + + with ensure_clean(self.path) as store: + _maybe_remove(store,'data') + store.put('data', df, table=True) + + result = store.get('data') + tm.assert_frame_equal(df,result) + + for attr in ['freq','tz']: + for idx in ['index','columns']: + self.assert_(getattr(getattr(df,idx),attr,None) == getattr(getattr(result,idx),attr,None)) + + + # try to append a table with a different frequency + warnings.filterwarnings('ignore', category=FrequencyWarning) + df2 = 
DataFrame(dict(A = Series(xrange(3), + index=date_range('2002-1-1',periods=3,freq='D')))) + store.append('data',df2) + warnings.filterwarnings('always', category=FrequencyWarning) + + self.assert_(store.get_storer('data').info['index']['freq'] is None) + + # this is ok + _maybe_remove(store,'df2') + df2 = DataFrame(dict(A = Series(xrange(3), + index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')]))) + store.append('df2',df2) + df3 = DataFrame(dict(A = Series(xrange(3),index=date_range('2002-1-1',periods=3,freq='D')))) + store.append('df2',df3) + + def test_retain_index_attributes2(self): + + with tm.ensure_clean(self.path) as path: + warnings.filterwarnings('ignore', category=FrequencyWarning) + df = DataFrame(dict(A = Series(xrange(3), index=date_range('2000-1-1',periods=3,freq='H')))) + df.to_hdf(path,'data',mode='w',append=True) + df2 = DataFrame(dict(A = Series(xrange(3), index=date_range('2002-1-1',periods=3,freq='D')))) + df2.to_hdf(path,'data',append=True) + warnings.filterwarnings('always', category=FrequencyWarning) + def test_panel_select(self): wp = tm.makePanel() @@ -2437,6 +2517,16 @@ def test_legacy_0_10_read(self): finally: safe_close(store) + def test_legacy_0_11_read(self): + # legacy from 0.11 + try: + store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_0.11.h5'), 'r') + df = store.select('df') + df1 = store.select('df1') + mi = store.select('mi') + finally: + safe_close(store) + def test_copy(self): def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): @@ -2497,14 +2587,22 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): def test_legacy_table_write(self): raise nose.SkipTest - # legacy table types + store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a') + df = tm.makeDataFrame() wp = tm.makePanel() - store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'a') + index = MultiIndex(levels=[['foo', 'bar', 'baz', 
'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['foo', 'bar']) + df = DataFrame(np.random.randn(10, 3), index=index, + columns=['A', 'B', 'C']) + store.append('mi', df) - self.assertRaises(Exception, store.append, 'df1', df) - self.assertRaises(Exception, store.append, 'wp1', wp) + df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10)) + store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 }) store.close()
- will retain index attributes (freq,tz,name) on recreation #3499 - support datelike columns with a timezone as data_columns #2852 - will raise if trying to append with a different timezone than existing will warn if the existing frequency of an index is different than an appended one (thought raising was too strict) ``` In [5]: df = DataFrame(dict(A = Series(xrange(3), index=date_range('2000-1-1',periods=3,freq='H')))) In [6]: df2 = DataFrame(dict(A = Series(xrange(3), index=date_range('2002-1-1',periods=3,freq='D')))) In [9]: df.index Out[9]: <class 'pandas.tseries.index.DatetimeIndex'> [2000-01-01 00:00:00, ..., 2000-01-01 02:00:00] Length: 3, Freq: H, Timezone: None In [10]: df2.index Out[10]: <class 'pandas.tseries.index.DatetimeIndex'> [2002-01-01 00:00:00, ..., 2002-01-03 00:00:00] Length: 3, Freq: D, Timezone: None In [12]: df.to_hdf('test.h5','data',mode='w',append=True) In [13]: df2.to_hdf('test.h5','data',append=True) pandas/io/pytables.py:1148: FrequencyWarning: the frequency of the existing index is [<1 Hour>] which conflicts with the new freq [<1 Day>], resetting the frequency to None warnings.warn(ws, FrequencyWarning) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3531
2013-05-06T15:35:58Z
2013-05-08T20:26:10Z
2013-05-08T20:26:10Z
2014-06-17T13:08:16Z
DOC: Fix header
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index ddaddd356e7d4..d67a2d51cc1b8 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -14,9 +14,9 @@ randint = np.random.randint np.set_printoptions(precision=4, suppress=True) -************** -Selecting Data -************** +*************************** +Indexing and Selecting Data +*************************** The axis labeling information in pandas objects serves many purposes:
I went searching for how to make a MultiIndex on the docs page and couldn't figure out where to look anymore.
https://api.github.com/repos/pandas-dev/pandas/pulls/3530
2013-05-06T01:05:28Z
2013-05-06T04:50:56Z
2013-05-06T04:50:56Z
2014-07-16T08:07:14Z
fix qt repr bug
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8bfdee3b75170..3fd95e161b41a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -746,8 +746,8 @@ def _repr_html_(self): self.info(buf=buf, verbose=verbose) info = buf.getvalue() info = info.replace('&', r'&amp;') - info = info.replace('<', r'&lt') - info = info.replace('>', r'&gt') + info = info.replace('<', r'&lt;') + info = info.replace('>', r'&gt;') return ('<pre>\n' + info + '\n</pre>') else: return None
addresses #3522.
https://api.github.com/repos/pandas-dev/pandas/pulls/3527
2013-05-03T23:59:41Z
2013-05-04T00:22:02Z
2013-05-04T00:22:02Z
2014-07-03T08:22:45Z
ENH: support for msgpack serialization/deserialization
diff --git a/LICENSES/MSGPACK_NUMPY_LICENSE b/LICENSES/MSGPACK_NUMPY_LICENSE new file mode 100644 index 0000000000000..57ea631f0f66d --- /dev/null +++ b/LICENSES/MSGPACK_NUMPY_LICENSE @@ -0,0 +1,33 @@ +.. -*- rst -*- + +License +======= + +Copyright (c) 2013, Lev Givon. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. +* Neither the name of Lev Givon nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/RELEASE.rst b/RELEASE.rst index 77e8e85db6a76..cefb18c9f0ddf 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -32,6 +32,8 @@ pandas 0.11.1 - pd.read_html() can now parse HTML string, files or urls and return dataframes courtesy of @cpcloud. 
(GH3477_) + - ``pd.read_msgpack()`` and ``pd.to_msgpack()`` are now a supported method of serialization + of arbitrary pandas (and python objects) in a lightweight portable binary format (GH686_) **Improvements to existing features** @@ -75,6 +77,7 @@ pandas 0.11.1 .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 +.. _GH686: https://github.com/pydata/pandas/issues/686 .. _GH2194: https://github.com/pydata/pandas/issues/2194 .. _GH3230: https://github.com/pydata/pandas/issues/3230 .. _GH3251: https://github.com/pydata/pandas/issues/3251 diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 5739fe0922d6d..c6f036d9541a6 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1192,47 +1192,6 @@ While float dtypes are unchanged. casted casted.dtypes -.. _basics.serialize: - -Pickling and serialization --------------------------- - -All pandas objects are equipped with ``save`` methods which use Python's -``cPickle`` module to save data structures to disk using the pickle format. - -.. ipython:: python - - df - df.save('foo.pickle') - -The ``load`` function in the ``pandas`` namespace can be used to load any -pickled pandas object (or any other pickled object) from file: - - -.. ipython:: python - - load('foo.pickle') - -There is also a ``save`` function which takes any object as its first argument: - -.. ipython:: python - - save(df, 'foo.pickle') - load('foo.pickle') - -.. ipython:: python - :suppress: - - import os - os.remove('foo.pickle') - -.. warning:: - - Loading pickled data received from untrusted sources can be unsafe. - - See: http://docs.python.org/2.7/library/pickle.html - - Working with package options ---------------------------- diff --git a/doc/source/install.rst b/doc/source/install.rst index 9d14d1b11c6b1..360ded91c86f0 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -93,6 +93,7 @@ Optional Dependencies version. Version 0.17.1 or higher. 
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage + * `msgpack <http://www.msgpack.org>`__: necessary for msgpack based serialization * `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting * `statsmodels <http://statsmodels.sourceforge.net/>`__ * Needed for parts of :mod:`pandas.stats` diff --git a/doc/source/io.rst b/doc/source/io.rst index 9001ae393d552..9df6f4d3ecca6 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -981,6 +981,94 @@ one can use the ExcelWriter class, as in the following example: .. _io.hdf5: +.. _basics.serialize: + +Serialization +------------- + +msgpack +~~~~~~~ + +Starting in 0.12.0, pandas is supporting the ``msgpack`` format for +object serialization. This is a lightweight portable binary format, similar +to binary JSON, that is highly space efficient, and provides good performance +both on the writing (serialization), and reading (deserialization). + +.. ipython:: python + + df = DataFrame(np.random.rand(5,2),columns=list('AB')) + df.to_msgpack('foo.msg') + pd.read_msgpack('foo.msg') + s = Series(np.random.rand(5),index=date_range('20130101',periods=5)) + +You can pass a list of objects and you will receive them back on deserialization. + +.. ipython:: python + + pd.to_msgpack('foo.msg', df, 'foo', np.array([1,2,3]), s) + pd.read_msgpack('foo.msg') + +You can pass ``iterator=True`` to iterator over the unpacked results + +.. ipython:: python + + for o in pd.read_msgpack('foo.msg',iterator=True): + print o + + +You can pass ``append=True`` to the writer to append to an existing pack + +.. ipython:: python + + df.to_msgpack('foo.msg',append=True) + pd.read_msgpack('foo.msg') + +.. ipython:: python + :suppress: + :okexcept: + + os.remove('foo.msg') + + +pickling +~~~~~~~~ + +All pandas objects are equipped with ``save`` methods which use Python's +``cPickle`` module to save data structures to disk using the pickle format. 
+ +.. ipython:: python + + df + df.save('foo.pickle') + +The ``load`` function in the ``pandas`` namespace can be used to load any +pickled pandas object (or any other pickled object) from file: + + +.. ipython:: python + + load('foo.pickle') + +There is also a ``save`` function which takes any object as its first argument: + +.. ipython:: python + + save(df, 'foo.pickle') + load('foo.pickle') + +.. ipython:: python + :suppress: + + import os + os.remove('foo.pickle') + +.. warning:: + + Loading pickled data received from untrusted sources can be unsafe. + + See: http://docs.python.org/2.7/library/pickle.html + + HDF5 (PyTables) --------------- diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index 5cfb24d71e19b..d87e9eea8a35e 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -1,10 +1,9 @@ -.. _whatsnew_0120: +.. _whatsnew_0111: -v0.12.0 (??) +v0.11.1 (??) ------------------------ -This is a major release from 0.11.0 and includes many new features and -enhancements along with a large number of bug fixes. +This is a minor release from 0.11.0 and include a small number of enhances and bug fixes. API changes ~~~~~~~~~~~ @@ -12,7 +11,7 @@ API changes Enhancements ~~~~~~~~~~~~ - - pd.read_html() can now parse HTML string, files or urls and return dataframes + - ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes courtesy of @cpcloud. (GH3477_) See the `full release notes diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt new file mode 100644 index 0000000000000..ccb9347135c48 --- /dev/null +++ b/doc/source/v0.12.0.txt @@ -0,0 +1,42 @@ +.. _whatsnew_0120: + +v0.12.0 (??????) +---------------- + +This is a major release from 0.11.1 and includes many new features and +enhancements along with a large number of bug fixes. There are also a +number of important API changes that long-time pandas users should +pay close attention to. 
+ +Enhancements +~~~~~~~~~~~~ + +- ``pd.read_msgpack()`` and ``pd.to_msgpack()`` are now a supported method of serialization + of arbitrary pandas (and python objects) in a lightweight portable binary format + + .. ipython:: python + + df = DataFrame(np.random.rand(5,2),columns=list('AB')) + df.to_msgpack('foo.msg') + pd.read_msgpack('foo.msg') + + s = Series(np.random.rand(5),index=date_range('20130101',periods=5)) + pd.to_msgpack('foo.msg', df, s) + pd.read_msgpack('foo.msg') + + You can pass ``iterator=True`` to iterator over the unpacked results + + .. ipython:: python + + for o in pd.read_msgpack('foo.msg',iterator=True): + print o + + .. ipython:: python + :suppress: + :okexcept: + + os.remove('foo.msg') + +See the `full release notes +<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker +on GitHub for a complete list. diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 81bd39dd0e70f..a02e41176ced1 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,8 @@ These are new features and improvements of note in each release. .. include:: v0.12.0.txt +.. include:: v0.11.1.txt + .. include:: v0.11.0.txt .. 
include:: v0.10.1.txt diff --git a/pandas/__init__.py b/pandas/__init__.py index bf5bcc81bc21e..3aee9b2ab67d8 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -32,6 +32,7 @@ from pandas.io.parsers import (read_csv, read_table, read_clipboard, read_fwf, to_clipboard, ExcelFile, ExcelWriter) +from pandas.io.packers import read_msgpack, to_msgpack from pandas.io.pytables import HDFStore, Term, get_store, read_hdf from pandas.io.html import read_html from pandas.util.testing import debug diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ed90aab715cfd..571ab4fab07ce 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -487,6 +487,10 @@ def to_hdf(self, path_or_buf, key, **kwargs): from pandas.io import pytables return pytables.to_hdf(path_or_buf, key, self, **kwargs) + def to_msgpack(self, path_or_buf, **kwargs): + from pandas.io import packers + return packers.to_msgpack(path_or_buf, self, **kwargs) + # install the indexerse for _name, _indexer in indexing.get_indexers_list(): PandasObject._create_indexer(_name,_indexer) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5c0f9253beb62..4628773491d61 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -15,7 +15,6 @@ from pandas.tslib import Timestamp from pandas.util import py3compat - class Block(object): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas diff --git a/pandas/io/packers.py b/pandas/io/packers.py new file mode 100644 index 0000000000000..bc32c3c4d4011 --- /dev/null +++ b/pandas/io/packers.py @@ -0,0 +1,522 @@ +""" +Msgpack serializer support for reading and writing pandas data structures +to disk +""" + +# porfions of msgpack_numpy package, by Lev Givon were incorporated +# into this module (and tests_packers.py) + +""" +License +======= + +Copyright (c) 2013, Lev Givon. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. +* Neither the name of Lev Givon nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+""" + +from datetime import datetime, date, timedelta +import time +import re +import copy +import itertools +import warnings +from dateutil.parser import parse + +import numpy as np +from pandas import ( + Timestamp, Period, Series, TimeSeries, DataFrame, Panel, Panel4D, + Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, NaT +) +from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel +from pandas.sparse.array import BlockIndex, IntIndex +from pandas.tseries.api import PeriodIndex, DatetimeIndex +from pandas.core.index import Int64Index, _ensure_index +import pandas.core.common as com +from pandas.core.generic import NDFrame +from pandas.core.common import needs_i8_conversion +from pandas.core.internals import BlockManager, make_block +import pandas.core.internals as internals + +try: + import msgpack + from msgpack import _packer, _unpacker + _USE_MSGPACK = True +except: + _USE_MSGPACK = False + +import zlib + +try: + import blosc + _BLOSC = True +except: + _BLOSC = False + +## until we can pass this into our conversion functions, +## this is pretty hacky +compressor = None + +def to_msgpack(path, *args, **kwargs): + """ + msgpack (serialize) object to input file path + + Parameters + ---------- + path : string + File path + args : an object or objects to serialize + + append : boolean whether to append to an existing msgpack + (default is False) + compress : type of compressor (zlib or blosc), default to None (no compression) + """ + if not _USE_MSGPACK: + raise Exception("please install msgpack to create msgpack stores!") + + global compressor + compressor = kwargs.get('compress') + append = kwargs.get('append') + if append: + f = open(path, 'a+b') + else: + f = open(path, 'wb') + try: + if len(args) == 1: + f.write(pack(args[0])) + else: + for a in args: + f.write(pack(a)) + finally: + f.close() + + +def read_msgpack(path, iterator=False, **kwargs): + """ + Load msgpack pandas object from the specified + file path + + Parameters + 
---------- + path : string + File path + iterator : boolean, if True, return an iterator to the unpacker + (default is False) + + Returns + ------- + obj : type of object stored in file + + """ + if not _USE_MSGPACK: + raise Exception("please install msgpack to read msgpack stores!") + if iterator: + return Iterator(path) + + with open(path,'rb') as fh: + l = list(unpack(fh)) + if len(l) == 1: + return l[0] + return l + +dtype_dict = { 21 : np.dtype('M8[ns]'), + u'datetime64[ns]' : np.dtype('M8[ns]'), + u'datetime64[us]' : np.dtype('M8[us]'), + 22 : np.dtype('m8[ns]'), + u'timedelta64[ns]' : np.dtype('m8[ns]'), + u'timedelta64[us]' : np.dtype('m8[us]') } + +def dtype_for(t): + if t in dtype_dict: + return dtype_dict[t] + return np.typeDict[t] + +c2f_dict = {'complex': np.float64, + 'complex128': np.float64, + 'complex256': np.float128, + 'complex64': np.float32} + +def c2f(r, i, ctype_name): + """ + Convert strings to complex number instance with specified numpy type. + """ + + ftype = c2f_dict[ctype_name] + return np.typeDict[ctype_name](ftype(r)+1j*ftype(i)) + + +def convert(values): + """ convert the numpy values to a list """ + + dtype = values.dtype + if needs_i8_conversion(dtype): + values = values.view('i8') + v = values.ravel() + + if compressor == 'zlib': + + # return string arrays like they are + if dtype == np.object_: + return v.tolist() + + # convert to a bytes array + v = v.tostring() + return zlib.compress(v) + + elif compressor == 'blosc' and _BLOSC: + + # return string arrays like they are + if dtype == np.object_: + return v.tolist() + + # convert to a bytes array + v = v.tostring() + return blosc.compress(v,typesize=dtype.itemsize) + + # as a list + return v.tolist() + +def unconvert(values, dtype, compress): + + if dtype == np.object_: + return np.array(values,dtype=object) + + if compress == 'zlib': + + values = zlib.decompress(values) + return np.frombuffer(values,dtype=dtype) + + elif compress == 'blosc': + + if not _BLOSC: + raise 
Exception("cannot uncompress w/o blosc") + + # decompress + values = blosc.decompress(values) + + return np.frombuffer(values,dtype=dtype) + + # as a list + return np.array(values,dtype=dtype) + +def encode(obj): + """ + Data encoder + """ + + tobj = type(obj) + if isinstance(obj, Index): + if isinstance(obj, PeriodIndex): + return {'typ' : 'period_index', + 'klass' : obj.__class__.__name__, + 'name' : getattr(obj,'name',None), + 'dtype': obj.dtype.num, + 'data': obj.tolist() } + elif isinstance(obj, DatetimeIndex): + return {'typ' : 'datetime_index', + 'klass' : obj.__class__.__name__, + 'name' : getattr(obj,'name',None), + 'dtype': obj.dtype.num, + 'data': obj.values.view('i8').tolist(), + 'freq' : obj.freqstr, + 'tz' : obj.tz} + elif isinstance(obj, MultiIndex): + return {'typ' : 'multi_index', + 'klass' : obj.__class__.__name__, + 'names' : getattr(obj,'names',None), + 'dtype': obj.dtype.num, + 'data': obj.tolist() } + else: + return {'typ' : 'index', + 'klass' : obj.__class__.__name__, + 'name' : getattr(obj,'name',None), + 'dtype': obj.dtype.num, + 'data': obj.tolist() } + elif isinstance(obj, Series): + if isinstance(obj, SparseSeries): + d = {'typ' : 'sparse_series', + 'klass' : obj.__class__.__name__, + 'dtype': obj.dtype.num, + 'index' : obj.index, + 'sp_index' : obj.sp_index, + 'sp_values' : convert(obj.sp_values), + 'compress' : compressor} + for f in ['name','fill_value','kind']: + d[f] = getattr(obj,f,None) + return d + else: + return {'typ' : 'series', + 'klass' : obj.__class__.__name__, + 'name' : getattr(obj,'name',None), + 'index' : obj.index, + 'dtype': obj.dtype.num, + 'data': convert(obj.values), + 'compress' : compressor} + elif issubclass(tobj, NDFrame): + if isinstance(obj, SparseDataFrame): + d = {'typ' : 'sparse_dataframe', + 'klass' : obj.__class__.__name__, + 'columns' : obj.columns } + for f in ['default_fill_value','default_kind']: + d[f] = getattr(obj,f,None) + d['data'] = dict([ (name,ss) for name,ss in obj.iteritems() ]) + return d 
+ elif isinstance(obj, SparsePanel): + d = {'typ' : 'sparse_panel', + 'klass' : obj.__class__.__name__, + 'items' : obj.items } + for f in ['default_fill_value','default_kind']: + d[f] = getattr(obj,f,None) + d['data'] = dict([ (name,df) for name,df in obj.iteritems() ]) + return d + else: + + data = obj._data + if not data.is_consolidated(): + data = data.consolidate() + + # the block manager + return {'typ' : 'block_manager', + 'klass' : obj.__class__.__name__, + 'axes' : data.axes, + 'blocks' : [ { 'items' : b.items, + 'values' : convert(b.values), + 'shape' : b.values.shape, + 'dtype' : b.dtype.num, + 'klass' : b.__class__.__name__, + 'compress' : compressor + } for b in data.blocks ] } + + elif isinstance(obj, (datetime,date,timedelta)): + if isinstance(obj, Timestamp): + tz = obj.tzinfo + if tz is not None: + tz = tz.zone + offset = obj.offset + if offset is not None: + offset = offset.freqstr + return {'typ' : 'timestamp', + 'value': obj.value, + 'offset' : offset, + 'tz' : tz} + elif isinstance(obj, timedelta): + return { 'typ' : 'timedelta', + 'data' : (obj.days,obj.seconds,obj.microseconds) } + elif isinstance(obj, datetime): + return { 'typ' : 'datetime', + 'data' : obj.isoformat() } + elif isinstance(obj, date): + return { 'typ' : 'date', + 'data' : obj.isoformat() } + raise Exception("cannot encode this datetimelike object: %s" % obj) + elif isinstance(obj, Period): + return {'typ' : 'period', + 'ordinal' : obj.ordinal, + 'freq' : obj.freq } + elif isinstance(obj, BlockIndex): + return { 'typ' : 'block_index', + 'klass' : obj.__class__.__name__, + 'blocs' : obj.blocs, + 'blengths' : obj.blengths, + 'length' : obj.length } + elif isinstance(obj, IntIndex): + return { 'typ' : 'int_index', + 'klass' : obj.__class__.__name__, + 'indices' : obj.indices, + 'length' : obj.length } + elif isinstance(obj, np.ndarray): + return {'typ' : 'ndarray', + 'shape': obj.shape, + 'ndim': obj.ndim, + 'dtype': obj.dtype.num, + 'data': convert(obj), + 'compress' : 
compressor } + elif isinstance(obj, np.timedelta64): + return { 'typ' : 'np_timedelta64', + 'data' : obj.view('i8') } + elif isinstance(obj, np.number): + if np.iscomplexobj(obj): + return {'typ' : 'np_scalar', + 'sub_typ' : 'np_complex', + 'dtype': obj.dtype.name, + 'real': obj.real.__repr__(), + 'imag': obj.imag.__repr__()} + else: + return {'typ' : 'np_scalar', + 'dtype': obj.dtype.name, + 'data': obj.__repr__()} + elif isinstance(obj, complex): + return {'typ' : 'np_complex', + 'real': obj.real.__repr__(), + 'imag': obj.imag.__repr__()} + + return obj + +def decode(obj): + """ + Decoder for deserializing numpy data types. + """ + + typ = obj.get('typ') + if typ is None: + return obj + elif typ == 'timestamp': + return Timestamp(obj['value'],tz=obj['tz'],offset=obj['offset']) + elif typ == 'period': + return Period(ordinal=obj['ordinal'],freq=obj['freq']) + elif typ == 'index': + dtype = dtype_for(obj['dtype']) + data = obj['data'] + return globals()[obj['klass']](data,dtype=dtype,name=obj['name']) + elif typ == 'multi_index': + return globals()[obj['klass']].from_tuples(obj['data'],names=obj['names']) + elif typ == 'period_index': + return globals()[obj['klass']](obj['data'],name=obj['name']) + elif typ == 'datetime_index': + return globals()[obj['klass']](obj['data'],freq=obj['freq'],tz=obj['tz'],name=obj['name']) + elif typ == 'series': + dtype = dtype_for(obj['dtype']) + index = obj['index'] + return globals()[obj['klass']](unconvert(obj['data'],dtype,obj['compress']),index=index,name=obj['name']) + elif typ == 'block_manager': + axes = obj['axes'] + + def create_block(b): + dtype = dtype_for(b['dtype']) + return make_block(unconvert(b['values'],dtype,b['compress']).reshape(b['shape']),b['items'],axes[0],klass=getattr(internals,b['klass'])) + + blocks = [ create_block(b) for b in obj['blocks'] ] + return globals()[obj['klass']](BlockManager(blocks, axes)) + elif typ == 'datetime': + return parse(obj['data']) + elif typ == 'date': + return 
parse(obj['data']).date() + elif typ == 'timedelta': + return timedelta(*obj['data']) + elif typ == 'sparse_series': + dtype = dtype_for(obj['dtype']) + return globals()[obj['klass']](unconvert(obj['sp_values'],dtype,obj['compress']),sparse_index=obj['sp_index'], + index=obj['index'],fill_value=obj['fill_value'],kind=obj['kind'],name=obj['name']) + elif typ == 'sparse_dataframe': + return globals()[obj['klass']](obj['data'], + columns=obj['columns'],default_fill_value=obj['default_fill_value'],default_kind=obj['default_kind']) + elif typ == 'sparse_panel': + return globals()[obj['klass']](obj['data'], + items=obj['items'],default_fill_value=obj['default_fill_value'],default_kind=obj['default_kind']) + elif typ == 'block_index': + return globals()[obj['klass']](obj['length'],obj['blocs'],obj['blengths']) + elif typ == 'int_index': + return globals()[obj['klass']](obj['length'],obj['indices']) + elif typ == 'ndarray': + return unconvert(obj['data'],np.typeDict[obj['dtype']],obj['compress']).reshape(obj['shape']) + elif typ == 'np_timedelta64': + return np.timedelta64(obj['data']) + elif typ == 'np_scalar': + if obj.get('sub_typ') == 'np_complex': + return c2f(obj['real'], obj['imag'], obj['dtype']) + else: + dtype = dtype_for(obj['dtype']) + try: + return dtype(obj['data']) + except: + return dtype.type(obj['data']) + elif typ == 'np_complex': + return complex(obj['real']+'+'+obj['imag']+'j') + elif isinstance(obj, (dict,list,set)): + return obj + else: + return obj + +def pack(o, default=encode, + encoding=None, unicode_errors='strict', use_single_float=False): + """ + Pack an object and return the packed bytes. 
+ """ + + return Packer(default=default, encoding=encoding, + unicode_errors=unicode_errors, + use_single_float=use_single_float).pack(o) + +def unpack(packed, object_hook=decode, + list_hook=None, use_list=False, encoding=None, + unicode_errors='strict', object_pairs_hook=None): + """ + Unpack a packed object, return an iterator + Note: packed lists will be returned as tuples + """ + + return Unpacker(packed, object_hook=object_hook, + list_hook=list_hook, + use_list=use_list, encoding=encoding, + unicode_errors=unicode_errors, + object_pairs_hook=object_pairs_hook) + +if _USE_MSGPACK: + + class Packer(_packer.Packer): + def __init__(self, default=encode, + encoding=None, + unicode_errors='strict', + use_single_float=False): + super(Packer, self).__init__(default=default, + encoding=encoding, + unicode_errors=unicode_errors, + use_single_float=use_single_float) + + class Unpacker(_unpacker.Unpacker): + def __init__(self, file_like=None, read_size=0, use_list=False, + object_hook=decode, + object_pairs_hook=None, list_hook=None, encoding=None, + unicode_errors='strict', max_buffer_size=0): + super(Unpacker, self).__init__(file_like=file_like, + read_size=read_size, + use_list=use_list, + object_hook=object_hook, + object_pairs_hook=object_pairs_hook, + list_hook=list_hook, + encoding=encoding, + unicode_errors=unicode_errors, + max_buffer_size=max_buffer_size) + +class Iterator(object): + """ manage the unpacking iteration, + close the file on completion """ + + def __init__(self, path, **kwargs): + self.path = path + self.kwargs = kwargs + + def __iter__(self): + + try: + fh = open(self.path,'rb') + unpacker = unpack(fh) + for o in unpacker: + yield o + finally: + fh.close() diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py new file mode 100644 index 0000000000000..f9e25f3956d38 --- /dev/null +++ b/pandas/io/tests/test_packers.py @@ -0,0 +1,368 @@ +import nose +import unittest +import os +import sys +import warnings + +import datetime 
+import numpy as np + +from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range, + date_range, period_range, Index, SparseSeries, SparseDataFrame, + SparsePanel) +import pandas.util.testing as tm +from pandas.util.testing import ensure_clean +from pandas.tests.test_series import assert_series_equal +from pandas.tests.test_frame import assert_frame_equal +from pandas.tests.test_panel import assert_panel_equal + +import pandas +from pandas.sparse.tests.test_sparse import assert_sp_series_equal, assert_sp_frame_equal +from pandas import concat, Timestamp, tslib + +from numpy.testing.decorators import slow +nan = np.nan + +from pandas.io.packers import to_msgpack, read_msgpack, _USE_MSGPACK + +if not _USE_MSGPACK: + raise nose.SkipTest('no msgpack') + +_multiprocess_can_split_ = False + +def check_arbitrary(a, b): + + if isinstance(a,(list,tuple)) and isinstance(b,(list,tuple)): + assert(len(a) == len(b)) + for a_, b_ in zip(a,b): + check_arbitrary(a_,b_) + elif isinstance(a,Panel): + assert_panel_equal(a,b) + elif isinstance(a,DataFrame): + assert_frame_equal(a,b) + elif isinstance(a,Series): + assert_series_equal(a,b) + else: + assert(a == b) + +class Test(unittest.TestCase): + + def setUp(self): + self.path = '__%s__.msg' % tm.rands(10) + + def tearDown(self): + pass + + def encode_decode(self, x): + with ensure_clean(self.path) as p: + to_msgpack(p,x) + return read_msgpack(p) + +class TestNumpy(Test): + + def test_numpy_scalar_float(self): + x = np.float32(np.random.rand()) + x_rec = self.encode_decode(x) + assert x == x_rec and type(x) == type(x_rec) + + def test_numpy_scalar_complex(self): + x = np.complex64(np.random.rand()+1j*np.random.rand()) + x_rec = self.encode_decode(x) + assert x == x_rec and type(x) == type(x_rec) + + def test_scalar_float(self): + x = np.random.rand() + x_rec = self.encode_decode(x) + assert x == x_rec and type(x) == type(x_rec) + + def test_scalar_complex(self): + x = np.random.rand()+1j*np.random.rand() + x_rec = 
self.encode_decode(x) + assert x == x_rec and type(x) == type(x_rec) + + def test_list_numpy_float(self): + x = [np.float32(np.random.rand()) for i in xrange(5)] + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + all(map(lambda x,y: type(x) == type(y), x, x_rec)) + + def test_list_numpy_float_complex(self): + x = [np.float32(np.random.rand()) for i in xrange(5)] + \ + [np.complex128(np.random.rand()+1j*np.random.rand()) for i in xrange(5)] + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + all(map(lambda x,y: type(x) == type(y), x, x_rec)) + + def test_list_float(self): + x = [np.random.rand() for i in xrange(5)] + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + all(map(lambda x,y: type(x) == type(y), x, x_rec)) + + def test_list_float_complex(self): + x = [np.random.rand() for i in xrange(5)] + \ + [(np.random.rand()+1j*np.random.rand()) for i in xrange(5)] + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + all(map(lambda x,y: type(x) == type(y), x, x_rec)) + + def test_dict_float(self): + x = {'foo': 1.0, 'bar': 2.0} + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x.values(), x_rec.values())) and \ + all(map(lambda x,y: type(x) == type(y), x.values(), x_rec.values())) + + def test_dict_complex(self): + x = {'foo': 1.0+1.0j, 'bar': 2.0+2.0j} + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x.values(), x_rec.values())) and \ + all(map(lambda x,y: type(x) == type(y), x.values(), x_rec.values())) + + def test_dict_numpy_float(self): + x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)} + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x.values(), x_rec.values())) and \ + all(map(lambda x,y: type(x) == type(y), x.values(), x_rec.values())) + + def test_dict_numpy_complex(self): + x = {'foo': np.complex128(1.0+1.0j), 'bar': np.complex128(2.0+2.0j)} + x_rec = 
self.encode_decode(x) + assert all(map(lambda x,y: x == y, x.values(), x_rec.values())) and \ + all(map(lambda x,y: type(x) == type(y), x.values(), x_rec.values())) + + def test_numpy_array_float(self): + x = np.random.rand(5).astype(np.float32) + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + x.dtype == x_rec.dtype + def test_numpy_array_complex(self): + x = (np.random.rand(5)+1j*np.random.rand(5)).astype(np.complex128) + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + x.dtype == x_rec.dtype + + def test_list_mixed(self): + x = [1.0, np.float32(3.5), np.complex128(4.25), u'foo'] + x_rec = self.encode_decode(x) + assert all(map(lambda x,y: x == y, x, x_rec)) and \ + all(map(lambda x,y: type(x) == type(y), x, x_rec)) +class TestBasic(Test): + + def test_timestamp(self): + + for i in [ Timestamp('20130101'), Timestamp('20130101',tz='US/Eastern'), + Timestamp('201301010501') ]: + i_rec = self.encode_decode(i) + self.assert_(i == i_rec) + + def test_datetimes(self): + + for i in [ datetime.datetime(2013,1,1), datetime.datetime(2013,1,1,5,1), + datetime.date(2013,1,1), np.datetime64('2013-01-05 2:15') ]: + i_rec = self.encode_decode(i) + self.assert_(i == i_rec) + + def test_timedeltas(self): + + for i in [ datetime.timedelta(days=1), + datetime.timedelta(days=1,seconds=10), + np.timedelta64(1000000) ]: + i_rec = self.encode_decode(i) + self.assert_(i == i_rec) + + +class TestIndex(Test): + + def setUp(self): + super(TestIndex, self).setUp() + + self.d = { + 'string' : tm.makeStringIndex(100), + 'date' : tm.makeDateIndex(100), + 'int' : tm.makeIntIndex(100), + 'float' : tm.makeFloatIndex(100), + 'empty' : Index([]), + 'tuple' : Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])), + 'period' : Index(period_range('2012-1-1', freq='M', periods=3)), + 'date2' : Index(date_range('2013-01-1', periods=10)), + 'bdate' : Index(bdate_range('2013-01-02',periods=10)), + } + + self.mi = { + 'reg' : 
MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'), + ('qux', 'one'), ('qux', 'two')], names=['first','second']), + } + + def test_basic_index(self): + + for s, i in self.d.items(): + i_rec = self.encode_decode(i) + self.assert_(i.equals(i_rec)) + + def test_multi_index(self): + + for s, i in self.mi.items(): + i_rec = self.encode_decode(i) + self.assert_(i.equals(i_rec)) + + def test_unicode(self): + i = tm.makeUnicodeIndex(100) + i_rec = self.encode_decode(i) + self.assert_(i.equals(i_rec)) + +class TestSeries(Test): + + def setUp(self): + super(TestSeries, self).setUp() + + self.d = {} + + + s = tm.makeStringSeries() + s.name = 'string' + self.d['string'] = s + + s = tm.makeObjectSeries() + s.name = 'object' + self.d['object'] = s + + s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5)) + self.d['date'] = s + + data = { + 'A': [0., 1., 2., 3., np.nan], + 'B': [0, 1, 0, 1, 0], + 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], + 'D': date_range('1/1/2009', periods=5), + 'E' : [0., 1, Timestamp('20100101'),'foo',2.], + } + + self.d['float'] = Series(data['A']) + self.d['int'] = Series(data['B']) + self.d['mixed'] = Series(data['E']) + + def test_basic(self): + + for s, i in self.d.items(): + i_rec = self.encode_decode(i) + assert_series_equal(i,i_rec) + +class TestNDFrame(Test): + + def setUp(self): + super(TestNDFrame, self).setUp() + + data = { + 'A': [0., 1., 2., 3., np.nan], + 'B': [0, 1, 0, 1, 0], + 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], + 'D': date_range('1/1/2009', periods=5), + 'E' : [0., 1, Timestamp('20100101'),'foo',2.], + } + + self.frame = { 'float' : DataFrame(dict(A = data['A'], B = Series(data['A']) + 1)), + 'int' : DataFrame(dict(A = data['B'], B = Series(data['B']) + 1)), + 'mixed' : DataFrame(dict([ (k,data[k]) for k in ['A','B','C','D']])) } + + self.panel = { 'float' : Panel(dict(ItemA = self.frame['float'], ItemB = self.frame['float']+1)) } + + def test_basic_frame(self): + + for s, i in self.frame.items(): + i_rec 
= self.encode_decode(i) + assert_frame_equal(i,i_rec) + + def test_basic_panel(self): + + for s, i in self.panel.items(): + i_rec = self.encode_decode(i) + assert_panel_equal(i,i_rec) + + def test_multi(self): + + i_rec = self.encode_decode(self.frame) + for k in self.frame.keys(): + assert_frame_equal(self.frame[k],i_rec[k]) + + l = tuple([ self.frame['float'], self.frame['float'].A, self.frame['float'].B, None ]) + l_rec = self.encode_decode(l) + check_arbitrary(l,l_rec) + + # this is an oddity in that packed lists will be returned as tuples + l = [ self.frame['float'], self.frame['float'].A, self.frame['float'].B, None ] + l_rec = self.encode_decode(l) + self.assert_(isinstance(l_rec,tuple)) + check_arbitrary(l,l_rec) + + def test_iterator(self): + + l = [ self.frame['float'], self.frame['float'].A, self.frame['float'].B, None ] + + with ensure_clean(self.path) as path: + to_msgpack(path,*l) + for i, packed in enumerate(read_msgpack(path, iterator=True)): + check_arbitrary(packed,l[i]) + +class TestSparse(Test): + + def _check_roundtrip(self, obj, comparator, **kwargs): + + i_rec = self.encode_decode(obj) + comparator(obj,i_rec,**kwargs) + + def test_sparse_series(self): + + s = tm.makeStringSeries() + s[3:5] = np.nan + ss = s.to_sparse() + self._check_roundtrip(ss, tm.assert_series_equal, + check_series_type=True) + + ss2 = s.to_sparse(kind='integer') + self._check_roundtrip(ss2, tm.assert_series_equal, + check_series_type=True) + + ss3 = s.to_sparse(fill_value=0) + self._check_roundtrip(ss3, tm.assert_series_equal, + check_series_type=True) + + def test_sparse_frame(self): + + s = tm.makeDataFrame() + s.ix[3:5, 1:3] = np.nan + s.ix[8:10, -2] = np.nan + ss = s.to_sparse() + + self._check_roundtrip(ss, tm.assert_frame_equal, + check_frame_type=True) + + ss2 = s.to_sparse(kind='integer') + self._check_roundtrip(ss2, tm.assert_frame_equal, + check_frame_type=True) + + ss3 = s.to_sparse(fill_value=0) + self._check_roundtrip(ss3, tm.assert_frame_equal, + 
check_frame_type=True) + + def test_sparse_panel(self): + + items = ['x', 'y', 'z'] + p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items)) + sp = p.to_sparse() + + self._check_roundtrip(sp, tm.assert_panel_equal, + check_panel_type=True) + + sp2 = p.to_sparse(kind='integer') + self._check_roundtrip(sp2, tm.assert_panel_equal, + check_panel_type=True) + + sp3 = p.to_sparse(fill_value=0) + self._check_roundtrip(sp3, tm.assert_panel_equal, + check_panel_type=True) + + +if __name__ == '__main__': + import nose + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/vb_suite/packers.py b/vb_suite/packers.py new file mode 100644 index 0000000000000..6733b5fa6dfb8 --- /dev/null +++ b/vb_suite/packers.py @@ -0,0 +1,80 @@ +from vbench.api import Benchmark +from datetime import datetime + +start_date = datetime(2013, 5, 1) + +common_setup = """from pandas_vb_common import * +import os +from pandas.io import packers +from pandas.core import common as com + +f = '__test__.msg' +def remove(f): + try: + os.remove(f) + except: + pass + +""" + +#---------------------------------------------------------------------- +# read a pack + +setup1 = common_setup + """ +index = date_range('20000101',periods=25000,freq='H') +df = DataFrame({'float1' : randn(25000), + 'float2' : randn(25000)}, + index=index) +remove(f) +packers.save(f,df) +""" + +read_pack = Benchmark("packers.load(f)", setup1, + start_date=start_date) + + +#---------------------------------------------------------------------- +# write to a pack + +setup2 = common_setup + """ +index = date_range('20000101',periods=25000,freq='H') +df = DataFrame({'float1' : randn(25000), + 'float2' : randn(25000)}, + index=index) +remove(f) +""" + +write_pack = Benchmark( + "packers.save(f,df)", setup2, cleanup="remove(f)", + start_date=start_date) + +#---------------------------------------------------------------------- +# read a pickle + +setup1 = common_setup + """ +index = 
date_range('20000101',periods=25000,freq='H') +df = DataFrame({'float1' : randn(25000), + 'float2' : randn(25000)}, + index=index) +remove(f) +df.save(f) +""" + +read_pickle = Benchmark("com.load(f)", setup1, + start_date=start_date) + + +#---------------------------------------------------------------------- +# write to a pickle + +setup2 = common_setup + """ +index = date_range('20000101',periods=25000,freq='H') +df = DataFrame({'float1' : randn(25000), + 'float2' : randn(25000)}, + index=index) +remove(f) +""" + +write_pickle = Benchmark( + "df.save(f)", setup2, cleanup="remove(f)", + start_date=start_date) diff --git a/vb_suite/suite.py b/vb_suite/suite.py index 905c4371837cc..4ac967dc1664a 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -16,6 +16,7 @@ 'join_merge', 'miscellaneous', 'panel_ctor', + 'packers', 'parser', 'reindex', 'replace',
warning: prototype! msgpack serialization/deseriiization - support all pandas objects: Timestamp,Period,all index types,Series,DataFrame,Panel,Sparse suite - docs included (in io.rst) - iterator support - top-level api support Here are 2 features which I think msgpack supports, but have to look further - no support for compression directly, but can compress the file (e.g. gzip) - access is sequential - versioning, is not that hard because its pretty easy to deal with a change in the schema (which is not directly stored), and this is MUCH more transparent than pickle usage is exactly like pickle (aside from that its in a different namespace), allowing arbitrary combinations of storage, e.g. this supports the added storage of pandas objects, but obviously can store `{ 'frame1' : df1, 'frame2' : df2 }` etc storage example `DataFrame(np.random.rand(1000,10))` on my machine stores in 128k file size. and scales pretty well, e.g. 10k rows is 1.26mb Not completely happy with the `packers` name, any suggestions? closes #686 ``` In [1]: df = DataFrame(randn(10,2), ...: columns=list('AB'), ...: index=date_range('20130101',periods=10)) In [2]: pd.to_msgpack('foo.msg',df) In [3]: pd.read_msgpack('foo.msg') Out[3]: A B 2013-01-01 0.676700 -1.702599 2013-01-02 -0.070164 -1.368716 2013-01-03 -0.877145 -1.427964 2013-01-04 -0.295715 -0.176954 2013-01-05 0.566986 0.588918 2013-01-06 -0.307070 1.541773 2013-01-07 1.302388 0.689701 2013-01-08 0.165292 0.273496 2013-01-09 -3.492113 -1.178075 2013-01-10 -1.069521 0.848614 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3525
2013-05-03T19:43:54Z
2013-06-10T14:11:28Z
null
2014-06-12T15:57:14Z
Parse raised exceptions and their error messages sans interpolation/format spec
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9c0a2843370f4..5294db0e6e7c1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -710,8 +710,9 @@ def __unicode__(self): self.info(buf=buf, verbose=verbose) value = buf.getvalue() - if not type(value) == unicode: - raise AssertionError() + if not isinstance(value, unicode): + raise AssertionError("'{0}' is not of type 'unicode', it has " + "type '{0}'".format(type(value))) return value diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 0a099661c58f1..cccee11ffd5db 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -631,8 +631,10 @@ def get_value(self, *args): value : scalar value """ # require an arg for each axis - if not ((len(args) == self._AXIS_LEN)): - raise AssertionError() + if len(args) != self._AXIS_LEN: + raise AssertionError('There must be an argument for each axis, ' + 'you gave {0} args, but {1} are ' + 'required'.format(len(args), self._AXIS_LEN)) # hm, two layers to the onion frame = self._get_item_cache(args[0]) @@ -656,8 +658,12 @@ def set_value(self, *args): otherwise a new object """ # require an arg for each axis and the value - if not ((len(args) == self._AXIS_LEN + 1)): - raise AssertionError() + if len(args) != self._AXIS_LEN + 1: + raise AssertionError('There must be an argument for each axis plus' + ' the value provided, you gave {0} args, ' + 'but {1} are required'.format(len(args), + self._AXIS_LEN + + 1)) try: frame = self._get_item_cache(args[0]) @@ -667,7 +673,7 @@ def set_value(self, *args): axes = self._expand_axes(args) d = self._construct_axes_dict_from(self, axes, copy=False) result = self.reindex(**d) - args = list(args) + args = list(args) likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1]) made_bigger = not np.array_equal( axes[0], getattr(self, self._info_axis)) @@ -702,8 +708,10 @@ def __setitem__(self, key, value): **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])) mat = value.values elif isinstance(value, 
np.ndarray): - if not ((value.shape == shape[1:])): - raise AssertionError() + if value.shape != shape[1:]: + raise AssertionError('shape of value must be {0}, shape of ' + 'given object was ' + '{1}'.format(shape[1:], value.shape)) mat = np.asarray(value) elif np.isscalar(value): dtype, value = _infer_dtype_from_scalar(value) @@ -1513,8 +1521,9 @@ def _extract_axes(self, data, axes, **kwargs): @staticmethod def _extract_axes_for_slice(self, axes): """ return the slice dictionary for these axes """ - return dict([(self._AXIS_SLICEMAP[i], a) for i, a - in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)]) + return dict([(self._AXIS_SLICEMAP[i], a) + for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - + len(axes):], axes)]) @staticmethod def _prep_ndarray(self, values, copy=True): @@ -1526,8 +1535,11 @@ def _prep_ndarray(self, values, copy=True): else: if copy: values = values.copy() - if not ((values.ndim == self._AXIS_LEN)): - raise AssertionError() + if values.ndim != self._AXIS_LEN: + raise AssertionError("The number of dimensions required is {0}, " + "but the number of dimensions of the " + "ndarray given was {1}".format(self._AXIS_LEN, + values.ndim)) return values @staticmethod diff --git a/pandas/core/series.py b/pandas/core/series.py index ab8a48f4b8eb9..e4b7b4fcc6deb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1146,8 +1146,9 @@ def __unicode__(self): else: result = u'Series([], dtype: %s)' % self.dtype - if not ( type(result) == unicode): - raise AssertionError() + if not isinstance(result, unicode): + raise AssertionError("result must be of type unicode, type" + " of result is '{0}'".format(type(result))) return result def __repr__(self): @@ -1216,9 +1217,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, length=length, dtype=dtype, name=name) # catch contract violations - if not type(the_repr) == unicode: - raise AssertionError("expected unicode string") - + if not isinstance(the_repr, unicode): + raise 
AssertionError("result must be of type unicode, type" + " of result is '{0}'".format(type(the_repr))) if buf is None: return the_repr else: @@ -1228,19 +1229,21 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, with open(buf, 'w') as f: f.write(the_repr) - def _get_repr(self, name=False, print_header=False, length=True, dtype=True, - na_rep='NaN', float_format=None): + def _get_repr(self, name=False, print_header=False, length=True, + dtype=True, na_rep='NaN', float_format=None): """ Internal function, should always return unicode string """ formatter = fmt.SeriesFormatter(self, name=name, header=print_header, - length=length, dtype=dtype, na_rep=na_rep, + length=length, dtype=dtype, + na_rep=na_rep, float_format=float_format) result = formatter.to_string() - if not ( type(result) == unicode): - raise AssertionError() + if not isinstance(result, unicode): + raise AssertionError("result must be of type unicode, type" + " of result is '{0}'".format(type(result))) return result def __iter__(self): diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index c7a60d13f1778..885818104b6f7 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -46,12 +46,14 @@ def _maybe_cast(arr): def _check_columns(cols): - if not ((len(cols) > 0)): - raise AssertionError() + if not len(cols): + raise AssertionError("There must be at least 1 column") N = len(cols[0]) for c in cols[1:]: - if not ((len(c) == N)): - raise AssertionError() + if len(c) != N: + raise AssertionError('All columns must have the same length: ' + '{0}, at least one column has ' + 'length {1}'.format(N, len(c))) return N diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 249afe0755445..30bea1ac76fc5 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -630,8 +630,10 @@ def _clean_options(self, options, engine): # type conversion-related if converters is not None: - if not (isinstance(converters, dict)): - raise AssertionError() + if 
not isinstance(converters, dict): + raise AssertionError('Type converters must be a dict or' + ' subclass, input was ' + 'a {0}'.format(type(converters))) else: converters = {} @@ -1649,8 +1651,8 @@ def _rows_to_cols(self, content): if self._implicit_index: col_len += len(self.index_col) - if not ((self.skip_footer >= 0)): - raise AssertionError() + if self.skip_footer < 0: + raise AssertionError('skip footer cannot be negative') if col_len != zip_len and self.index_col is not False: row_num = -1 @@ -1946,15 +1948,18 @@ def __init__(self, f, colspecs, filler, thousands=None): self.filler = filler # Empty characters between fields. self.thousands = thousands - if not ( isinstance(colspecs, (tuple, list))): - raise AssertionError() + if not isinstance(colspecs, (tuple, list)): + raise AssertionError("column specifications must be a list or" + " tuple, input was " + "a {0}".format(type(colspecs))) for colspec in colspecs: - if not ( isinstance(colspec, (tuple, list)) and - len(colspec) == 2 and - isinstance(colspec[0], int) and - isinstance(colspec[1], int) ): - raise AssertionError() + if not (isinstance(colspec, (tuple, list)) and + len(colspec) == 2 and + isinstance(colspec[0], int) and + isinstance(colspec[1], int)): + raise AssertionError('Each column specification must be ' + '2 element tuple or list of integers') def next(self): line = next(self.f) diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 035db279064a0..667f076ee7cc3 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -14,7 +14,6 @@ from pandas._sparse import BlockIndex, IntIndex import pandas._sparse as splib -import pandas.lib as lib import pandas.index as _index @@ -25,8 +24,8 @@ def _sparse_op_wrap(op, name): """ def wrapper(self, other): if isinstance(other, np.ndarray): - if not ((len(self) == len(other))): - raise AssertionError() + if len(self) != len(other): + raise AssertionError("Operands must be of the same size") if not isinstance(other, SparseArray): other 
= SparseArray(other, fill_value=self.fill_value) return _sparse_array_op(self, other, op, name) @@ -130,8 +129,10 @@ def __new__(cls, data, sparse_index=None, kind='integer', fill_value=None, fill_value=fill_value) else: values = data - if not ((len(values) == sparse_index.npoints)): - raise AssertionError() + if len(values) != sparse_index.npoints: + raise AssertionError("Non array-like type {0} must have" + " the same length as the" + " index".format(type(values))) # Create array, do *not* copy data by default if copy: @@ -277,13 +278,13 @@ def take(self, indices, axis=0): ------- taken : ndarray """ - if not ((axis == 0)): - raise AssertionError() + if axis: + raise AssertionError("axis must be 0, input was {0}".format(axis)) indices = np.asarray(indices, dtype=int) n = len(self) if (indices < 0).any() or (indices >= n).any(): - raise Exception('out of bounds access') + raise IndexError('out of bounds access') if self.sp_index.npoints > 0: locs = np.array([self.sp_index.lookup(loc) for loc in indices]) @@ -296,10 +297,10 @@ def take(self, indices, axis=0): return result def __setitem__(self, key, value): - raise Exception('SparseArray objects are immutable') + raise TypeError('SparseArray objects are immutable') def __setslice__(self, i, j, value): - raise Exception('SparseArray objects are immutable') + raise TypeError('SparseArray objects are immutable') def to_dense(self): """ @@ -313,7 +314,7 @@ def astype(self, dtype=None): """ dtype = np.dtype(dtype) if dtype is not None and dtype not in (np.float_, float): - raise Exception('Can only support floating point data for now') + raise TypeError('Can only support floating point data for now') return self.copy() def copy(self, deep=True): diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 9694cc005d178..977cf0e3953e4 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -8,7 +8,7 @@ from numpy import nan import numpy as np -from pandas.core.common import _pickle_array, 
_unpickle_array, _try_sort +from pandas.core.common import _unpickle_array, _try_sort from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices from pandas.core.series import Series @@ -16,11 +16,9 @@ _default_index) from pandas.util.decorators import cache_readonly import pandas.core.common as com -import pandas.core.datetools as datetools from pandas.sparse.series import SparseSeries from pandas.util.decorators import Appender -import pandas.lib as lib class _SparseMockBlockManager(object): @@ -713,8 +711,8 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', def _join_index(self, other, how, lsuffix, rsuffix): if isinstance(other, Series): - if not (other.name is not None): - raise AssertionError() + if other.name is None: + raise AssertionError('Cannot join series with no name') other = SparseDataFrame({other.name: other}, default_fill_value=self.default_fill_value) diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 0b2842155b299..dd628385d539a 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -72,7 +72,8 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None, frames = new_frames if not (isinstance(frames, dict)): - raise AssertionError() + raise AssertionError('input must be a dict, a {0} was' + ' passed'.format(type(frames))) self.default_fill_value = fill_value = default_fill_value self.default_kind = kind = default_kind diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index bd01845a295b6..5d5f3ddabeed3 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -110,8 +110,11 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block', if isinstance(data, SparseSeries) and index is None: index = data.index elif index is not None: - if not (len(index) == len(data)): - raise AssertionError() + if len(index) != len(data): + raise AssertionError('Passed index and data 
must have the ' + 'same length, len(data) == {0}, ' + 'len(index) == ' + '{1}'.format(len(data), len(index))) sparse_index = data.sp_index values = np.asarray(data) @@ -129,8 +132,14 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block', fill_value=fill_value) else: values = data - if not (len(values) == sparse_index.npoints): - raise AssertionError() + if len(values) != sparse_index.npoints: + raise AssertionError('length of input must the same as the' + ' length of the given index, ' + 'len(values) == {0}, ' + 'sparse_index.npoints' + ' == ' + '{1}'.format(len(values), + sparse_index.npoints)) else: if index is None: raise Exception('must pass index!') @@ -449,7 +458,7 @@ def sparse_reindex(self, new_index): reindexed : SparseSeries """ if not (isinstance(new_index, splib.SparseIndex)): - raise AssertionError() + raise AssertionError('new index must be a SparseIndex') new_values = self.sp_index.to_int_index().reindex(self.sp_values, self.fill_value, diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 13eeb03e15328..fd481e10159bb 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -634,8 +634,8 @@ def _set_window(self, window_type, window, min_periods): self._window_type = scom._get_window_type(window_type) if self._is_rolling: - if not ((window is not None)): - raise AssertionError() + if window is None: + raise AssertionError("'window' cannot be None") if min_periods is None: min_periods = window else: @@ -1212,8 +1212,9 @@ def _nobs_raw(self): return result.astype(int) def _beta_matrix(self, lag=0): - if not ((lag >= 0)): - raise AssertionError() + if lag < 0: + raise AssertionError("'lag' must be greater than or equal to 0, " + "input was {0}".format(lag)) betas = self._beta_raw @@ -1276,8 +1277,8 @@ def _filter_data(lhs, rhs, weights=None): Cleaned lhs and rhs """ if not isinstance(lhs, Series): - if not ((len(lhs) == len(rhs))): - raise AssertionError() + if len(lhs) != len(rhs): + raise AssertionError("length of lhs must 
equal length of rhs") lhs = Series(lhs, index=rhs.index) rhs = _combine_rhs(rhs) diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py index 467ce6a05e1f0..aa2db7d2e2c66 100644 --- a/pandas/stats/plm.py +++ b/pandas/stats/plm.py @@ -101,10 +101,12 @@ def _prepare_data(self): y_regressor = y if weights is not None: - if not ((y_regressor.index.equals(weights.index))): - raise AssertionError() - if not ((x_regressor.index.equals(weights.index))): - raise AssertionError() + if not y_regressor.index.equals(weights.index): + raise AssertionError("y_regressor and weights must have the " + "same index") + if not x_regressor.index.equals(weights.index): + raise AssertionError("x_regressor and weights must have the " + "same index") rt_weights = np.sqrt(weights) y_regressor = y_regressor * rt_weights @@ -171,8 +173,10 @@ def _convert_x(self, x): # .iteritems iteritems = getattr(x, 'iteritems', x.items) for key, df in iteritems(): - if not ((isinstance(df, DataFrame))): - raise AssertionError() + if not isinstance(df, DataFrame): + raise AssertionError("all input items must be DataFrames, " + "at least one is of " + "type {0}".format(type(df))) if _is_numeric(df): x_converted[key] = df @@ -640,8 +644,9 @@ def _y_predict_raw(self): return (betas * x).sum(1) def _beta_matrix(self, lag=0): - if not ((lag >= 0)): - raise AssertionError() + if lag < 0: + raise AssertionError("'lag' must be greater than or equal to 0, " + "input was {0}".format(lag)) index = self._y_trans.index major_labels = index.labels[0] diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index b19d099790566..f19608752ad9a 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -404,17 +404,19 @@ def _validate_specification(self): elif self.left_on is not None: n = len(self.left_on) if self.right_index: - if not ((len(self.left_on) == self.right.index.nlevels)): - raise AssertionError() + if len(self.left_on) != self.right.index.nlevels: + raise AssertionError('len(left_on) must equal the 
number ' + 'of levels in the index of "right"') self.right_on = [None] * n elif self.right_on is not None: n = len(self.right_on) if self.left_index: - if not ((len(self.right_on) == self.left.index.nlevels)): - raise AssertionError() + if len(self.right_on) != self.left.index.nlevels: + raise AssertionError('len(right_on) must equal the number ' + 'of levels in the index of "left"') self.left_on = [None] * n - if not ((len(self.right_on) == len(self.left_on))): - raise AssertionError() + if len(self.right_on) != len(self.left_on): + raise AssertionError("len(right_on) must equal len(left_on)") def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'): @@ -427,8 +429,8 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'): ------- """ - if not ((len(left_keys) == len(right_keys))): - raise AssertionError() + if len(left_keys) != len(right_keys): + raise AssertionError('left_key and right_keys must be the same length') left_labels = [] right_labels = [] @@ -542,8 +544,11 @@ def _left_join_on_index(left_ax, right_ax, join_keys, sort=False): if len(join_keys) > 1: if not ((isinstance(right_ax, MultiIndex) and - len(join_keys) == right_ax.nlevels) ): - raise AssertionError() + len(join_keys) == right_ax.nlevels)): + raise AssertionError("If more than one join key is given then " + "'right_ax' must be a MultiIndex and the " + "number of join keys must be the number of " + "levels in right_ax") left_tmp, right_indexer = \ _get_multiindex_indexer(join_keys, right_ax, @@ -642,8 +647,9 @@ def __init__(self, data_list, join_index, indexers, axis=1, copy=True): if axis <= 0: # pragma: no cover raise MergeError('Only axis >= 1 supported for this operation') - if not ((len(data_list) == len(indexers))): - raise AssertionError() + if len(data_list) != len(indexers): + raise AssertionError("data_list and indexers must have the same " + "length") self.units = [] for data, indexer in zip(data_list, indexers): @@ -936,8 +942,9 @@ def __init__(self, 
objs, axis=0, join='outer', join_axes=None, axis = 1 if axis == 0 else 0 self._is_series = isinstance(sample, Series) - if not ((0 <= axis <= sample.ndim)): - raise AssertionError() + if not 0 <= axis <= sample.ndim: + raise AssertionError("axis must be between 0 and {0}, " + "input was {1}".format(sample.ndim, axis)) # note: this is the BlockManager axis (since DataFrame is transposed) self.axis = axis @@ -1106,8 +1113,9 @@ def _concat_single_item(self, objs, item): to_concat.append(item_values) # this method only gets called with axis >= 1 - if not ((self.axis >= 1)): - raise AssertionError() + if self.axis < 1: + raise AssertionError("axis must be >= 1, input was" + " {0}".format(self.axis)) return com._concat_compat(to_concat, axis=self.axis - 1) def _get_result_dim(self): @@ -1126,8 +1134,9 @@ def _get_new_axes(self): continue new_axes[i] = self._get_comb_axis(i) else: - if not ((len(self.join_axes) == ndim - 1)): - raise AssertionError() + if len(self.join_axes) != ndim - 1: + raise AssertionError("length of join_axes must not be " + "equal to {0}".format(ndim - 1)) # ufff... 
indices = range(ndim) diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 8d5ba7af0d92b..12ba39b2387c1 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -2,7 +2,6 @@ from pandas import Series, DataFrame from pandas.core.index import MultiIndex -from pandas.core.reshape import _unstack_multiple from pandas.tools.merge import concat import pandas.core.common as com import numpy as np @@ -300,8 +299,8 @@ def _get_names(arrs, names, prefix='row'): else: names.append('%s_%d' % (prefix, i)) else: - if not ((len(names) == len(arrs))): - raise AssertionError() + if len(names) != len(arrs): + raise AssertionError('arrays and names must have the same length') if not isinstance(names, list): names = list(names) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index a918e9eb18e8b..a6b94184eb475 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -306,12 +306,12 @@ def _generate(cls, start, end, periods, name, offset, if tz is not None and inferred_tz is not None: if not inferred_tz == tz: - raise AssertionError() + raise AssertionError("Inferred time zone not equal to passed " + "time zone") elif inferred_tz is not None: tz = inferred_tz - if start is not None: if normalize: start = normalize_date(start) @@ -450,16 +450,16 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None, cachedRange = drc[offset] if start is None: - if not (isinstance(end, Timestamp)): - raise AssertionError() + if not isinstance(end, Timestamp): + raise AssertionError('end must be an instance of Timestamp') end = offset.rollback(end) endLoc = cachedRange.get_loc(end) + 1 startLoc = endLoc - periods elif end is None: - if not (isinstance(start, Timestamp)): - raise AssertionError() + if not isinstance(start, Timestamp): + raise AssertionError('start must be an instance of Timestamp') start = offset.rollforward(start) @@ -586,14 +586,15 @@ def _format_native_types(self, na_rep=u'NaT', **kwargs): zero_time = time(0, 0) for d 
in data: if d.time() != zero_time or d.tzinfo is not None: - return [u'%s' % x for x in data ] + return [u'%s' % x for x in data] - values = np.array(data,dtype=object) + values = np.array(data, dtype=object) mask = isnull(self.values) values[mask] = na_rep imask = -mask - values[imask] = np.array([ u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day) for dt in values[imask] ]) + values[imask] = np.array([u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day) + for dt in values[imask]]) return values.tolist() def isin(self, values): @@ -1067,7 +1068,6 @@ def intersection(self, other): return self._view_like(left_chunk) def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): - is_monotonic = self.is_monotonic if reso == 'year': @@ -1104,18 +1104,22 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): else: raise KeyError - stamps = self.asi8 if is_monotonic: # a monotonic (sorted) series can be sliced - left = stamps.searchsorted(t1.value, side='left') if use_lhs else None - right = stamps.searchsorted(t2.value, side='right') if use_rhs else None + left = None + if use_lhs: + left = stamps.searchsorted(t1.value, side='left') + + right = None + if use_rhs: + right = stamps.searchsorted(t2.value, side='right') return slice(left, right) - lhs_mask = (stamps>=t1.value) if use_lhs else True - rhs_mask = (stamps<=t2.value) if use_rhs else True + lhs_mask = (stamps >= t1.value) if use_lhs else True + rhs_mask = (stamps <= t2.value) if use_rhs else True # try to find a the dates return (lhs_mask & rhs_mask).nonzero()[0] @@ -1188,7 +1192,8 @@ def _get_string_slice(self, key, use_lhs=True, use_rhs=True): freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) _, parsed, reso = parse_time_string(key, freq) - loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs) + loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, + use_rhs=use_rhs) return loc def slice_indexer(self, start=None, end=None, step=None): @@ 
-1217,7 +1222,6 @@ def slice_locs(self, start=None, end=None): start_loc = self._get_string_slice(start).start else: start_loc = 0 - if end: end_loc = self._get_string_slice(end).stop else: @@ -1232,12 +1236,12 @@ def slice_locs(self, start=None, end=None): # so create an indexer directly try: if start: - start_loc = self._get_string_slice(start,use_rhs=False) + start_loc = self._get_string_slice(start, + use_rhs=False) else: start_loc = np.arange(len(self)) - if end: - end_loc = self._get_string_slice(end,use_lhs=False) + end_loc = self._get_string_slice(end, use_lhs=False) else: end_loc = np.arange(len(self)) diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 34c640392bda9..aa3012ddf291a 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -4,7 +4,6 @@ from datetime import datetime, date import numpy as np -import pandas.tseries.offsets as offsets from pandas.tseries.frequencies import (get_freq_code as _gfc, _month_numbers, FreqGroup) from pandas.tseries.index import DatetimeIndex, Int64Index, Index @@ -213,7 +212,7 @@ def end_time(self): ordinal = (self + 1).start_time.value - 1 return Timestamp(ordinal) - def to_timestamp(self, freq=None, how='start',tz=None): + def to_timestamp(self, freq=None, how='start', tz=None): """ Return the Timestamp representation of the Period at the target frequency at the specified end (how) of the Period @@ -241,7 +240,7 @@ def to_timestamp(self, freq=None, how='start',tz=None): val = self.asfreq(freq, how) dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base) - return Timestamp(dt64,tz=tz) + return Timestamp(dt64, tz=tz) year = _period_field_accessor('year', 0) month = _period_field_accessor('month', 3) @@ -308,7 +307,6 @@ def __unicode__(self): return value - def strftime(self, fmt): """ Returns the string representation of the :class:`Period`, depending @@ -500,13 +498,13 @@ def _period_index_cmp(opname): def wrapper(self, other): if isinstance(other, Period): func = getattr(self.values, 
opname) - if not (other.freq == self.freq): - raise AssertionError() + if other.freq != self.freq: + raise AssertionError("Frequencies must be equal") result = func(other.ordinal) elif isinstance(other, PeriodIndex): - if not (other.freq == self.freq): - raise AssertionError() + if other.freq != self.freq: + raise AssertionError("Frequencies must be equal") return getattr(self.values, opname)(other.values) else: other = Period(other, freq=self.freq) @@ -724,7 +722,6 @@ def asof_locs(self, where, mask): @property def asobject(self): - from pandas.core.index import Index return Index(self._box_values(self.values), dtype=object) def _array_values(self): @@ -960,7 +957,7 @@ def get_loc(self, key): key = Period(key, self.freq) try: return self._engine.get_loc(key.ordinal) - except KeyError as inst: + except KeyError: raise KeyError(key) def slice_locs(self, start=None, end=None): @@ -1080,12 +1077,11 @@ def _format_with_header(self, header, **kwargs): def _format_native_types(self, na_rep=u'NaT', **kwargs): - values = np.array(list(self),dtype=object) + values = np.array(list(self), dtype=object) mask = isnull(self.values) values[mask] = na_rep - imask = -mask - values[imask] = np.array([ u'%s' % dt for dt in values[imask] ]) + values[imask] = np.array([u'%s' % dt for dt in values[imask]]) return values.tolist() def __array_finalize__(self, obj): @@ -1184,13 +1180,14 @@ def __setstate__(self, state): nd_state, own_state = state np.ndarray.__setstate__(self, nd_state) self.name = own_state[0] - try: # backcompat + try: # backcompat self.freq = own_state[1] except: pass else: # pragma: no cover np.ndarray.__setstate__(self, state) + def _get_ordinal_range(start, end, periods, freq): if com._count_not_none(start, end, periods) < 2: raise ValueError('Must specify 2 of start, end, periods') @@ -1249,8 +1246,8 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, base, mult = _gfc(freq) if mult != 1: raise ValueError('Only mult == 1 supported') - if not 
(base == FreqGroup.FR_QTR): - raise AssertionError() + if base != FreqGroup.FR_QTR: + raise AssertionError("base must equal FR_QTR") year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 4bf0a5bf3182c..98b4d9afbfb62 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -119,8 +119,9 @@ def _get_time_grouper(self, obj): return binner, grouper def _get_time_bins(self, axis): - if not (isinstance(axis, DatetimeIndex)): - raise AssertionError() + if not isinstance(axis, DatetimeIndex): + raise AssertionError('axis must be a DatetimeIndex, but got ' + 'a {0}'.format(type(axis))) if len(axis) == 0: binner = labels = DatetimeIndex(data=[], freq=self.freq) @@ -179,8 +180,9 @@ def _adjust_bin_edges(self, binner, ax_values): return binner, bin_edges def _get_time_period_bins(self, axis): - if not(isinstance(axis, DatetimeIndex)): - raise AssertionError() + if not isinstance(axis, DatetimeIndex): + raise AssertionError('axis must be a DatetimeIndex, ' + 'but was a {0}'.format(type(axis))) if len(axis) == 0: binner = labels = PeriodIndex(data=[], freq=self.freq) @@ -210,8 +212,8 @@ def _resample_timestamps(self, obj): result = grouped.aggregate(self._agg_method) else: # upsampling shortcut - if not (self.axis == 0): - raise AssertionError() + if self.axis: + raise AssertionError('axis must be 0') if self.closed == 'right': res_index = binner[1:] @@ -277,7 +279,6 @@ def _resample_periods(self, obj): def _take_new_index(obj, indexer, new_index, axis=0): from pandas.core.api import Series, DataFrame - from pandas.core.internals import BlockManager if isinstance(obj, Series): new_values = com.take_1d(obj.values, indexer) @@ -285,7 +286,7 @@ def _take_new_index(obj, indexer, new_index, axis=0): elif isinstance(obj, DataFrame): if axis == 1: raise NotImplementedError - return DataFrame(obj._data.take(indexer,new_index=new_index,axis=1)) + return 
DataFrame(obj._data.take(indexer, new_index=new_index, axis=1)) else: raise NotImplementedError diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 62ee19da6b845..a73017618fdbe 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -29,7 +29,8 @@ def _infer(a, b): tz = a.tzinfo if b and b.tzinfo: if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)): - raise AssertionError() + raise AssertionError('Inputs must both have the same timezone,' + ' {0} != {1}'.format(tz, b.tzinfo)) return tz tz = None if start is not None: diff --git a/scripts/parse_asserts.py b/scripts/parse_asserts.py new file mode 100755 index 0000000000000..b651de4ce26de --- /dev/null +++ b/scripts/parse_asserts.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python + +import re +import os +import fnmatch +import ast +import argparse +import inspect +import sys +import tempfile +import subprocess +import operator + +try: + from importlib import import_module +except ImportError: + import_module = __import__ + + +from numpy import nan as NA +from pandas import DataFrame +from pandas.core.config import option_context + + +def parse_interp_string(node): + assert isinstance(node, ast.BinOp) + assert isinstance(node.op, ast.Mod) + assert isinstance(node.left, ast.Str) + return node.left.s + + +def parse_format_string(node): + assert isinstance(node, ast.Call) + assert isinstance(node.func, ast.Attribute) + assert isinstance(node.func.value, ast.Str) + return node.func.value.s + + +def try_parse_raise_arg(node): + try: + # string + v = node.s + except AttributeError: + try: + # interpolated string + v = parse_interp_string(node) + except AssertionError: + try: + # format spec string + v = parse_format_string(node) + except AssertionError: + # otherwise forget it (general expr node) + v = node + return v + + +def parse_file(pyfile, asserts): + with open(pyfile, 'r') as pyf: + source = pyf.read() + + try: + parsed = ast.parse(source, pyfile, 'exec') + except SyntaxError: + 
return + + for node in ast.walk(parsed): + if isinstance(node, ast.Raise): + k = pyfile, node.lineno, node.col_offset + + try: + # try to get the name of the exception constructor + asserts[k] = [node.type.func.id] + except AttributeError: + # not a constructor + asserts[k] = [NA] + else: + # is constructor, try parsing its contents + try: + # function arguments + args = node.type.args + + try: + # try to get the first argument + arg = args[0] + v = try_parse_raise_arg(arg) + asserts[k].append(v) + except IndexError: + # no arguments (e.g., raise Exception()) + asserts[k].append(NA) + + except AttributeError: + # no arguments (e.g., raise Exception) + asserts[k].append(NA) + + +def path_matches(path, pattern): + return re.search(pattern, path) is not None + + +def regex_or(*patterns): + return '({0})'.format('|'.join(patterns)) + + +def get_asserts_from_path(path, file_filters, dir_filters): + if file_filters is None: + file_filters = 'test', '__init__.py' + + file_filters = regex_or(*file_filters) + + if dir_filters is None: + dir_filters = 'build', '.tox', 'test', '.*\.egg.*' + + dir_filters = regex_or(*dir_filters) + + asserts = {} + + if os.path.isfile(path): + parse_file(path, asserts) + return asserts + + for root, _, filenames in os.walk(path): + full_names = [] + + if not path_matches(root, dir_filters): + full_names = [os.path.join(root, fn) for fn in filenames + if not path_matches(fn, file_filters)] + + if full_names: + pyfiles = fnmatch.filter(full_names, '*.py') + + if pyfiles: + for pyfile in pyfiles: + parse_file(pyfile, asserts) + + return asserts + + +def obj_path_from_string(dotted_name, full_path): + try: + obj = import_module(dotted_name) + except ImportError: + splits_ville = dotted_name.split('.') + module_name, obj_name = splits_ville[:-1], splits_ville[-1] + module_name = '.'.join(module_name) + + try: + module = import_module(module_name) + except ImportError: + raise ImportError("'{0}' is not a valid Python " + 
"module".format(module_name)) + else: + try: + obj = getattr(module, obj_name) + except AttributeError: + raise AttributeError("") + + if full_path: + path = inspect.getabsfile(obj) + else: + path = inspect.getfile(obj) + + if path.endswith('pyc'): + path = path.strip('c') + return os.path.dirname(path) + + +def get_asserts_from_obj(dotted_name, file_filters, dir_filters, full_path): + path = obj_path_from_string(dotted_name, full_path) + return get_asserts_from_path(path, file_filters, dir_filters) + + +def asserts_to_frame(asserts): + index, values = zip(*asserts.iteritems()) + values = map(lambda x: list(reduce(operator.concat, map(list, x))), + asserts.iteritems()) + columns = 'filename', 'line', 'col', 'code', 'msg' + df = DataFrame(values, columns=columns).fillna(NA) + return df + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--type', default='all', + choices=('all', 'a', 'empty', 'e', 'nonempty', 'n'), + help='The type of nodes you want to look for') + parser.add_argument('-m', '--module', default='pandas', + help=('The name of a module or file to search for ' + 'nodes in')) + parser.add_argument('-i', '--file-filters', default=None, nargs='*', + help=("A list of regular expressions describing files " + "you want to ignore")) + parser.add_argument('-d', '--dir-filters', default=None, nargs='*', + help=('A list of regular expressions describing' + ' directories you want to ignore')) + parser.add_argument('-s', '--sparse-filename', action='store_true', + help=('Use multi_sparse = False to show the ' + 'resulting DataFrame')) + parser.add_argument('-p', '--full-path', action='store_true', + help=('Display the entire path of the file if this ' + 'is given')) + parser.add_argument('-k', '--exception-types', nargs='*', + help='The types of exceptions to report') + parser.add_argument('-b', '--sort-by', default='line', nargs='*', + help=('A list of columns or index levels you want to ' + 'sort by')) + return 
parser.parse_args() + + +def _build_exc_regex(exc_list): + return r'(.*(?:{0}).*)'.format('|'.join(exc_list)) + + +def main(args): + asserts = get_asserts_from_obj(args.module, args.file_filters, + args.dir_filters, args.full_path) + + if not asserts: + print "No asserts found in '{0}'".format(args.module) + return 0 + + df = asserts_to_frame(asserts) + + try: + df.sortlevel(args.sort_by, inplace=True) + except Exception: + df.sort(args.sort_by, inplace=True) + + atype = args.type + + msg = 'No' + + if atype.startswith('e'): + ind = df.msg.isnull() + msg += ' empty' + elif atype.startswith('n'): + ind = df.msg.notnull() + msg += ' nonempty' + else: + ind = slice(None) + + df = df[ind] + df.sort_index(inplace=True) + + if df.empty: + print "{0} {1} found in '{2}'".format(msg, args.exception_types, + args.module) + return 0 + max_cols = int(df.msg.map(lambda x: len(repr(x))).max()) + with option_context('display.multi_sparse', args.sparse_filename, + 'display.max_colwidth', max_cols, + 'display.max_seq_items', max_cols): + if args.exception_types is not None: + regex = _build_exc_regex(args.exception_types) + vals = df.code.str.match(regex, re.I) + df = df[vals.str[0].notnull()] + + if df.empty: + msg = "{0} {1} found in '{2}'".format(msg, + args.exception_types, + args.module) + print msg + return 0 + + with tempfile.NamedTemporaryFile() as tmpf: + df.to_string(buf=tmpf) + return subprocess.call([os.environ.get('PAGER', 'less'), + tmpf.name]) + return df + + +if __name__ == '__main__': + sys.exit(main(parse_args()))
This PR partially addresses #3024. It also provides a shiny new script useful looking at the current state of the messages in raised exceptions. Here's an example of some output: # Example ![pandas-shiny-new-parse-asserts](https://f.cloud.github.com/assets/417981/457480/70282b60-b39e-11e2-8db1-f5cfb4f0c5bf.png) You can reproduce this with ``` sh scripts/parse_except.py --type empty --module pandas --kind assert ``` from the top level pandas directory. # Description The above line searches for all empty raises in the module `pandas` whose constructor matches the regular expression `'.*assert.*'`--not case sensitive. Note: **This assumes you have installed pandas with `python setup.py develop`**. This will not work without that (well, that's not strictly true, but it will search for pandas code that's installed wherever you installed it `/usr/*/site-packages/*/pandas` or `$VIRTUAL_ENV/*/pandas`, for example, which is probably not what you want). To see all of the options and a short description you can do the usual: ``` sh scripts/parse_except.py --help ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3519
2013-05-03T03:14:34Z
2013-05-31T16:42:54Z
null
2014-06-15T06:07:10Z
TST: skip tests needing xlrd
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index ee2d265690221..8a145517d3b5a 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -41,6 +41,9 @@ def _skip_if_no_xlrd(): try: import xlrd + ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2])) + if ver < (0, 9): + raise nose.SkipTest('xlrd not installed, skipping') except ImportError: raise nose.SkipTest('xlrd not installed, skipping') @@ -215,6 +218,7 @@ def test_excel_read_buffer(self): df = xl.parse('Sheet1', index_col=0, parse_dates=True) def test_xlsx_table(self): + _skip_if_no_xlrd() _skip_if_no_openpyxl() pth = os.path.join(self.dirpath, 'test.xlsx') @@ -294,6 +298,7 @@ def test_excel_roundtrip_xls_mixed(self): def test_excel_roundtrip_xlsx_mixed(self): _skip_if_no_openpyxl() + _skip_if_no_xlrd() self._check_extension_mixed('xlsx') @@ -314,6 +319,7 @@ def test_excel_roundtrip_xls_tsframe(self): def test_excel_roundtrip_xlsx_tsframe(self): _skip_if_no_openpyxl() + _skip_if_no_xlrd() self._check_extension_tsframe('xlsx') def _check_extension_tsframe(self, ext): @@ -555,6 +561,7 @@ def test_to_excel_multiindex(self): self._check_excel_multiindex('xls') def test_to_excel_multiindex_xlsx(self): + _skip_if_no_xlrd() _skip_if_no_openpyxl() self._check_excel_multiindex('xlsx') @@ -587,6 +594,7 @@ def test_to_excel_multiindex_dates(self): def test_to_excel_multiindex_xlsx_dates(self): _skip_if_no_openpyxl() + _skip_if_no_xlrd() self._check_excel_multiindex_dates('xlsx') def _check_excel_multiindex_dates(self, ext):
xlrd version i have installed is 0.8.0, several of the tests in io.tests.test_excel failed, due to needing at least version > 0.9. This PR fixes this, those tests will be skipped if xlrd is not installed or if the version is old.
https://api.github.com/repos/pandas-dev/pandas/pulls/3517
2013-05-02T20:08:06Z
2013-05-03T15:47:25Z
2013-05-03T15:47:25Z
2014-07-16T08:07:09Z
BUG/CLN: datetime64/timedelta64
diff --git a/RELEASE.rst b/RELEASE.rst index f3d9c72db8bc5..7a2848003783f 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -56,6 +56,10 @@ pandas 0.11.1 Note: The default value will change in 0.12 to the "no mangle" behaviour, If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True in your calls. + - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and + ``timedelta64[ns]`` to ``object/int`` (GH3425_) + - Do not allow datetimelike/timedeltalike creation except with valid types + (e.g. cannot pass ``datetime64[ms]``) (GH3423_) **Bug Fixes** @@ -87,11 +91,15 @@ pandas 0.11.1 - Fixed bug in mixed-frame assignment with aligned series (GH3492_) - Fixed bug in selecting month/quarter/year from a series would not select the time element on the last day (GH3546_) + - Properly convert np.datetime64 objects in a Series (GH3416_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH2786: https://github.com/pydata/pandas/issues/2786 .. _GH2194: https://github.com/pydata/pandas/issues/2194 .. _GH3230: https://github.com/pydata/pandas/issues/3230 +.. _GH3425: https://github.com/pydata/pandas/issues/3425 +.. _GH3416: https://github.com/pydata/pandas/issues/3416 +.. _GH3423: https://github.com/pydata/pandas/issues/3423 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. 
_GH3480: https://github.com/pydata/pandas/issues/3480 diff --git a/pandas/core/common.py b/pandas/core/common.py index 490f269c8c104..893d912dcece8 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -43,6 +43,9 @@ class AmbiguousIndexError(PandasError, KeyError): _POSSIBLY_CAST_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]','O','int8','uint8','int16','uint16','int32','uint32','int64','uint64'] ]) +_NS_DTYPE = np.dtype('M8[ns]') +_TD_DTYPE = np.dtype('m8[ns]') +_INT64_DTYPE = np.dtype(np.int64) def isnull(obj): ''' @@ -1084,6 +1087,12 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): if is_datetime64 or is_timedelta64: + # force the dtype if needed + if is_datetime64 and dtype != _NS_DTYPE: + raise TypeError("cannot convert datetimelike to dtype [%s]" % dtype) + elif is_timedelta64 and dtype != _TD_DTYPE: + raise TypeError("cannot convert timedeltalike to dtype [%s]" % dtype) + if np.isscalar(value): if value == tslib.iNaT or isnull(value): value = tslib.iNaT @@ -1098,7 +1107,8 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): elif np.prod(value.shape) and value.dtype != dtype: try: if is_datetime64: - value = tslib.array_to_datetime(value, coerce = coerce) + from pandas.tseries.tools import to_datetime + value = to_datetime(value, coerce=coerce).values elif is_timedelta64: value = _possibly_cast_to_timedelta(value) except: @@ -1119,12 +1129,12 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): v = [ v ] if len(v): inferred_type = lib.infer_dtype(v) - if inferred_type == 'datetime': + if inferred_type in ['datetime','datetime64']: try: value = tslib.array_to_datetime(np.array(v)) except: pass - elif inferred_type == 'timedelta': + elif inferred_type in ['timedelta','timedelta64']: value = _possibly_cast_to_timedelta(value) return value @@ -1515,9 +1525,24 @@ def _astype_nansafe(arr, dtype, copy = True): if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) - if issubclass(arr.dtype.type, 
np.datetime64): + if is_datetime64_dtype(arr): if dtype == object: return tslib.ints_to_pydatetime(arr.view(np.int64)) + elif issubclass(dtype.type, np.int): + return arr.view(dtype) + elif dtype != _NS_DTYPE: + raise TypeError("cannot astype a datetimelike from [%s] to [%s]" % (arr.dtype,dtype)) + return arr.astype(_NS_DTYPE) + elif is_timedelta64_dtype(arr): + if issubclass(dtype.type, np.int): + return arr.view(dtype) + elif dtype == object: + return arr.astype(object) + + # in py3, timedelta64[ns] are int64 + elif (py3compat.PY3 and dtype not in [_INT64_DTYPE,_TD_DTYPE]) or (not py3compat.PY3 and dtype != _TD_DTYPE): + raise TypeError("cannot astype a timedelta from [%s] to [%s]" % (arr.dtype,dtype)) + return arr.astype(_TD_DTYPE) elif (np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer)): @@ -1721,9 +1746,6 @@ def _check_as_is(x): self.queue.truncate(0) -_NS_DTYPE = np.dtype('M8[ns]') - - def _concat_compat(to_concat, axis=0): # filter empty arrays to_concat = [x for x in to_concat if x.shape[axis] > 0] @@ -1751,7 +1773,6 @@ def _to_pydatetime(x): return x - def _where_compat(mask, arr1, arr2): if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE: new_vals = np.where(mask, arr1.view(np.int64), arr2.view(np.int64)) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5c0f9253beb62..13e1654963844 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -4,7 +4,7 @@ from numpy import nan import numpy as np -from pandas.core.common import _possibly_downcast_to_dtype, isnull +from pandas.core.common import _possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices import pandas.core.common as com @@ -740,10 +740,6 @@ def should_store(self, value): (np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_)) -_NS_DTYPE = 
np.dtype('M8[ns]') -_TD_DTYPE = np.dtype('m8[ns]') - - class DatetimeBlock(Block): _can_hold_na = True diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4845ae5258892..78e2cef230e24 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -469,6 +469,27 @@ def test_constructor_dtype_datetime64(self): self.assert_(isnull(s[1]) == True) self.assert_(s.dtype == 'M8[ns]') + # GH3416 + dates = [ + np.datetime64(datetime(2013, 1, 1)), + np.datetime64(datetime(2013, 1, 2)), + np.datetime64(datetime(2013, 1, 3)), + ] + + s = Series(dates) + self.assert_(s.dtype == 'M8[ns]') + + s.ix[0] = np.nan + self.assert_(s.dtype == 'M8[ns]') + + # invalid astypes + for t in ['s','D','us','ms']: + self.assertRaises(TypeError, s.astype, 'M8[%s]' % t) + + # GH3414 related + self.assertRaises(TypeError, lambda x: Series(Series(dates).astype('int')/1000000,dtype='M8[ms]')) + self.assertRaises(TypeError, lambda x: Series(dates, dtype='datetime64')) + def test_constructor_dict(self): d = {'a': 0., 'b': 1., 'c': 2.} result = Series(d, index=['b', 'c', 'd', 'a']) @@ -1809,6 +1830,13 @@ def test_constructor_dtype_timedelta64(self): td = Series([ timedelta(days=i) for i in range(3) ] + [ np.nan ], dtype='m8[ns]' ) self.assert_(td.dtype=='timedelta64[ns]') + # invalid astypes + for t in ['s','D','us','ms']: + self.assertRaises(TypeError, td.astype, 'm8[%s]' % t) + + # valid astype + td.astype('int') + # this is an invalid casting self.assertRaises(Exception, Series, [ timedelta(days=i) for i in range(3) ] + [ 'foo' ], dtype='m8[ns]' ) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 6bccf323f8654..46e2488fb70e6 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -6,7 +6,7 @@ import numpy as np -from pandas.core.common import isnull +from pandas.core.common import isnull, _NS_DTYPE, _INT64_DTYPE from pandas.core.index import Index, Int64Index from pandas.tseries.frequencies import ( infer_freq, to_offset, 
get_period_alias, @@ -92,9 +92,6 @@ class TimeSeriesError(Exception): _midnight = time(0, 0) -_NS_DTYPE = np.dtype('M8[ns]') -_INT64_DTYPE = np.dtype(np.int64) - class DatetimeIndex(Int64Index): """ diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index c1af7ba5cccc2..d9dfa51bc0bff 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -12,7 +12,7 @@ import pandas.tseries.frequencies as _freq_mod import pandas.core.common as com -from pandas.core.common import isnull +from pandas.core.common import isnull, _NS_DTYPE, _INT64_DTYPE from pandas.util import py3compat from pandas.lib import Timestamp @@ -516,10 +516,6 @@ def wrapper(self, other): return result return wrapper -_INT64_DTYPE = np.dtype(np.int64) -_NS_DTYPE = np.dtype('M8[ns]') - - class PeriodIndex(Int64Index): """ Immutable ndarray holding ordinal values indicating regular periods in diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index e52d9c9c8b777..9b20ac1e3f055 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1470,7 +1470,7 @@ def test_frame_datetime64_handling_groupby(self): (3, np.datetime64('2012-07-04'))], columns=['a', 'date']) result = df.groupby('a').first() - self.assertEqual(result['date'][3], np.datetime64('2012-07-03')) + self.assertEqual(result['date'][3], datetime(2012,7,3)) def test_series_interpolate_intraday(self): # #1698 diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index f9608be013b3c..62ee19da6b845 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -50,7 +50,7 @@ def _maybe_get_tz(tz): def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, - format=None): + format=None, coerce=False): """ Convert argument to datetime @@ -68,6 +68,7 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, If True returns a DatetimeIndex, if False returns ndarray of values format : string, 
default None strftime to parse time, eg "%d/%m/%Y" + coerce : force errors to NaT (False by default) Returns ------- @@ -84,7 +85,8 @@ def _convert_f(arg): result = tslib.array_strptime(arg, format) else: result = tslib.array_to_datetime(arg, raise_=errors == 'raise', - utc=utc, dayfirst=dayfirst) + utc=utc, dayfirst=dayfirst, + coerce=coerce) if com.is_datetime64_dtype(result) and box: result = DatetimeIndex(result, tz='utc' if utc else None) return result
Various bugs related to datetime64s - Properly convert np.datetime64 objects in a Series, #3416 This would convert to object dtype previously ``` In [1]: dates = [ ...: np.datetime64(datetime.date(2013, 1, 1)), ...: np.datetime64(datetime.date(2013, 1, 2)), ...: np.datetime64(datetime.date(2013, 1, 3)), ...: ] In [2]: s = pd.Series(dates) In [3]: s Out[3]: 0 2013-01-01 00:00:00 1 2013-01-02 00:00:00 2 2013-01-03 00:00:00 dtype: datetime64[ns] ``` - Do not allow astypes on `datetime64[ns]` except to `object`, and `timedelta64[ns]` to `object/int`, #3425 - Do not allow datetimelike/timedeltalike creation except with valid types (e.g. cannot pass `datetime64[ms]`, #3423 Any creation/astype of a datetimelike to a non accepted dtype will raise ``` In [6]: Series([Timestamp('20130101'),Timestamp('20130102')],dtype='datetime64[s]') TypeError: cannot convert datetimelike to dtype [datetime64[s]] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3516
2013-05-02T18:46:25Z
2013-05-08T21:51:09Z
2013-05-08T21:51:09Z
2014-06-16T01:29:27Z
ENH: add mode.mangle_dupe_cols option GH3468
diff --git a/RELEASE.rst b/RELEASE.rst index f3fb98535cb61..ca9c25294dc23 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -44,6 +44,11 @@ pandas 0.11.1 **KeyError** if **key** is not a valid store object. - The repr() for (Multi)Index now obeys display.max_seq_items rather then numpy threshold print options. (GH3426_, GH3466_) + - Added mangle_dupe_cols option to read_table/csv, allowing users + to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (GH3468_) + Note: The default value will change in 0.12 to the "no mangle" behaviour, + If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True + in your calls. **Bug Fixes** @@ -72,6 +77,7 @@ pandas 0.11.1 .. _GH3466: https://github.com/pydata/pandas/issues/3466 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 +.. _GH3468: https://github.com/pydata/pandas/issues/3468 .. _GH3455: https://github.com/pydata/pandas/issues/3455 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3461: https://github.com/pydata/pandas/issues/3461 diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 89f892daf9389..e2bbd456ea113 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -254,7 +254,8 @@ def _read(filepath_or_buffer, kwds): 'verbose': False, 'encoding': None, 'squeeze': False, - 'compression': None + 'compression': None, + 'mangle_dupe_cols': True, } @@ -340,7 +341,9 @@ def parser_f(filepath_or_buffer, verbose=False, encoding=None, - squeeze=False): + squeeze=False, + mangle_dupe_cols=True + ): # Alias sep -> delimiter. 
if delimiter is None: @@ -396,7 +399,9 @@ def parser_f(filepath_or_buffer, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, low_memory=low_memory, - buffer_lines=buffer_lines) + buffer_lines=buffer_lines, + mangle_dupe_cols=mangle_dupe_cols + ) return _read(filepath_or_buffer, kwds) @@ -1142,6 +1147,7 @@ def __init__(self, f, **kwds): self.skipinitialspace = kwds['skipinitialspace'] self.lineterminator = kwds['lineterminator'] self.quoting = kwds['quoting'] + self.mangle_dupe_cols = kwds.get('mangle_dupe_cols',True) self.has_index_names = False if 'has_index_names' in kwds: @@ -1323,12 +1329,13 @@ def _infer_columns(self): else: columns.append(c) - counts = {} - for i, col in enumerate(columns): - cur_count = counts.get(col, 0) - if cur_count > 0: - columns[i] = '%s.%d' % (col, cur_count) - counts[col] = cur_count + 1 + if self.mangle_dupe_cols: + counts = {} + for i, col in enumerate(columns): + cur_count = counts.get(col, 0) + if cur_count > 0: + columns[i] = '%s.%d' % (col, cur_count) + counts[col] = cur_count + 1 self._clear_buffer() diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index aa3fce3959860..5ff832431c917 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -589,14 +589,21 @@ def test_string_nas(self): tm.assert_frame_equal(result, expected) def test_duplicate_columns(self): - data = """A,A,B,B,B -1,2,3,4,5 -6,7,8,9,10 -11,12,13,14,15 -""" - df = self.read_table(StringIO(data), sep=',') - self.assert_(np.array_equal(df.columns, - ['A', 'A.1', 'B', 'B.1', 'B.2'])) + for engine in ['python', 'c']: + data = """A,A,B,B,B + 1,2,3,4,5 + 6,7,8,9,10 + 11,12,13,14,15 + """ + # check default beahviour + df = self.read_table(StringIO(data), sep=',',engine=engine) + self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2']) + + df = self.read_table(StringIO(data), sep=',',engine=engine,mangle_dupe_cols=False) + self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B']) + + 
df = self.read_table(StringIO(data), sep=',',engine=engine,mangle_dupe_cols=True) + self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2']) def test_csv_mixed_type(self): data = """A,B,C diff --git a/pandas/src/parser.pyx b/pandas/src/parser.pyx index 95c57f1675c64..694a769641b0d 100644 --- a/pandas/src/parser.pyx +++ b/pandas/src/parser.pyx @@ -249,6 +249,7 @@ cdef class TextReader: object dtype object encoding object compression + object mangle_dupe_cols set noconvert, usecols def __cinit__(self, source, @@ -298,11 +299,14 @@ cdef class TextReader: buffer_lines=None, skiprows=None, skip_footer=0, - verbose=False): + verbose=False, + mangle_dupe_cols=True): self.parser = parser_new() self.parser.chunksize = tokenize_chunksize + self.mangle_dupe_cols=mangle_dupe_cols + # For timekeeping self.clocks = [] @@ -571,8 +575,9 @@ cdef class TextReader: if name == '': name = 'Unnamed: %d' % i + count = counts.get(name, 0) - if count > 0: + if count > 0 and self.mangle_dupe_cols: header.append('%s.%d' % (name, count)) else: header.append(name)
#3468 as discussed, an added option to disable th mangling behavior of dupe columns taken by pd.read_csv going back to olden days. Not sure what the default should be, currently the default is compatible with existing scripts. Tested with both c and python parser engines.
https://api.github.com/repos/pandas-dev/pandas/pulls/3511
2013-05-02T05:34:41Z
2013-05-05T09:14:08Z
2013-05-05T09:14:08Z
2014-06-24T13:24:56Z
Fixed Unbound Variable `edge` access when BinGrouper is empty
diff --git a/RELEASE.rst b/RELEASE.rst index fbf8c28cffdea..4dc3746943605 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -58,6 +58,7 @@ pandas 0.11.1 - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) + - Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 @@ -73,6 +74,7 @@ pandas 0.11.1 .. _GH3461: https://github.com/pydata/pandas/issues/3461 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 +.. _GH3510: https://github.com/pydata/pandas/issues/3510 pandas 0.11.0 diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index aef44bd91396d..7762803b029e9 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1012,8 +1012,8 @@ def get_iterator(self, data, axis=0): yield label, data[start:edge] start = edge - if edge < len(data): - yield self.binlabels[-1], data[edge:] + if start < len(data): + yield self.binlabels[-1], data[start:] else: start = 0 for edge, label in izip(self.bins, self.binlabels): @@ -1022,8 +1022,8 @@ def get_iterator(self, data, axis=0): start = edge n = len(data.axes[axis]) - if edge < n: - inds = range(edge, n) + if start < n: + inds = range(start, n) yield self.binlabels[-1], data.take(inds, axis=axis) def apply(self, f, data, axis=0, keep_internal=False): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4604678d58d5a..994f7d12ef523 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2427,6 +2427,16 @@ def noddy(value, weight): # don't die no_toes = df_grouped.apply(lambda x: noddy(x.value, x.weight )) + def test_groupby_with_empty(self): + import pandas as pd 
+ index = pd.DatetimeIndex(()) + data = () + series = pd.Series(data, index) + grouper = pd.tseries.resample.TimeGrouper('D') + grouped = series.groupby(grouper) + assert next(iter(grouped), None) is None + + def assert_fp_equal(a, b): assert((np.abs(a - b) < 1e-12).all())
Incorrectly accessed variable `edge` instead of pre-filled value `start`. Signed-off-by: Kevin Stone kevinastone@gmail.com
https://api.github.com/repos/pandas-dev/pandas/pulls/3510
2013-05-02T02:45:24Z
2013-05-03T15:52:21Z
null
2014-06-20T12:16:59Z
BUG/CLN: Allow the BlockManager to have a non-unique items (axis 0)
diff --git a/RELEASE.rst b/RELEASE.rst index f3fb98535cb61..1a86ac02b2f7e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -61,8 +61,20 @@ pandas 0.11.1 - Fix regression in a DataFrame apply with axis=1, objects were not being converted back to base dtypes correctly (GH3480_) - Fix issue when storing uint dtypes in an HDFStore. (GH3493_) + - Non-unique index support clarified (GH3468_) + + - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_) + - Fix construction of a DataFrame with a duplicate index + - ref_locs support to allow duplicative indices across dtypes, + allows iget support to always find the index (even across dtypes) (GH2194_) + - applymap on a DataFrame with a non-unique index now works + (removed warning) (GH2786_), and fix (GH3230_) + - Fix to_csv to handle non-unique columns (GH3495_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 +.. _GH2786: https://github.com/pydata/pandas/issues/2786 +.. _GH2194: https://github.com/pydata/pandas/issues/2194 +.. _GH3230: https://github.com/pydata/pandas/issues/3230 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3480: https://github.com/pydata/pandas/issues/3480 @@ -75,8 +87,10 @@ pandas 0.11.1 .. _GH3455: https://github.com/pydata/pandas/issues/3455 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3461: https://github.com/pydata/pandas/issues/3461 +.. _GH3468: https://github.com/pydata/pandas/issues/3468 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 +.. _GH3495: https://github.com/pydata/pandas/issues/3495 .. 
_GH3493: https://github.com/pydata/pandas/issues/3493 diff --git a/pandas/core/common.py b/pandas/core/common.py index e6ce9fc5fc925..490f269c8c104 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1156,6 +1156,7 @@ def _default_index(n): values = np.arange(n, dtype=np.int64) result = values.view(Int64Index) result.name = None + result.is_unique = True return result diff --git a/pandas/core/format.py b/pandas/core/format.py index 5b68b26a41b77..fa2135bb4310c 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -820,21 +820,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None, self.blocks = self.obj._data.blocks ncols = sum(len(b.items) for b in self.blocks) self.data =[None] * ncols - - if self.obj.columns.is_unique: - self.colname_map = dict((k,i) for i,k in enumerate(self.obj.columns)) - else: - ks = [set(x.items) for x in self.blocks] - u = len(reduce(lambda a,x: a.union(x),ks,set())) - t = sum(map(len,ks)) - if u != t: - if len(set(self.cols)) != len(self.cols): - raise NotImplementedError("duplicate columns with differing dtypes are unsupported") - else: - # if columns are not unique and we acces this, - # we're doing it wrong - pass - + self.column_map = self.obj._data.get_items_map() if chunksize is None: chunksize = (100000/ (len(self.cols) or 1)) or 1 @@ -1034,18 +1020,13 @@ def _save_chunk(self, start_i, end_i): # create the data for a chunk slicer = slice(start_i,end_i) - if self.obj.columns.is_unique: - for i in range(len(self.blocks)): - b = self.blocks[i] - d = b.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format) - for j, k in enumerate(b.items): - # self.data is a preallocated list - self.data[self.colname_map[k]] = d[j] - else: - # self.obj should contain a proper view of the dataframes - # with the specified ordering of cols if cols was specified - for i in range(len(self.obj.columns)): - self.data[i] = self.obj.icol(i).values[slicer].tolist() + for i in 
range(len(self.blocks)): + b = self.blocks[i] + d = b.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format) + for i, item in enumerate(b.items): + + # self.data is a preallocated list + self.data[self.column_map[b][i]] = d[i] ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2cb7608c7aba6..8bfdee3b75170 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4261,9 +4261,6 @@ def infer(x): if com.is_datetime64_dtype(x): x = lib.map_infer(x, lib.Timestamp) return lib.map_infer(x, func) - #GH2786 - if not self.columns.is_unique: - raise ValueError("applymap does not support dataframes having duplicate column labels") return self.apply(infer) #---------------------------------------------------------------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index 34edd26a49617..101b69ffc3c7e 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -278,7 +278,7 @@ def is_monotonic(self): def is_lexsorted_for_tuple(self, tup): return True - @cache_readonly + @cache_readonly(allow_setting=True) def is_unique(self): return self._engine.is_unique diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 03cfd18f5afe5..5c0f9253beb62 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -61,9 +61,15 @@ def ref_locs(self): if (indexer == -1).any(): raise AssertionError('Some block items were not in block ' 'ref_items') + self._ref_locs = indexer return self._ref_locs + def set_ref_locs(self, placement): + """ explicity set the ref_locs indexer, only necessary for duplicate indicies """ + if placement is not None: + self._ref_locs = np.array(placement,dtype='int64') + def set_ref_items(self, ref_items, maybe_rename=True): """ If maybe_rename=True, need to set the items for this guy @@ -164,6 +170,9 @@ def get(self, item): loc = self.items.get_loc(item) return self.values[loc] + 
def iget(self, i): + return self.values[i] + def set(self, item, value): """ Modify Block in-place with new item value @@ -710,7 +719,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True): # attempt to create new type blocks blocks = [] for i, c in enumerate(self.items): - values = self.get(c) + values = self.iget(i) values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) values = _block_shape(values) @@ -879,7 +888,7 @@ class BlockManager(object): ----- This is *not* a public API class """ - __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated'] + __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated', '_ref_locs', '_items_map'] def __init__(self, blocks, axes, do_integrity_check=True): self.axes = [_ensure_index(ax) for ax in axes] @@ -897,6 +906,10 @@ def __init__(self, blocks, axes, do_integrity_check=True): self._consolidate_check() + # we have a duplicate items index, setup the block maps + if not self.items.is_unique: + self._set_ref_locs(do_refs=True) + @classmethod def make_empty(self): return BlockManager([], [[], []]) @@ -915,12 +928,141 @@ def set_axis(self, axis, value): if len(value) != len(cur_axis): raise Exception('Length mismatch (%d vs %d)' % (len(value), len(cur_axis))) + self.axes[axis] = value if axis == 0: + + # set/reset ref_locs based on the current index + # and map the new index if needed + self._set_ref_locs(labels=cur_axis) + + # take via ref_locs for block in self.blocks: block.set_ref_items(self.items, maybe_rename=True) + # set/reset ref_locs based on the new index + self._set_ref_locs(labels=value, do_refs=True) + + def _set_ref_locs(self, labels=None, do_refs=False): + """ + if we have a non-unique index on this axis, set the indexers + we need to set an absolute indexer for the blocks + return the indexer if we are not unique + + labels : the (new) labels for this manager + ref : boolean, whether to set the labels 
(one a 1-1 mapping) + + """ + + im = None + if labels is None: + labels = self.items + else: + _ensure_index(labels) + + # we are unique, and coming from a unique + if labels.is_unique and not do_refs: + + # reset our ref locs + self._ref_locs = None + for b in self.blocks: + b._ref_locs = None + + return None + + # we are going to a non-unique index + # we have ref_locs on the block at this point + # or if ref_locs are not set, then we must assume a block + # ordering + if not labels.is_unique and do_refs: + + # create the items map + im = getattr(self,'_items_map',None) + if im is None: + + im = dict() + def maybe_create_block(block): + try: + return d[block] + except: + im[block] = l = [ None ] * len(block.items) + return l + + count_items = 0 + for block in self.blocks: + + # if we have a duplicate index but + # _ref_locs have not been set....then + # have to assume ordered blocks are passed + num_items = len(block.items) + try: + rl = block.ref_locs + except: + rl = np.arange(num_items) + count_items + + m = maybe_create_block(block) + for i, item in enumerate(block.items): + m[i] = rl[i] + count_items += num_items + + self._items_map = im + + # create the _ref_loc map here + rl = np.empty(len(labels),dtype=object) + for block, items in im.items(): + for i, loc in enumerate(items): + rl[loc] = (block,i) + self._ref_locs = rl + return rl + + # return our cached _ref_locs (or will compute again + # when we recreate the block manager if needed + return getattr(self,'_ref_locs',None) + + def get_items_map(self): + """ + return an inverted ref_loc map for an item index + block -> item (in that block) location -> column location + """ + + # cache check + im = getattr(self,'_items_map',None) + if im is not None: + return im + + im = dict() + rl = self._set_ref_locs() + + def maybe_create_block(block): + try: + return im[block] + except: + im[block] = l = [ None ] * len(block.items) + return l + + # we have a non-duplicative index + if rl is None: + + axis = 
self.axes[0] + for block in self.blocks: + + m = maybe_create_block(block) + for i, item in enumerate(block.items): + m[i] = axis.get_loc(item) + + + # use the ref_locs to construct the map + else: + + for i, (block, idx) in enumerate(rl): + + m = maybe_create_block(block) + m[idx] = i + + self._items_map = im + return im + # make items read only for now def _get_items(self): return self.axes[0] @@ -1185,13 +1327,16 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): new_items, klass=blk.__class__, fastpath=True) + newb.set_ref_locs(blk._ref_locs) new_blocks = [newb] else: return self.reindex_items(new_items) else: new_blocks = self._slice_blocks(slobj, axis) - return BlockManager(new_blocks, new_axes, do_integrity_check=False) + bm = BlockManager(new_blocks, new_axes, do_integrity_check=False) + bm._consolidate_inplace() + return bm def _slice_blocks(self, slobj, axis): new_blocks = [] @@ -1206,6 +1351,7 @@ def _slice_blocks(self, slobj, axis): block.ref_items, klass=block.__class__, fastpath=True) + newb.set_ref_locs(block._ref_locs) new_blocks.append(newb) return new_blocks @@ -1387,26 +1533,11 @@ def iget(self, i): item = self.items[i] if self.items.is_unique: return self.get(item) - else: - # ugh - try: - inds, = (self.items == item).nonzero() - except AttributeError: # MultiIndex - inds, = self.items.map(lambda x: x == item).nonzero() - _, block = self._find_block(item) - - try: - binds, = (block.items == item).nonzero() - except AttributeError: # MultiIndex - binds, = block.items.map(lambda x: x == item).nonzero() - - for j, (k, b) in enumerate(zip(inds, binds)): - if i == k: - return block.values[b] - - raise Exception('Cannot have duplicate column names ' - 'split across dtypes') + # compute the duplicative indexer if needed + ref_locs = self._set_ref_locs() + b, loc = ref_locs[i] + return b.iget(loc) def get_scalar(self, tup): """ @@ -1582,6 +1713,8 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): # keep track of what items 
aren't found anywhere mask = np.zeros(len(item_order), dtype=bool) + new_axes = [new_items] + self.axes[1:] + new_blocks = [] for blk in self.blocks: blk_indexer = blk.items.get_indexer(item_order) @@ -1605,7 +1738,7 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return BlockManager(new_blocks, new_axes) def reindex_items(self, new_items, copy=True, fill_value=np.nan): """ @@ -1619,6 +1752,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): # TODO: this part could be faster (!) new_items, indexer = self.items.reindex(new_items) + new_axes = [new_items] + self.axes[1:] # could have so me pathological (MultiIndex) issues here new_blocks = [] @@ -1643,7 +1777,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return BlockManager(new_blocks, new_axes) def _make_na_block(self, items, ref_items, fill_value=np.nan): # TODO: infer dtypes other than float64 from fill_value @@ -1685,11 +1819,11 @@ def merge(self, other, lsuffix=None, rsuffix=None): this, other = self._maybe_rename_join(other, lsuffix, rsuffix) cons_items = this.items + other.items - consolidated = _consolidate(this.blocks + other.blocks, cons_items) - new_axes = list(this.axes) new_axes[0] = cons_items + consolidated = _consolidate(this.blocks + other.blocks, cons_items) + return BlockManager(consolidated, new_axes) def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True): @@ -1842,54 +1976,55 @@ def form_blocks(arrays, names, axes): bool_items = [] object_items = [] datetime_items = [] - for k, v in zip(names, arrays): + for i, (k, v) in enumerate(zip(names, arrays)): if issubclass(v.dtype.type, np.floating): - float_items.append((k, v)) + 
float_items.append((i, k, v)) elif issubclass(v.dtype.type, np.complexfloating): - complex_items.append((k, v)) + complex_items.append((i, k, v)) elif issubclass(v.dtype.type, np.datetime64): if v.dtype != _NS_DTYPE: v = tslib.cast_to_nanoseconds(v) if hasattr(v, 'tz') and v.tz is not None: - object_items.append((k, v)) + object_items.append((i, k, v)) else: - datetime_items.append((k, v)) + datetime_items.append((i, k, v)) elif issubclass(v.dtype.type, np.integer): if v.dtype == np.uint64: # HACK #2355 definite overflow if (v > 2 ** 63 - 1).any(): - object_items.append((k, v)) + object_items.append((i, k, v)) continue - int_items.append((k, v)) + int_items.append((i, k, v)) elif v.dtype == np.bool_: - bool_items.append((k, v)) + bool_items.append((i, k, v)) else: - object_items.append((k, v)) + object_items.append((i, k, v)) + is_unique = items.is_unique blocks = [] if len(float_items): - float_blocks = _multi_blockify(float_items, items) + float_blocks = _multi_blockify(float_items, items, is_unique=is_unique) blocks.extend(float_blocks) if len(complex_items): - complex_blocks = _simple_blockify(complex_items, items, np.complex128) + complex_blocks = _simple_blockify(complex_items, items, np.complex128, is_unique=is_unique) blocks.extend(complex_blocks) if len(int_items): - int_blocks = _multi_blockify(int_items, items) + int_blocks = _multi_blockify(int_items, items, is_unique=is_unique) blocks.extend(int_blocks) if len(datetime_items): - datetime_blocks = _simple_blockify(datetime_items, items, _NS_DTYPE) + datetime_blocks = _simple_blockify(datetime_items, items, _NS_DTYPE, is_unique=is_unique) blocks.extend(datetime_blocks) if len(bool_items): - bool_blocks = _simple_blockify(bool_items, items, np.bool_) + bool_blocks = _simple_blockify(bool_items, items, np.bool_, is_unique=is_unique) blocks.extend(bool_blocks) if len(object_items) > 0: - object_blocks = _simple_blockify(object_items, items, np.object_) + object_blocks = _simple_blockify(object_items, items, 
np.object_, is_unique=is_unique) blocks.extend(object_blocks) if len(extra_items): @@ -1897,38 +2032,40 @@ def form_blocks(arrays, names, axes): # empty items -> dtype object block_values = np.empty(shape, dtype=object) - block_values.fill(nan) na_block = make_block(block_values, extra_items, items) blocks.append(na_block) - blocks = _consolidate(blocks, items) return blocks -def _simple_blockify(tuples, ref_items, dtype): +def _simple_blockify(tuples, ref_items, dtype, is_unique=True): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ - block_items, values = _stack_arrays(tuples, ref_items, dtype) + block_items, values, placement = _stack_arrays(tuples, ref_items, dtype) # CHECK DTYPE? if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) - return [ make_block(values, block_items, ref_items) ] + block = make_block(values, block_items, ref_items) + if not is_unique: + block.set_ref_locs(placement) + return [ block ] - -def _multi_blockify(tuples, ref_items, dtype = None): +def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True): """ return an array of blocks that potentially have different dtypes """ # group by dtype - grouper = itertools.groupby(tuples, lambda x: x[1].dtype) + grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: - block_items, values = _stack_arrays(list(tup_block), ref_items, dtype) + block_items, values, placement = _stack_arrays(list(tup_block), ref_items, dtype) block = make_block(values, block_items, ref_items) + if not is_unique: + block.set_ref_locs(placement) new_blocks.append(block) return new_blocks @@ -1951,10 +2088,7 @@ def _shape_compat(x): else: return x.shape - names, arrays = zip(*tuples) - - # index may box values - items = ref_items[ref_items.isin(names)] + placement, names, arrays = zip(*tuples) first = arrays[0] shape = (len(arrays),) + _shape_compat(first) @@ 
-1963,7 +2097,15 @@ def _shape_compat(x): for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) - return items, stacked + # index may box values + if ref_items.is_unique: + items = ref_items[ref_items.isin(names)] + else: + items = _ensure_index([ n for n in names if n in ref_items ]) + if len(items) != len(stacked): + raise Exception("invalid names passed _stack_arrays") + + return items, stacked, placement def _blocks_to_series_dict(blocks, index=None): diff --git a/pandas/src/properties.pyx b/pandas/src/properties.pyx index 53bb561ef9110..1df11cecf7b94 100644 --- a/pandas/src/properties.pyx +++ b/pandas/src/properties.pyx @@ -4,16 +4,20 @@ from cpython cimport PyDict_Contains, PyDict_GetItem, PyDict_GetItem cdef class cache_readonly(object): cdef readonly: - object fget, name + object func, name, allow_setting - def __init__(self, func): - self.fget = func - self.name = func.__name__ + def __init__(self, func=None, allow_setting=False): + if func is not None: + self.func = func + self.name = func.__name__ + self.allow_setting = allow_setting - def __get__(self, obj, type): - if obj is None: - return self.fget + def __call__(self, func, doc=None): + self.func = func + self.name = func.__name__ + return self + def __get__(self, obj, typ): # Get the cache or set a default one if needed cache = getattr(obj, '_cache', None) @@ -23,12 +27,23 @@ cdef class cache_readonly(object): if PyDict_Contains(cache, self.name): # not necessary to Py_INCREF val = <object> PyDict_GetItem(cache, self.name) - return val else: - val = self.fget(obj) + val = self.func(obj) PyDict_SetItem(cache, self.name, val) - return val + return val + + def __set__(self, obj, value): + + if not self.allow_setting: + raise Exception("cannot set values for [%s]" % self.name) + + # Get the cache or set a default one if needed + cache = getattr(obj, '_cache', None) + if cache is None: + cache = obj._cache = {} + PyDict_SetItem(cache, self.name, value) + cdef class AxisProperty(object): 
cdef: Py_ssize_t axis diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7bafed216b9b9..69225c40e36df 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4973,17 +4973,33 @@ def test_to_csv_dups_cols(self): with ensure_clean() as filename: df.to_csv(filename) # single dtype, fine + result = read_csv(filename,index_col=0) + result.columns = df.columns + assert_frame_equal(result,df) - df_float = DataFrame(np.random.randn(1000, 30),dtype='float64') - df_int = DataFrame(np.random.randn(1000, 30),dtype='int64') - df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns) - df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns) - df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) - df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) + df_float = DataFrame(np.random.randn(1000, 3),dtype='float64') + df_int = DataFrame(np.random.randn(1000, 3),dtype='int64') + df_bool = DataFrame(True,index=df_float.index,columns=range(3)) + df_object = DataFrame('foo',index=df_float.index,columns=range(3)) + df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=range(3)) + df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True) + + cols = [] + for i in range(5): + cols.extend([0,1,2]) + df.columns = cols - #### this raises because we have duplicate column names across dtypes #### + from pandas import to_datetime with ensure_clean() as filename: - self.assertRaises(Exception, df.to_csv, filename) + df.to_csv(filename) + result = read_csv(filename,index_col=0) + + # date cols + for i in ['0.4','1.4','2.4']: + result[i] = to_datetime(result[i]) + + result.columns = df.columns + assert_frame_equal(result,df) # GH3457 from pandas.util.testing import makeCustomDataframe as mkdf @@ -7492,12 +7508,15 @@ def test_applymap(self): self.assert_(result.dtypes[0] == object) # GH2786 - df = 
DataFrame(np.random.random((3,4))) - df.columns = ['a','a','a','a'] - try: - df.applymap(str) - except ValueError as e: - self.assertTrue("support" in str(e)) + df = DataFrame(np.random.random((3,4))) + df2 = df.copy() + cols = ['a','a','a','a'] + df.columns = cols + + expected = df2.applymap(str) + expected.columns = cols + result = df.applymap(str) + assert_frame_equal(result,expected) def test_filter(self): # items @@ -9201,6 +9220,62 @@ def test_assign_columns(self): assert_series_equal(self.frame['C'], frame['baz']) assert_series_equal(self.frame['hi'], frame['foo2']) + def test_columns_with_dups(self): + + # GH 3468 related + + # basic + df = DataFrame([[1,2]], columns=['a','a']) + df.columns = ['a','a.1'] + str(df) + expected = DataFrame([[1,2]], columns=['a','a.1']) + assert_frame_equal(df, expected) + + df = DataFrame([[1,2,3]], columns=['b','a','a']) + df.columns = ['b','a','a.1'] + str(df) + expected = DataFrame([[1,2,3]], columns=['b','a','a.1']) + assert_frame_equal(df, expected) + + # with a dup index + df = DataFrame([[1,2]], columns=['a','a']) + df.columns = ['b','b'] + str(df) + expected = DataFrame([[1,2]], columns=['b','b']) + assert_frame_equal(df, expected) + + # multi-dtype + df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c']) + df.columns = list('ABCDEFG') + str(df) + expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG')) + assert_frame_equal(df, expected) + + # this is an error because we cannot disambiguate the dup columns + self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a'])) + + # dups across blocks + df_float = DataFrame(np.random.randn(10, 3),dtype='float64') + df_int = DataFrame(np.random.randn(10, 3),dtype='int64') + df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns) + df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns) + df_dt = 
DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) + df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) + + result = df._data._set_ref_locs() + self.assert_(len(result) == len(df.columns)) + + # testing iget + for i in range(len(df.columns)): + df.iloc[:,i] + + # dup columns across dtype GH 2079/2194 + vals = [[1, -1, 2.], [2, -2, 3.]] + rs = DataFrame(vals, columns=['A', 'A', 'B']) + xp = DataFrame(vals) + xp.columns = ['A', 'A', 'B'] + assert_frame_equal(rs, xp) + def test_cast_internals(self): casted = DataFrame(self.frame._data, dtype=int) expected = DataFrame(self.frame._series, dtype=int) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 86cd0ef524b35..ae71ec8b35422 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -772,6 +772,19 @@ def test_dups_fancy_indexing(self): expected = Index(['b','a','a']) self.assert_(result.equals(expected)) + # across dtypes + df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('aaaaaaa')) + df.head() + str(df) + result = DataFrame([[1,2,1.,2.,3.,'foo','bar']]) + result.columns = list('aaaaaaa') + + df_v = df.iloc[:,4] + res_v = result.iloc[:,4] + + assert_frame_equal(df,result) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index eec5f5632d36b..e25bd0de769a7 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -268,7 +268,7 @@ def test_duplicate_item_failure(self): b.ref_items = items mgr = BlockManager(blocks, [items, np.arange(N)]) - self.assertRaises(Exception, mgr.iget, 1) + mgr.iget(1) def test_contains(self): self.assert_('a' in self.mgr)
- Non-unique index support clarified #3092 - Fix assigning a new index to a duplicate index in a DataFrame would fail #3468 - Fix construction of a DataFrame with a duplicate index - ref_locs support to allow duplicative indices across dtypes, allows iget support to always find the index (even across dtypes) #2194 - applymap on a DataFrame with a non-unique index now works (removed warning) #2786, and fix #3230 - Fix to_csv to handle non-unique columns #3495 - Modification to cache_readonly to allow you to pass an argument (allow_setting), to 'set' this value (useful in order to avoid a computation you know to be true, e.g. is_unique = True for a default index partially fixes #3468 This would previously raise (same dtype assignment to a non-multi dtype frame with dup indicies) ``` In [6]: df = DataFrame([[1,2]], columns=['a','a']) In [7]: df.columns = ['a','a.1'] In [8]: df Out[8]: a a.1 0 1 2 ``` construction of a multi-dtype frame with a dup index (#2194) is fixed ``` In [18]: DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('aaaaaaa')) Out[18]: a a a a a a a 0 1 2 1 2 3 foo bar ``` This was also previously would raise ``` In [3]: df_float = DataFrame(np.random.randn(10, 3),dtype='float64') In [4]: df_int = DataFrame(np.random.randn(10, 3),dtype='int64') In [5]: df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns) In [6]: df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns) In [7]: df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) In [9]: df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) In [14]: cols = [] In [15]: for i in range(5): ....: cols.extend([0,1,2]) ....: In [16]: df.columns = cols In [17]: df Out[17]: 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2 0 0.586610 0.369944 1.341337 1 1 1 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 1 -1.944284 -0.813987 0.061306 0 0 1 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 
00:00:00 2001-01-01 00:00:00 2 -1.688694 1.644802 0.659083 0 0 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 3 1.422893 0.712382 0.749263 -1 0 -1 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 4 -0.453802 0.228886 -0.339753 2 0 -2 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 5 -0.189643 1.309407 -0.386121 0 0 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 6 0.455658 0.822050 -0.741014 0 0 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 7 -0.484678 -1.089146 0.774849 0 1 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 8 0.720365 1.696400 -0.604040 -1 0 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 9 -0.344480 0.886489 0.274428 1 0 0 True True True foo foo foo 2001-01-01 00:00:00 2001-01-01 00:00:00 2001-01-01 00:00:00 ``` For those of you interested.....here is the new ref_loc indexer for duplicate columns its by necessity a block oriented indexer, returns the column map (by column number) to a tuple of the block and the index in the block, only created when needed (e.g. 
when trying to get a column via iget and the index is non-unique, and the results are cached), this is #3092 ``` In [1]: df = pd.DataFrame(np.random.randn(8,4),columns=['a']*4) In [2]: df._data.blocks Out[2]: [FloatBlock: [a, a, a, a], 4 x 8, dtype float64] In [3]: df._data.blocks[0]._ref_locs In [4]: df._data._set_ref_locs() Out[4]: array([(FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 0), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 1), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 2), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 3)], dtype=object) ``` Fixed the #2786, #3230 bug that caused applymap to not work (we temp worked around by raising a ValueError; removed that check) ``` n [3]: In [3]: df = pd.DataFrame(np.random.random((3,4))) In [4]: In [4]: cols = pd.Index(['a','a','a','a']) In [5]: In [5]: df.columns = cols In [6]: In [6]: df.applymap(str) Out[6]: a a a a 0 0.494204195164 0.534601503195 0.471870025143 0.880092879641 1 0.860369768954 0.0472931994392 0.775532754792 0.822046777859 2 0.478775855962 0.623584943227 0.932012693593 0.739502590395 ``` Finally, to_csv writing has been fixed to use a single column mapper (which is derived from the ref_locs if the index is non-unique or the column numbering if it is unique)
https://api.github.com/repos/pandas-dev/pandas/pulls/3509
2013-05-02T01:14:01Z
2013-05-02T14:51:57Z
2013-05-02T14:51:57Z
2014-06-13T16:52:03Z
ENH: Support reading from S3
diff --git a/README.rst b/README.rst index c9b70f07b0862..ea713006c7189 100644 --- a/README.rst +++ b/README.rst @@ -90,6 +90,7 @@ Optional dependencies * openpyxl version 1.6.1 or higher, for writing .xlsx files * xlrd >= 0.9.0 * Needed for Excel I/O + * `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access. Installation from sources diff --git a/RELEASE.rst b/RELEASE.rst index 77e8e85db6a76..0f52babf26ff0 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -32,6 +32,7 @@ pandas 0.11.1 - pd.read_html() can now parse HTML string, files or urls and return dataframes courtesy of @cpcloud. (GH3477_) + - Support for reading Amazon S3 files. (GH3504_) **Improvements to existing features** diff --git a/doc/source/io.rst b/doc/source/io.rst index 9001ae393d552..8da3d422c50be 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -40,8 +40,9 @@ for some advanced strategies They can take a number of arguments: - - ``filepath_or_buffer``: Either a string path to a file, or any object with a - ``read`` method (such as an open file or ``StringIO``). + - ``filepath_or_buffer``: Either a string path to a file, url + (including http, ftp, and s3 locations), or any object with a ``read`` + method (such as an open file or ``StringIO``). - ``sep`` or ``delimiter``: A delimiter / separator to split fields on. `read_csv` is capable of inferring the delimiter automatically in some cases by "sniffing." The separator may be specified as a regular diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 161e7a521b997..1430843998843 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -34,7 +34,7 @@ class DateConversionError(Exception): Parameters ---------- filepath_or_buffer : string or file handle / StringIO. The string could be - a URL. Valid URL schemes include http, ftp, and file. For file URLs, a host + a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. 
For instance, a local file could be file ://localhost/path/to/table.csv %s @@ -188,6 +188,12 @@ def _is_url(url): except: return False +def _is_s3_url(url): + """ Check for an s3 url """ + try: + return urlparse.urlparse(url).scheme == 's3' + except: + return False def _read(filepath_or_buffer, kwds): "Generic reader of line files." @@ -196,17 +202,32 @@ def _read(filepath_or_buffer, kwds): if skipfooter is not None: kwds['skip_footer'] = skipfooter - if isinstance(filepath_or_buffer, basestring) and _is_url(filepath_or_buffer): - from urllib2 import urlopen - filepath_or_buffer = urlopen(filepath_or_buffer) - if py3compat.PY3: # pragma: no cover - if encoding: - errors = 'strict' - else: - errors = 'replace' - encoding = 'utf-8' - bytes = filepath_or_buffer.read() - filepath_or_buffer = StringIO(bytes.decode(encoding, errors)) + if isinstance(filepath_or_buffer, basestring): + if _is_url(filepath_or_buffer): + from urllib2 import urlopen + filepath_or_buffer = urlopen(filepath_or_buffer) + if py3compat.PY3: # pragma: no cover + if encoding: + errors = 'strict' + else: + errors = 'replace' + encoding = 'utf-8' + bytes = filepath_or_buffer.read() + filepath_or_buffer = StringIO(bytes.decode(encoding, errors)) + + if _is_s3_url(filepath_or_buffer): + try: + import boto + except: + raise ImportError("boto is required to handle s3 files") + # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY + # are environment variables + parsed_url = urlparse.urlparse(filepath_or_buffer) + conn = boto.connect_s3() + b = conn.get_bucket(parsed_url.netloc) + k = boto.s3.key.Key(b) + k.key = parsed_url.path + filepath_or_buffer = StringIO(k.get_contents_as_string()) if kwds.get('date_parser', None) is not None: if isinstance(kwds['parse_dates'], bool):
Tests not written. Will need a way to mock boto.
https://api.github.com/repos/pandas-dev/pandas/pulls/3504
2013-05-01T17:43:15Z
2013-05-10T14:33:46Z
2013-05-10T14:33:46Z
2014-06-17T08:29:03Z
BUG: GH3480 Fix regression in a DataFrame apply with axis=1
diff --git a/RELEASE.rst b/RELEASE.rst index fbf8c28cffdea..eecf32e8de21e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -58,10 +58,13 @@ pandas 0.11.1 - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) + - Fix regression in a DataFrame apply with axis=1, objects were not being converted back + to base dtypes correctly (GH3480_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 +.. _GH3480: https://github.com/pydata/pandas/issues/3480 .. _GH3454: https://github.com/pydata/pandas/issues/3454 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3426: https://github.com/pydata/pandas/issues/3426 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 977dc9e2b56ff..2cb7608c7aba6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4210,7 +4210,7 @@ def _apply_standard(self, func, axis, ignore_failures=False): if axis == 1: result = result.T - result = result.convert_objects(convert_dates=False) + result = result.convert_objects() return result else: diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4604678d58d5a..0b4dbed0b685d 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1826,6 +1826,13 @@ def convert_force_pure(x): self.assert_(result.dtype == np.object_) self.assert_(isinstance(result[0], Decimal)) + def test_apply_with_mixed_dtype(self): + # GH3480, apply with mixed dtype on axis=1 breaks in 0.11 + df = DataFrame({'foo1' : ['one', 'two', 'two', 'three', 'one', 'two'], + 'foo2' : np.random.randn(6)}) + result = df.apply(lambda x: x, axis=1) + assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts()) + def test_groupby_list_infer_array_like(self): 
result = self.df.groupby(list(self.df['A'])).mean() expected = self.df.groupby(self.df['A']).mean()
objects were not being converted back to base dtypes correctly closes #3480
https://api.github.com/repos/pandas-dev/pandas/pulls/3502
2013-05-01T14:06:46Z
2013-05-01T14:26:50Z
2013-05-01T14:26:50Z
2014-06-16T21:06:53Z
My first PR ever. Adding documentation to Cookbook.
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 338963abd24e3..bc665cd5dba69 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -1,343 +1,348 @@ -.. _cookbook: - -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import random - import os - np.random.seed(123456) - from pandas import * - import pandas as pd - randn = np.random.randn - randint = np.random.randint - np.set_printoptions(precision=4, suppress=True) - -******** -Cookbook -******** - -This is a respository for *short and sweet* examples and links for useful pandas recipes. -We encourage users to add to this documentation. - -This is a great *First Pull Request* (to add interesting links and/or put short code inline -for existing links) - -.. _cookbook.selection: - -Selection ---------- - -The :ref:`indexing <indexing>` docs. - -`Boolean Rows Indexing -<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__ - -`Using loc and iloc in selections -<https://github.com/pydata/pandas/issues/2904>`__ - -`Extending a panel along the minor axis -<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__ - -`Boolean masking in a panel -<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__ - -`Selecting via the complement -<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__ - -.. _cookbook.multi_index: - -MultiIndexing -------------- - -The :ref:`multindexing <indexing.hierarchical>` docs. 
- -`Creating a multi-index from a labeled frame -<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__ - -Slicing -~~~~~~~ - -`Slicing a multi-index with xs -<http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__ - -`Slicing a multi-index with xs #2 -<http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__ - -Sorting -~~~~~~~ - -`Multi-index sorting -<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__ - -`Partial Selection, the need for sortedness -<https://github.com/pydata/pandas/issues/2995>`__ - -Levels -~~~~~~ - -`Prepending a level to a multiindex -<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__ - -`Flatten Hierarchical columns -<http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__ - -.. _cookbook.grouping: - -Grouping --------- - -The :ref:`grouping <groupby>` docs. 
- -`Basic grouping with apply -<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__ - -`Using get_group -<http://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__ - -`Apply to different items in a group -<http://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__ - -`Expanding Apply -<http://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas>`__ - -`Replacing values with groupby means -<http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means>`__ - -`Sort by group with aggregation -<http://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column>`__ - -`Create multiple aggregated columns -<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__ - -Expanding Data -~~~~~~~~~~~~~~ - -`Alignment and to-date -<http://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions>`__ - -`Rolling Computation window based on values instead of counts -<http://stackoverflow.com/questions/14300768/pandas-rolling-computation-with-window-based-on-values-instead-of-counts>`__ - -`Rolling Mean by Time Interval -<http://stackoverflow.com/questions/15771472/pandas-rolling-mean-by-time-interval>`__ - -Splitting -~~~~~~~~~ - -`Splitting a frame -<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__ - -.. _cookbook.pivot: - -Pivot -~~~~~ -The :ref:`Pivot <reshaping.pivot>` docs. 
- -`Partial sums and subtotals -<http://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875>`__ - -`Frequency table like plyr in R -<http://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r>`__ - -Timeseries ----------- - -`Between times -<http://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range>`__ - -`Vectorized Lookup -<http://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe>`__ - -Turn a matrix with hours in columns and days in rows into a continous row sequence in the form of a time series. -`How to rearrange a python pandas dataframe? -<http://stackoverflow.com/questions/15432659/how-to-rearrange-a-python-pandas-dataframe>`__ - -.. _cookbook.resample: - -Resampling -~~~~~~~~~~ - -The :ref:`Resample <timeseries.resampling>` docs. - -`TimeGrouping of values grouped across time -<http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages>`__ - -`TimeGrouping #2 -<http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__ - -`Resampling with custom periods -<http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__ - -`Resample intraday frame without adding new days -<http://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days>`__ - -`Resample minute data -<http://stackoverflow.com/questions/14861023/resampling-minute-data>`__ - -.. _cookbook.merge: - -Merge ------ - -The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` docs. 
- -`emulate R rbind -<http://stackoverflow.com/questions/14988480/pandas-version-of-rbind>`__ - -`Self Join -<https://github.com/pydata/pandas/issues/2996>`__ - -`How to set the index and join -<http://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__ - -`KDB like asof join -<http://stackoverflow.com/questions/12322289/kdb-like-asof-join-for-timeseries-data-in-pandas/12336039#12336039>`__ - -`Join with a criteria based on the values -<http://stackoverflow.com/questions/15581829/how-to-perform-an-inner-or-outer-join-of-dataframes-with-pandas-on-non-simplisti>`__ - -.. _cookbook.plotting: - -Plotting --------- - -The :ref:`Plotting <visualization>` docs. - -`Make Matplotlib look like R -<http://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default>`__ - -`Setting x-axis major and minor labels -<http://stackoverflow.com/questions/12945971/pandas-timeseries-plot-setting-x-axis-major-and-minor-ticks-and-labels>`__ - -Data In/Out ------------ - -.. _cookbook.csv: - -CSV -~~~ - -The :ref:`CSV <io.read_csv_table>` docs - -`read_csv in action -<http://wesmckinney.com/blog/?p=635>`__ - -`Reading a csv chunk-by-chunk -<http://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309>`__ - -`Reading the first few lines of a frame -<http://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe>`__ - -`Inferring dtypes from a file -<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__ - -`Dealing with bad lines -<https://github.com/pydata/pandas/issues/2886>`__ - -.. _cookbook.sql: - -SQL -~~~ - -The :ref:`SQL <io.sql>` docs - -`Reading from databases with SQL -<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__ - -.. 
_cookbook.excel: - -Excel -~~~~~ - -The :ref:`Excel <io.excel>` docs - -`Reading from a filelike handle -<http://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe>`__ - -.. _cookbook.hdf: - -HDFStore -~~~~~~~~ - -The :ref:`HDFStores <io.hdf5>` docs - -`Simple Queries with a Timestamp Index -<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__ - -`Managing heteregenous data using a linked multiple table hierarchy -<https://github.com/pydata/pandas/issues/3032>`__ - -`Merging on-disk tables with millions of rows -<http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__ - -Deduplicating a large store by chunks, essentially a recusive reduction operation. Shows a function for taking in data from -csv file and creating a store by chunks, with date parsing as well. -`See here -<http://stackoverflow.com/questions/16110252/need-to-compare-very-large-files-around-1-5gb-in-python/16110391#16110391>`__ - -`Large Data work flows -<http://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas>`__ - -`Groupby on a HDFStore -<http://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore>`__ - -`Troubleshoot HDFStore exceptions -<http://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type>`__ - -`Setting min_itemsize with strings -<http://stackoverflow.com/questions/15988871/hdfstore-appendstring-dataframe-fails-when-string-column-contents-are-longer>`__ - -Storing Attributes to a group node - -.. ipython:: python - - df = DataFrame(np.random.randn(8,3)) - store = HDFStore('test.h5') - store.put('df',df) - - # you can store an arbitrary python object via pickle - store.get_storer('df').attrs.my_attribute = dict(A = 10) - store.get_storer('df').attrs.my_attribute - -.. 
ipython:: python - :suppress: - - store.close() - os.remove('test.h5') - -Miscellaneous -------------- - -The :ref:`Timedeltas <timeseries.timedeltas>` docs. - -`Operating with timedeltas -<https://github.com/pydata/pandas/pull/2899>`__ - -`Create timedeltas with date differences -<http://stackoverflow.com/questions/15683588/iterating-through-a-pandas-dataframe>`__ - -Aliasing Axis Names -------------------- - -To globally provide aliases for axis names, one can define these 2 functions: - -.. ipython:: python - - def set_axis_alias(cls, axis, alias): - if axis not in cls._AXIS_NUMBERS: - raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias)) - cls._AXIS_ALIASES[alias] = axis - - def clear_axis_alias(cls, axis, alias): - if axis not in cls._AXIS_NUMBERS: - raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias)) - cls._AXIS_ALIASES.pop(alias,None) - - - set_axis_alias(DataFrame,'columns', 'myaxis2') - df2 = DataFrame(randn(3,2),columns=['c1','c2'],index=['i1','i2','i3']) - df2.sum(axis='myaxis2') - clear_axis_alias(DataFrame,'columns', 'myaxis2') +.. _cookbook: + +.. currentmodule:: pandas + +.. ipython:: python + :suppress: + + import numpy as np + import random + import os + np.random.seed(123456) + from pandas import * + import pandas as pd + randn = np.random.randn + randint = np.random.randint + np.set_printoptions(precision=4, suppress=True) + +******** +Cookbook +******** + +This is a respository for *short and sweet* examples and links for useful pandas recipes. +We encourage users to add to this documentation. + +This is a great *First Pull Request* (to add interesting links and/or put short code inline +for existing links) + +.. _cookbook.selection: + +Selection +--------- + +The :ref:`indexing <indexing>` docs. 
+ +`Boolean Rows Indexing +<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__ + Indexing using both row labels and conditionals + +`Using loc and iloc in selections +<https://github.com/pydata/pandas/issues/2904>`__ + Use loc for label-oriented slicing and iloc positional slicing + +`Extending a panel along the minor axis +<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__ + Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions + +`Boolean masking in a panel +<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__ + Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values + +`Selecting via the complement +<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__ + ``~`` can be used to take the complement of a boolean array + +.. _cookbook.multi_index: + +MultiIndexing +------------- + +The :ref:`multindexing <indexing.hierarchical>` docs. 
+ +`Creating a multi-index from a labeled frame +<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__ + +Slicing +~~~~~~~ + +`Slicing a multi-index with xs +<http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__ + +`Slicing a multi-index with xs #2 +<http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__ + +Sorting +~~~~~~~ + +`Multi-index sorting +<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__ + +`Partial Selection, the need for sortedness +<https://github.com/pydata/pandas/issues/2995>`__ + +Levels +~~~~~~ + +`Prepending a level to a multiindex +<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__ + +`Flatten Hierarchical columns +<http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__ + +.. _cookbook.grouping: + +Grouping +-------- + +The :ref:`grouping <groupby>` docs. 
+ +`Basic grouping with apply +<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__ + +`Using get_group +<http://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__ + +`Apply to different items in a group +<http://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__ + +`Expanding Apply +<http://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas>`__ + +`Replacing values with groupby means +<http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means>`__ + +`Sort by group with aggregation +<http://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column>`__ + +`Create multiple aggregated columns +<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__ + +Expanding Data +~~~~~~~~~~~~~~ + +`Alignment and to-date +<http://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions>`__ + +`Rolling Computation window based on values instead of counts +<http://stackoverflow.com/questions/14300768/pandas-rolling-computation-with-window-based-on-values-instead-of-counts>`__ + +`Rolling Mean by Time Interval +<http://stackoverflow.com/questions/15771472/pandas-rolling-mean-by-time-interval>`__ + +Splitting +~~~~~~~~~ + +`Splitting a frame +<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__ + +.. _cookbook.pivot: + +Pivot +~~~~~ +The :ref:`Pivot <reshaping.pivot>` docs. 
+ +`Partial sums and subtotals +<http://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875>`__ + +`Frequency table like plyr in R +<http://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r>`__ + +Timeseries +---------- + +`Between times +<http://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range>`__ + +`Vectorized Lookup +<http://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe>`__ + +Turn a matrix with hours in columns and days in rows into a continous row sequence in the form of a time series. +`How to rearrange a python pandas dataframe? +<http://stackoverflow.com/questions/15432659/how-to-rearrange-a-python-pandas-dataframe>`__ + +.. _cookbook.resample: + +Resampling +~~~~~~~~~~ + +The :ref:`Resample <timeseries.resampling>` docs. + +`TimeGrouping of values grouped across time +<http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages>`__ + +`TimeGrouping #2 +<http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__ + +`Resampling with custom periods +<http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__ + +`Resample intraday frame without adding new days +<http://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days>`__ + +`Resample minute data +<http://stackoverflow.com/questions/14861023/resampling-minute-data>`__ + +.. _cookbook.merge: + +Merge +----- + +The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` docs. 
+ +`emulate R rbind +<http://stackoverflow.com/questions/14988480/pandas-version-of-rbind>`__ + +`Self Join +<https://github.com/pydata/pandas/issues/2996>`__ + +`How to set the index and join +<http://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__ + +`KDB like asof join +<http://stackoverflow.com/questions/12322289/kdb-like-asof-join-for-timeseries-data-in-pandas/12336039#12336039>`__ + +`Join with a criteria based on the values +<http://stackoverflow.com/questions/15581829/how-to-perform-an-inner-or-outer-join-of-dataframes-with-pandas-on-non-simplisti>`__ + +.. _cookbook.plotting: + +Plotting +-------- + +The :ref:`Plotting <visualization>` docs. + +`Make Matplotlib look like R +<http://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default>`__ + +`Setting x-axis major and minor labels +<http://stackoverflow.com/questions/12945971/pandas-timeseries-plot-setting-x-axis-major-and-minor-ticks-and-labels>`__ + +Data In/Out +----------- + +.. _cookbook.csv: + +CSV +~~~ + +The :ref:`CSV <io.read_csv_table>` docs + +`read_csv in action +<http://wesmckinney.com/blog/?p=635>`__ + +`Reading a csv chunk-by-chunk +<http://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309>`__ + +`Reading the first few lines of a frame +<http://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe>`__ + +`Inferring dtypes from a file +<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__ + +`Dealing with bad lines +<https://github.com/pydata/pandas/issues/2886>`__ + +.. _cookbook.sql: + +SQL +~~~ + +The :ref:`SQL <io.sql>` docs + +`Reading from databases with SQL +<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__ + +.. 
_cookbook.excel: + +Excel +~~~~~ + +The :ref:`Excel <io.excel>` docs + +`Reading from a filelike handle +<http://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe>`__ + +.. _cookbook.hdf: + +HDFStore +~~~~~~~~ + +The :ref:`HDFStores <io.hdf5>` docs + +`Simple Queries with a Timestamp Index +<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__ + +`Managing heteregenous data using a linked multiple table hierarchy +<https://github.com/pydata/pandas/issues/3032>`__ + +`Merging on-disk tables with millions of rows +<http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__ + +Deduplicating a large store by chunks, essentially a recusive reduction operation. Shows a function for taking in data from +csv file and creating a store by chunks, with date parsing as well. +`See here +<http://stackoverflow.com/questions/16110252/need-to-compare-very-large-files-around-1-5gb-in-python/16110391#16110391>`__ + +`Large Data work flows +<http://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas>`__ + +`Groupby on a HDFStore +<http://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore>`__ + +`Troubleshoot HDFStore exceptions +<http://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type>`__ + +`Setting min_itemsize with strings +<http://stackoverflow.com/questions/15988871/hdfstore-appendstring-dataframe-fails-when-string-column-contents-are-longer>`__ + +Storing Attributes to a group node + +.. ipython:: python + + df = DataFrame(np.random.randn(8,3)) + store = HDFStore('test.h5') + store.put('df',df) + + # you can store an arbitrary python object via pickle + store.get_storer('df').attrs.my_attribute = dict(A = 10) + store.get_storer('df').attrs.my_attribute + +.. 
ipython:: python + :suppress: + + store.close() + os.remove('test.h5') + +Miscellaneous +------------- + +The :ref:`Timedeltas <timeseries.timedeltas>` docs. + +`Operating with timedeltas +<https://github.com/pydata/pandas/pull/2899>`__ + +`Create timedeltas with date differences +<http://stackoverflow.com/questions/15683588/iterating-through-a-pandas-dataframe>`__ + +Aliasing Axis Names +------------------- + +To globally provide aliases for axis names, one can define these 2 functions: + +.. ipython:: python + + def set_axis_alias(cls, axis, alias): + if axis not in cls._AXIS_NUMBERS: + raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias)) + cls._AXIS_ALIASES[alias] = axis + + def clear_axis_alias(cls, axis, alias): + if axis not in cls._AXIS_NUMBERS: + raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias)) + cls._AXIS_ALIASES.pop(alias,None) + + + set_axis_alias(DataFrame,'columns', 'myaxis2') + df2 = DataFrame(randn(3,2),columns=['c1','c2'],index=['i1','i2','i3']) + df2.sum(axis='myaxis2') + clear_axis_alias(DataFrame,'columns', 'myaxis2')
Just adding some more description to Cookbook documentation per J. Reback's request. If I didn't mess up anything here, I will go through the rest of the items and add some description. Please let me know if I messed anything up.
https://api.github.com/repos/pandas-dev/pandas/pulls/3500
2013-05-01T04:03:03Z
2013-05-10T10:38:12Z
null
2013-05-10T18:02:59Z
BUG: GH3416 properly convert np.datetime64 objects in the _possibily_convert_datetimes
diff --git a/RELEASE.rst b/RELEASE.rst index fbf8c28cffdea..97f2446e92013 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -58,8 +58,10 @@ pandas 0.11.1 - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) + - Properly convert np.datetime64 objects in a Series (GH3416_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 +.. _GH3416: https://github.com/pydata/pandas/issues/3416 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3454: https://github.com/pydata/pandas/issues/3454 diff --git a/pandas/core/common.py b/pandas/core/common.py index e6ce9fc5fc925..60d8d3cf28a3d 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1119,12 +1119,12 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): v = [ v ] if len(v): inferred_type = lib.infer_dtype(v) - if inferred_type == 'datetime': + if inferred_type in ['datetime','datetime64']: try: value = tslib.array_to_datetime(np.array(v)) except: pass - elif inferred_type == 'timedelta': + elif inferred_type in ['timedelta','timedelta64']: value = _possibly_cast_to_timedelta(value) return value diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4845ae5258892..3918cad4e606a 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -469,6 +469,20 @@ def test_constructor_dtype_datetime64(self): self.assert_(isnull(s[1]) == True) self.assert_(s.dtype == 'M8[ns]') + # GH3416 + import pdb; pdb.set_trace() + dates = [ + np.datetime64(datetime(2013, 1, 1)), + np.datetime64(datetime(2013, 1, 2)), + np.datetime64(datetime(2013, 1, 3)), + ] + + s = Series(dates) + self.assert_(s.dtype == 'M8[ns]') + + s.ix[0] = np.nan + self.assert_(s.dtype == 'M8[ns]') + def test_constructor_dict(self): d = {'a': 0., 
'b': 1., 'c': 2.} result = Series(d, index=['b', 'c', 'd', 'a'])
closes #3416
https://api.github.com/repos/pandas-dev/pandas/pulls/3496
2013-04-30T19:51:34Z
2013-04-30T20:22:09Z
null
2014-06-29T16:39:27Z
BUG: GH3493 fix Cannot append DataFrames with uint dtypes to HDFStore
diff --git a/RELEASE.rst b/RELEASE.rst index 49d576aacaff9..c4ca7dafc3610 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -58,6 +58,7 @@ pandas 0.12.0 - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) + - Fix issue when storing uint dtypes in an HDFStore. (GH3493_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b9db30245eb1b..06ae9a7f7f11f 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1284,8 +1284,17 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep): def convert_string_data(self, data, itemsize): return data.astype('S%s' % itemsize) + def get_atom_coltype(self): + """ return the PyTables column class for this column """ + if self.kind.startswith('uint'): + col_name = "UInt%sCol" % self.kind[4:] + else: + col_name = "%sCol" % self.kind.capitalize() + + return getattr(_tables(), col_name) + def get_atom_data(self, block): - return getattr(_tables(), "%sCol" % self.kind.capitalize())(shape=block.shape[0]) + return self.get_atom_coltype()(shape=block.shape[0]) def set_atom_data(self, block): self.kind = block.dtype.name @@ -1383,7 +1392,7 @@ def get_atom_string(self, block, itemsize): return _tables().StringCol(itemsize=itemsize) def get_atom_data(self, block): - return getattr(_tables(), "%sCol" % self.kind.capitalize())() + return self.get_atom_coltype()() def get_atom_datetime64(self, block): return _tables().Int64Col() diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 1999789f206be..d7f497648236a 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -458,6 +458,21 @@ def test_append(self): store.append('df', df) 
tm.assert_frame_equal(store['df'], df) + # uints - test storage of uints + uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8), + 'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16), + 'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32), + 'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)}, + index=np.arange(5)) + _maybe_remove(store, 'uints') + store.append('uints', uint_data) + tm.assert_frame_equal(store['uints'], uint_data) + + # uints - test storage of uints in indexable columns + _maybe_remove(store, 'uints') + store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported + tm.assert_frame_equal(store['uints'], uint_data) + def test_append_some_nans(self): with ensure_clean(self.path) as store:
Fix for self-reported uint bug in HDFStore. #3493 Travis results (pass): https://travis-ci.org/jmellen/pandas/builds/6764554
https://api.github.com/repos/pandas-dev/pandas/pulls/3494
2013-04-30T17:56:04Z
2013-05-01T16:29:09Z
2013-05-01T16:29:09Z
2014-07-02T10:29:10Z
Failing Unit Test and Patch for Fixing MonthEnd DateRange Unions with Timezones
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 3bc801bd38695..025a12a17687e 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -363,7 +363,7 @@ class MonthEnd(DateOffset, CacheableOffset): """DateOffset of one month end""" def apply(self, other): - other = datetime(other.year, other.month, other.day) + other = datetime(other.year, other.month, other.day, tzinfo=other.tzinfo) n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index fa53222567800..22ed41f82506d 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -340,6 +340,22 @@ def test_range_tz(self): self.assert_(dr[0] == start) self.assert_(dr[2] == end) + def test_month_range_union_tz(self): + _skip_if_no_pytz() + from pytz import timezone + tz = timezone('US/Eastern') + + early_start = datetime(2011, 1, 1) + early_end = datetime(2011, 3, 1) + + late_start = datetime(2011, 3, 1) + late_end = datetime(2011, 5, 1) + + early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=datetools.monthEnd) + late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=datetools.monthEnd) + + early_dr.union(late_dr) + if __name__ == '__main__': import nose
This patch preserves the timezone information when applying an offset to `DatetimeIndex` with a timezone applied. The `MonthEnd` Offset wasn't retaining the timezone information resulting in an error even when both indexes had timezones. ``` TypeError: can't compare offset-naive and offset-aware datetimes ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3491
2013-04-30T08:35:55Z
2013-05-05T10:53:33Z
null
2014-06-25T20:51:29Z
BUG: GH3468 Fix assigning a new index to a duplicate index in a DataFrame would fail
diff --git a/RELEASE.rst b/RELEASE.rst index f3fb98535cb61..38298fde12ff0 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -61,8 +61,21 @@ pandas 0.11.1 - Fix regression in a DataFrame apply with axis=1, objects were not being converted back to base dtypes correctly (GH3480_) - Fix issue when storing uint dtypes in an HDFStore. (GH3493_) + - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_) + - ref_locs support to allow duplicative indices across dtypes (GH3468_) + - Non-unique index support clarified (GH3468_) + + - Fix assigning a new index to a duplicate index in a DataFrame would fail + - Fix construction of a DataFrame with a duplicate index + - ref_locs support to allow duplicative indices across dtypes + (GH2194_) + - applymap on a DataFrame with a non-unique index now works + (removed warning) (GH2786_), and fix (GH3230_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 +.. _GH2786: https://github.com/pydata/pandas/issues/2786 +.. _GH2194: https://github.com/pydata/pandas/issues/2194 +.. _GH3230: https://github.com/pydata/pandas/issues/3230 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3480: https://github.com/pydata/pandas/issues/3480 @@ -75,6 +88,7 @@ pandas 0.11.1 .. _GH3455: https://github.com/pydata/pandas/issues/3455 .. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3461: https://github.com/pydata/pandas/issues/3461 +.. _GH3468: https://github.com/pydata/pandas/issues/3468 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 .. 
_GH3493: https://github.com/pydata/pandas/issues/3493 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2cb7608c7aba6..8bfdee3b75170 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4261,9 +4261,6 @@ def infer(x): if com.is_datetime64_dtype(x): x = lib.map_infer(x, lib.Timestamp) return lib.map_infer(x, func) - #GH2786 - if not self.columns.is_unique: - raise ValueError("applymap does not support dataframes having duplicate column labels") return self.apply(infer) #---------------------------------------------------------------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 03cfd18f5afe5..c874b061dd63d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -61,6 +61,7 @@ def ref_locs(self): if (indexer == -1).any(): raise AssertionError('Some block items were not in block ' 'ref_items') + self._ref_locs = indexer return self._ref_locs @@ -164,6 +165,9 @@ def get(self, item): loc = self.items.get_loc(item) return self.values[loc] + def iget(self, i): + return self.values[i] + def set(self, item, value): """ Modify Block in-place with new item value @@ -710,7 +714,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True): # attempt to create new type blocks blocks = [] for i, c in enumerate(self.items): - values = self.get(c) + values = self.iget(i) values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) values = _block_shape(values) @@ -879,7 +883,7 @@ class BlockManager(object): ----- This is *not* a public API class """ - __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated'] + __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated', '_ref_locs'] def __init__(self, blocks, axes, do_integrity_check=True): self.axes = [_ensure_index(ax) for ax in axes] @@ -915,12 +919,82 @@ def set_axis(self, axis, value): if len(value) != len(cur_axis): raise Exception('Length mismatch (%d vs 
%d)' % (len(value), len(cur_axis))) + self.axes[axis] = value if axis == 0: + + # we have a non-unique index, so setup the ref_locs + if not cur_axis.is_unique: + self.set_ref_locs(cur_axis) + + # take via ref_locs for block in self.blocks: block.set_ref_items(self.items, maybe_rename=True) + def set_ref_locs(self, labels = None): + # if we have a non-unique index on this axis, set the indexers + # we need to set an absolute indexer for the blocks + # return the indexer if we are not unique + if labels is None: + labels = self.items + + if labels.is_unique: + return None + + #### THIS IS POTENTIALLY VERY SLOW ##### + + # if we are already computed, then we are done + rl = getattr(self,'_ref_locs',None) + if rl is not None: + return rl + + blocks = self.blocks + + # initialize + blockmap = dict() + for b in blocks: + arr = np.empty(len(b.items),dtype='int64') + arr.fill(-1) + b._ref_locs = arr + + # add this block to the blockmap for each + # of the items in the block + for item in b.items: + if item not in blockmap: + blockmap[item] = [] + blockmap[item].append(b) + + rl = np.empty(len(labels),dtype=object) + for i, item in enumerate(labels.values): + + try: + block = blockmap[item].pop(0) + except: + raise Exception("not enough items in set_ref_locs") + + indexer = np.arange(len(block.items)) + mask = (block.items == item) & (block._ref_locs == -1) + if not mask.any(): + + # this case will catch a comparison of a index of tuples + mask = np.empty(len(block.items),dtype=bool) + mask.fill(False) + for j, (bitem, brl) in enumerate(zip(block.items,block._ref_locs)): + mask[j] = bitem == item and brl == -1 + + indices = indexer[mask] + if len(indices): + idx = indices[0] + else: + raise Exception("already set too many items in set_ref_locs") + + block._ref_locs[idx] = i + rl[i] = (block,idx) + + self._ref_locs = rl + return rl + # make items read only for now def _get_items(self): return self.axes[0] @@ -1387,26 +1461,11 @@ def iget(self, i): item = self.items[i] if 
self.items.is_unique: return self.get(item) - else: - # ugh - try: - inds, = (self.items == item).nonzero() - except AttributeError: # MultiIndex - inds, = self.items.map(lambda x: x == item).nonzero() - - _, block = self._find_block(item) - - try: - binds, = (block.items == item).nonzero() - except AttributeError: # MultiIndex - binds, = block.items.map(lambda x: x == item).nonzero() - for j, (k, b) in enumerate(zip(inds, binds)): - if i == k: - return block.values[b] - - raise Exception('Cannot have duplicate column names ' - 'split across dtypes') + # compute the duplicative indexer if needed + ref_locs = self.set_ref_locs() + b, loc = ref_locs[i] + return b.values[loc] def get_scalar(self, tup): """ @@ -1582,6 +1641,8 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): # keep track of what items aren't found anywhere mask = np.zeros(len(item_order), dtype=bool) + new_axes = [new_items] + self.axes[1:] + new_blocks = [] for blk in self.blocks: blk_indexer = blk.items.get_indexer(item_order) @@ -1605,7 +1666,7 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return BlockManager(new_blocks, new_axes) def reindex_items(self, new_items, copy=True, fill_value=np.nan): """ @@ -1619,6 +1680,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): # TODO: this part could be faster (!) 
new_items, indexer = self.items.reindex(new_items) + new_axes = [new_items] + self.axes[1:] # could have so me pathological (MultiIndex) issues here new_blocks = [] @@ -1643,7 +1705,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return BlockManager(new_blocks, new_axes) def _make_na_block(self, items, ref_items, fill_value=np.nan): # TODO: infer dtypes other than float64 from fill_value @@ -1685,11 +1747,11 @@ def merge(self, other, lsuffix=None, rsuffix=None): this, other = self._maybe_rename_join(other, lsuffix, rsuffix) cons_items = this.items + other.items - consolidated = _consolidate(this.blocks + other.blocks, cons_items) - new_axes = list(this.axes) new_axes[0] = cons_items + consolidated = _consolidate(this.blocks + other.blocks, cons_items) + return BlockManager(consolidated, new_axes) def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True): @@ -1902,7 +1964,6 @@ def form_blocks(arrays, names, axes): na_block = make_block(block_values, extra_items, items) blocks.append(na_block) - blocks = _consolidate(blocks, items) return blocks @@ -1953,9 +2014,6 @@ def _shape_compat(x): names, arrays = zip(*tuples) - # index may box values - items = ref_items[ref_items.isin(names)] - first = arrays[0] shape = (len(arrays),) + _shape_compat(first) @@ -1963,6 +2021,14 @@ def _shape_compat(x): for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) + # index may box values + if ref_items.is_unique: + items = ref_items[ref_items.isin(names)] + else: + items = _ensure_index([ n for n in names if n in ref_items ]) + if len(items) != len(stacked): + raise Exception("invalid names passed _stack_arrays") + return items, stacked diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7bafed216b9b9..cb3799c28d0cf 100644 --- a/pandas/tests/test_frame.py +++ 
b/pandas/tests/test_frame.py @@ -7492,12 +7492,15 @@ def test_applymap(self): self.assert_(result.dtypes[0] == object) # GH2786 - df = DataFrame(np.random.random((3,4))) - df.columns = ['a','a','a','a'] - try: - df.applymap(str) - except ValueError as e: - self.assertTrue("support" in str(e)) + df = DataFrame(np.random.random((3,4))) + df2 = df.copy() + cols = ['a','a','a','a'] + df.columns = cols + + expected = df2.applymap(str) + expected.columns = cols + result = df.applymap(str) + assert_frame_equal(result,expected) def test_filter(self): # items @@ -9201,6 +9204,62 @@ def test_assign_columns(self): assert_series_equal(self.frame['C'], frame['baz']) assert_series_equal(self.frame['hi'], frame['foo2']) + def test_columns_with_dups(self): + + # GH 3468 related + + # basic + df = DataFrame([[1,2]], columns=['a','a']) + df.columns = ['a','a.1'] + str(df) + expected = DataFrame([[1,2]], columns=['a','a.1']) + assert_frame_equal(df, expected) + + df = DataFrame([[1,2,3]], columns=['b','a','a']) + df.columns = ['b','a','a.1'] + str(df) + expected = DataFrame([[1,2,3]], columns=['b','a','a.1']) + assert_frame_equal(df, expected) + + # with a dup index + df = DataFrame([[1,2]], columns=['a','a']) + df.columns = ['b','b'] + str(df) + expected = DataFrame([[1,2]], columns=['b','b']) + assert_frame_equal(df, expected) + + # multi-dtype + df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c']) + df.columns = list('ABCDEFG') + str(df) + expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG')) + assert_frame_equal(df, expected) + + # this is an error because we cannot disambiguate the dup columns + self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a'])) + + # dups across blocks + df_float = DataFrame(np.random.randn(10, 3),dtype='float64') + df_int = DataFrame(np.random.randn(10, 3),dtype='int64') + df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns) + df_object = 
DataFrame('foo',index=df_float.index,columns=df_float.columns) + df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) + df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) + + result = df._data.set_ref_locs() + self.assert_(len(result) == len(df.columns)) + + # testing iget + for i in range(len(df.columns)): + df.iloc[:,i] + + # dup columns across dtype GH 2079/2194 + vals = [[1, -1, 2.], [2, -2, 3.]] + rs = DataFrame(vals, columns=['A', 'A', 'B']) + xp = DataFrame(vals) + xp.columns = ['A', 'A', 'B'] + assert_frame_equal(rs, xp) + def test_cast_internals(self): casted = DataFrame(self.frame._data, dtype=int) expected = DataFrame(self.frame._series, dtype=int) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 86cd0ef524b35..8e1ea569973a6 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -772,6 +772,13 @@ def test_dups_fancy_indexing(self): expected = Index(['b','a','a']) self.assert_(result.equals(expected)) + # across dtypes + df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('aaaaaaa')) + result = DataFrame([[1,2,1.,2.,3.,'foo','bar']]) + result.columns = list('aaaaaaa') + assert_frame_equal(df,result) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index eec5f5632d36b..e25bd0de769a7 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -268,7 +268,7 @@ def test_duplicate_item_failure(self): b.ref_items = items mgr = BlockManager(blocks, [items, np.arange(N)]) - self.assertRaises(Exception, mgr.iget, 1) + mgr.iget(1) def test_contains(self): self.assert_('a' in self.mgr)
partially fixes #3468 This would previously raise (same dtype assignment to a non-multi dtype frame with dup indicies) ``` In [6]: df = DataFrame([[1,2]], columns=['a','a']) In [7]: df.columns = ['a','a.1'] In [8]: df Out[8]: a a.1 0 1 2 ``` construction of a multi-dtype frame with a dup index (#2194) is fixed ``` In [1]: DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('aaaaaaa')) Out[1]: a a a a a a a 0 1 2 3 1 2 foo bar ``` This was also previously would raise ``` In [2]: %cpaste Pasting code; enter '--' alone on the line to stop or use Ctrl-D. : df_float = DataFrame(np.random.randn(10, 3),dtype='float64') : df_int = DataFrame(np.random.randn(10, 3),dtype='int64') : df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns) : df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns) : df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) : df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) :-- In [3]: df Out[3]: 0 1 2 0 1 \ 0 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 1 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 2 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 3 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 4 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 5 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 6 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 7 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 8 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 9 True True True 2001-01-01 00:00:00 2001-01-01 00:00:00 2 0 1 2 0 1 2 0 1 2 0 2001-01-01 00:00:00 foo foo foo 0 0 0 0.431857 -0.131747 -1.039563 1 2001-01-01 00:00:00 foo foo foo 0 0 -1 0.516910 -0.683163 0.736468 2 2001-01-01 00:00:00 foo foo foo 0 0 0 -0.147417 -0.305452 0.006213 3 2001-01-01 00:00:00 foo foo foo 0 -1 0 1.443031 0.082710 -0.335054 4 2001-01-01 00:00:00 foo foo foo 1 0 0 -1.349293 0.645316 0.305524 5 2001-01-01 00:00:00 foo foo foo 
-1 0 0 0.571095 0.756571 -0.773880 6 2001-01-01 00:00:00 foo foo foo 0 0 0 -0.285091 1.196018 0.882786 7 2001-01-01 00:00:00 foo foo foo 2 0 0 0.003610 0.549072 -0.823217 8 2001-01-01 00:00:00 foo foo foo -1 1 0 -0.348279 -0.728958 -0.397435 9 2001-01-01 00:00:00 foo foo foo -1 0 0 0.363489 2.154132 0.494673 ``` For those of you interested.....here is the new ref_loc indexer for duplicate columns its by necessity a block oriented indexer, returns the column map (by column number) to a tuple of the block and the index in the block, only created when needed (e.g. when trying to get a column via iget and the index is non-unique, and the results are cached), this is #3092 ``` In [1]: df = pd.DataFrame(np.random.randn(8,4),columns=['a']*4) In [2]: df._data.blocks Out[2]: [FloatBlock: [a, a, a, a], 4 x 8, dtype float64] In [3]: df._data.blocks[0]._ref_locs In [4]: df._data.set_ref_locs() Out[4]: array([(FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 0), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 1), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 2), (FloatBlock: [a, a, a, a], 4 x 8, dtype float64, 3)], dtype=object) ``` Fixed the #2786, #3230 bug that caused applymap to not work (we temp worked around by raising a ValueError; removed that check) ``` n [3]: In [3]: df = pd.DataFrame(np.random.random((3,4))) In [4]: In [4]: cols = pd.Index(['a','a','a','a']) In [5]: In [5]: df.columns = cols In [6]: In [6]: df.applymap(str) Out[6]: a a a a 0 0.494204195164 0.534601503195 0.471870025143 0.880092879641 1 0.860369768954 0.0472931994392 0.775532754792 0.822046777859 2 0.478775855962 0.623584943227 0.932012693593 0.739502590395 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3483
2013-04-29T16:06:30Z
2013-05-02T01:15:09Z
null
2014-06-12T15:57:23Z
CLN: series to now inherit from NDFrame
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index c37776b3a3cd8..a0818831fb988 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -478,7 +478,7 @@ maximum value for each column occurred: tsdf = DataFrame(randn(1000, 3), columns=['A', 'B', 'C'], index=date_range('1/1/2000', periods=1000)) - tsdf.apply(lambda x: x.index[x.dropna().argmax()]) + tsdf.apply(lambda x: x[x.idxmax()]) You may also pass additional arguments and keyword arguments to the ``apply`` method. For instance, consider the following function you would like to apply: diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index c1d034d0d8e58..397a3ab7911a9 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -44,10 +44,15 @@ When using pandas, we recommend the following import convention: Series ------ -:class:`Series` is a one-dimensional labeled array (technically a subclass of -ndarray) capable of holding any data type (integers, strings, floating point -numbers, Python objects, etc.). The axis labels are collectively referred to as -the **index**. The basic method to create a Series is to call: +.. warning:: + + In 0.13.0 ``Series`` has internaly been refactored to no longer sub-class ``ndarray`` + but instead subclass ``NDFrame``, similarly to the rest of the pandas containers. This should be + a transparent change with only very limited API implications (See the :ref:`Internal Refactoring<whatsnew_0130.refactoring>`) + +:class:`Series` is a one-dimensional labeled array capable of holding any data +type (integers, strings, floating point numbers, Python objects, etc.). The axis +labels are collectively referred to as the **index**. The basic method to create a Series is to call: :: @@ -109,9 +114,8 @@ provided. The value will be repeated to match the length of **index** Series is ndarray-like ~~~~~~~~~~~~~~~~~~~~~~ -As a subclass of ndarray, Series is a valid argument to most NumPy functions -and behaves similarly to a NumPy array. 
However, things like slicing also slice -the index. +``Series`` acts very similary to a ``ndarray``, and is a valid argument to most NumPy functions. +However, things like slicing also slice the index. .. ipython :: python @@ -177,7 +181,7 @@ labels. The result of an operation between unaligned Series will have the **union** of the indexes involved. If a label is not found in one Series or the other, the -result will be marked as missing (NaN). Being able to write code without doing +result will be marked as missing ``NaN``. Being able to write code without doing any explicit data alignment grants immense freedom and flexibility in interactive data analysis and research. The integrated data alignment features of the pandas data structures set pandas apart from the majority of related @@ -924,11 +928,11 @@ Here we slice to a Panel4D. from pandas.core import panelnd Panel5D = panelnd.create_nd_panel_factory( klass_name = 'Panel5D', - axis_orders = [ 'cool', 'labels','items','major_axis','minor_axis'], - axis_slices = { 'labels' : 'labels', 'items' : 'items', - 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' }, - slicer = Panel4D, - axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' }, + orders = [ 'cool', 'labels','items','major_axis','minor_axis'], + slices = { 'labels' : 'labels', 'items' : 'items', + 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' }, + slicer = Panel4D, + aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' }, stat_axis = 2) p5d = Panel5D(dict(C1 = p4d)) diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 2fd606daa43b9..95428bd27e2a2 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -26,7 +26,7 @@ Enhancing Performance Cython (Writing C extensions for pandas) ---------------------------------------- -For many use cases writing pandas in pure python and numpy is sufficient. In some +For many use cases writing pandas in pure python and numpy is sufficient. 
In some computationally heavy applications however, it can be possible to achieve sizeable speed-ups by offloading work to `cython <http://cython.org/>`__. @@ -68,7 +68,7 @@ Here's the function in pure python: We achieve our result by by using ``apply`` (row-wise): .. ipython:: python - + %timeit df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) But clearly this isn't fast enough for us. Let's take a look and see where the @@ -83,7 +83,7 @@ By far the majority of time is spend inside either ``integrate_f`` or ``f``, hence we'll concentrate our efforts cythonizing these two functions. .. note:: - + In python 2 replacing the ``range`` with its generator counterpart (``xrange``) would mean the ``range`` line would vanish. In python 3 range is already a generator. @@ -125,7 +125,7 @@ is here to distinguish between function versions): %timeit df.apply(lambda x: integrate_f_plain(x['a'], x['b'], x['N']), axis=1) -Already this has shaved a third off, not too bad for a simple copy and paste. +Already this has shaved a third off, not too bad for a simple copy and paste. .. _enhancingperf.type: @@ -175,7 +175,7 @@ in python, so maybe we could minimise these by cythonizing the apply part. We are now passing ndarrays into the cython function, fortunately cython plays very nicely with numpy. -.. ipython:: +.. ipython:: In [4]: %%cython ...: cimport numpy as np @@ -205,6 +205,24 @@ The implementation is simple, it creates an array of zeros and loops over the rows, applying our ``integrate_f_typed``, and putting this in the zeros array. +.. warning:: + + In 0.13.0 since ``Series`` has internaly been refactored to no longer sub-class ``ndarray`` + but instead subclass ``NDFrame``, you can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter + to a cython function. Instead pass the actual ``ndarray`` using the ``.values`` attribute of the Series. + + Prior to 0.13.0 + + .. 
code-block:: python + + apply_integrate_f(df['a'], df['b'], df['N']) + + Use ``.values`` to get the underlying ``ndarray`` + + .. code-block:: python + + apply_integrate_f(df['a'].values, df['b'].values, df['N'].values) + .. note:: Loop like this would be *extremely* slow in python, but in cython looping over @@ -212,13 +230,13 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra .. ipython:: python - %timeit apply_integrate_f(df['a'], df['b'], df['N']) + %timeit apply_integrate_f(df['a'].values, df['b'].values, df['N'].values) We've gone another three times faster! Let's check again where the time is spent: .. ipython:: python - %prun -l 4 apply_integrate_f(df['a'], df['b'], df['N']) + %prun -l 4 apply_integrate_f(df['a'].values, df['b'].values, df['N'].values) As one might expect, the majority of the time is now spent in ``apply_integrate_f``, so if we wanted to make anymore efficiencies we must continue to concentrate our @@ -261,7 +279,7 @@ advanced cython techniques: .. ipython:: python - %timeit apply_integrate_f_wrap(df['a'], df['b'], df['N']) + %timeit apply_integrate_f_wrap(df['a'].values, df['b'].values, df['N'].values) This shaves another third off! diff --git a/doc/source/release.rst b/doc/source/release.rst index d761f1f008754..390c6e857ba32 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -115,6 +115,76 @@ pandas 0.13 - ``MultiIndex.astype()`` now only allows ``np.object_``-like dtypes and now returns a ``MultiIndex`` rather than an ``Index``. (:issue:`4039`) +**Internal Refactoring** + +In 0.13.0 there is a major refactor primarily to subclass ``Series`` from ``NDFrame``, +which is the base class currently for ``DataFrame`` and ``Panel``, to unify methods +and behaviors. Series formerly subclassed directly from ``ndarray``. 
(:issue:`4080`, :issue:`3862`, :issue:`816`) +See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` + +- Refactor of series.py/frame.py/panel.py to move common code to generic.py + + - added ``_setup_axes`` to created generic NDFrame structures + - moved methods + + - ``from_axes,_wrap_array,axes,ix,loc,iloc,shape,empty,swapaxes,transpose,pop`` + - ``__iter__,keys,__contains__,__len__,__neg__,__invert__`` + - ``convert_objects,as_blocks,as_matrix,values`` + - ``__getstate__,__setstate__`` (compat remains in frame/panel) + - ``__getattr__,__setattr__`` + - ``_indexed_same,reindex_like,align,where,mask`` + - ``fillna,replace`` (``Series`` replace is now consistent with ``DataFrame``) + - ``filter`` (also added axis argument to selectively filter on a different axis) + - ``reindex,reindex_axis`` (which was the biggest change to make generic) + - ``truncate`` (moved to become part of ``NDFrame``) + +- These are API changes which make ``Panel`` more consistent with ``DataFrame`` + + - ``swapaxes`` on a ``Panel`` with the same axes specified now return a copy + - support attribute access for setting + - filter supports same api as original ``DataFrame`` filter + +- Reindex called with no arguments will now return a copy of the input object + +- Series now inherits from ``NDFrame`` rather than directly from ``ndarray``. + There are several minor changes that affect the API. + + - numpy functions that do not support the array interface will now + return ``ndarrays`` rather than series, e.g. ``np.diff`` and ``np.ones_like`` + - ``Series(0.5)`` would previously return the scalar ``0.5``, this is no + longer supported + - ``TimeSeries`` is now an alias for ``Series``. the property ``is_time_series`` + can be used to distinguish (if desired) + +- Refactor of Sparse objects to use BlockManager + + - Created a new block type in internals, ``SparseBlock``, which can hold multi-dtypes + and is non-consolidatable. 
``SparseSeries`` and ``SparseDataFrame`` now inherit + more methods from their hierarchy (Series/DataFrame), and no longer inherit + from ``SparseArray`` (which instead is the object of the ``SparseBlock``) + - Sparse suite now supports integration with non-sparse data. Non-float sparse + data is supportable (partially implemented) + - Operations on sparse structures within DataFrames should preserve sparseness, + merging type operations will convert to dense (and back to sparse), so might + be somewhat inefficient + - enable setitem on ``SparseSeries`` for boolean/integer/slices + - ``SparsePanels`` implementation is unchanged (e.g. not using BlockManager, needs work) + +- added ``ftypes`` method to Series/DataFrame, similar to ``dtypes``, but indicates + if the underlying is sparse/dense (as well as the dtype) + +- All ``NDFrame`` objects now have a ``_prop_attributes``, which can be used to indicate various + values to propagate to a new object from an existing (e.g. name in ``Series`` will follow + more automatically now) + +- Internal type checking is now done via a suite of generated classes, allowing ``isinstance(value, klass)`` + without having to directly import the klass, courtesy of @jtratner + +- Bug in Series update where the parent frame is not updating its cache based on + changes (:issue:`4080`) or types (:issue:`3217`), fillna (:issue:`3386`) + +- Indexing with dtype conversions fixed (:issue:`4463`, :issue:`4204`) + **Experimental Features** **Bug Fixes** diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index bac8cb3193527..9776c3e4662ec 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -6,6 +6,12 @@ v0.13.0 (August ??, 2013) This is a major release from 0.12.0 and includes several new features and enhancements along with a large number of bug fixes. +..
warning:: + + In 0.13.0 ``Series`` has internally been refactored to no longer sub-class ``ndarray`` + but instead subclass ``NDFrame``, similarly to the rest of the pandas containers. This should be + a transparent change with only very limited API implications. See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` + API changes ~~~~~~~~~~~ @@ -134,6 +140,103 @@ Enhancements from pandas import offsets td + offsets.Minute(5) + offsets.Milli(5) +.. _whatsnew_0130.refactoring: + +Internal Refactoring +~~~~~~~~~~~~~~~~~~~~ + +In 0.13.0 there is a major refactor primarily to subclass ``Series`` from ``NDFrame``, +which is the base class currently for ``DataFrame`` and ``Panel``, to unify methods +and behaviors. Series formerly subclassed directly from ``ndarray``. (:issue:`4080`, :issue:`3862`, :issue:`816`) + +.. warning:: + + There are two potential incompatibilities from < 0.13.0 + + - Using certain numpy functions would previously return a ``Series`` if passed a ``Series`` + as an argument. This seems only to affect ``np.ones_like``, ``np.empty_like``, and + ``np.diff``. These now return ``ndarrays``. + + ..
ipython:: python + + s = Series([1,2,3,4]) + + # numpy usage + np.ones_like(s) + np.diff(s) + + # pandonic usage + Series(1,index=s.index) + s.diff() + + - Passing a ``Series`` directly to a cython function expecting an ``ndarray`` type will no + longer work directly, you must pass ``Series.values``, see :ref:`Enhancing Performance<enhancingperf.ndarray>` + + - ``Series(0.5)`` would previously return the scalar ``0.5``, instead this will return a 1-element ``Series`` + +- Refactor of series.py/frame.py/panel.py to move common code to generic.py + + - added ``_setup_axes`` to create generic NDFrame structures + - moved methods + + - ``from_axes,_wrap_array,axes,ix,loc,iloc,shape,empty,swapaxes,transpose,pop`` + - ``__iter__,keys,__contains__,__len__,__neg__,__invert__`` + - ``convert_objects,as_blocks,as_matrix,values`` + - ``__getstate__,__setstate__`` (compat remains in frame/panel) + - ``__getattr__,__setattr__`` + - ``_indexed_same,reindex_like,align,where,mask`` + - ``fillna,replace`` (``Series`` replace is now consistent with ``DataFrame``) + - ``filter`` (also added axis argument to selectively filter on a different axis) + - ``reindex,reindex_axis`` (which was the biggest change to make generic) + - ``truncate`` (moved to become part of ``NDFrame``) + +- These are API changes which make ``Panel`` more consistent with ``DataFrame`` + + - ``swapaxes`` on a ``Panel`` with the same axes specified now return a copy + - support attribute access for setting + - filter supports same api as original ``DataFrame`` filter + +- Reindex called with no arguments will now return a copy of the input object + +- Series now inherits from ``NDFrame`` rather than directly from ``ndarray``. + There are several minor changes that affect the API. + + - numpy functions that do not support the array interface will now + return ``ndarrays`` rather than series, e.g.
``np.diff`` and ``np.ones_like`` + - ``Series(0.5)`` would previously return the scalar ``0.5``, this is no + longer supported + - ``TimeSeries`` is now an alias for ``Series``. the property ``is_time_series`` + can be used to distinguish (if desired) + +- Refactor of Sparse objects to use BlockManager + + - Created a new block type in internals, ``SparseBlock``, which can hold multi-dtypes + and is non-consolidatable. ``SparseSeries`` and ``SparseDataFrame`` now inherit + more methods from their hierarchy (Series/DataFrame), and no longer inherit + from ``SparseArray`` (which instead is the object of the ``SparseBlock``) + - Sparse suite now supports integration with non-sparse data. Non-float sparse + data is supportable (partially implemented) + - Operations on sparse structures within DataFrames should preserve sparseness, + merging type operations will convert to dense (and back to sparse), so might + be somewhat inefficient + - enable setitem on ``SparseSeries`` for boolean/integer/slices + - ``SparsePanels`` implementation is unchanged (e.g. not using BlockManager, needs work) + +- added ``ftypes`` method to Series/DataFrame, similar to ``dtypes``, but indicates + if the underlying is sparse/dense (as well as the dtype) + +- All ``NDFrame`` objects now have a ``_prop_attributes``, which can be used to indicate various + values to propagate to a new object from an existing (e.g.
name in ``Series`` will follow + more automatically now) + +- Internal type checking is now done via a suite of generated classes, allowing ``isinstance(value, klass)`` + without having to directly import the klass, courtesy of @jtratner + +- Bug in Series update where the parent frame is not updating its cache based on + changes (:issue:`4080`) or types (:issue:`3217`), fillna (:issue:`3386`) + +- Indexing with dtype conversions fixed (:issue:`4463`, :issue:`4204`) + Bug Fixes ~~~~~~~~~ diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py new file mode 100644 index 0000000000000..58bbf70c0bea9 --- /dev/null +++ b/pandas/compat/pickle_compat.py @@ -0,0 +1,62 @@ +""" support pre 0.12 series pickle compatibility """ + +import sys +import pickle +import numpy as np +import pandas +from pandas import compat +from pandas.core.series import Series +from pandas.sparse.series import SparseSeries + +def load_reduce(self): + stack = self.stack + args = stack.pop() + func = stack[-1] + if type(args[0]) is type: + n = args[0].__name__ + if n == 'DeprecatedSeries': + stack[-1] = object.__new__(Series) + return + elif n == 'DeprecatedSparseSeries': + stack[-1] = object.__new__(SparseSeries) + return + + try: + value = func(*args) + except: + print(sys.exc_info()) + print(func, args) + raise + + stack[-1] = value + +if compat.PY3: + class Unpickler(pickle._Unpickler): + pass +else: + class Unpickler(pickle.Unpickler): + pass + +Unpickler.dispatch[pickle.REDUCE[0]] = load_reduce + +def load(file): + # try to load a compatibility pickle + # fake the old class hierarchy + # if it works, then return the new type objects + + try: + pandas.core.series.Series = DeprecatedSeries + pandas.sparse.series.SparseSeries = DeprecatedSparseSeries + with open(file,'rb') as fh: + return Unpickler(fh).load() + except: + raise + finally: + pandas.core.series.Series = Series + pandas.sparse.series.SparseSeries = SparseSeries + +class DeprecatedSeries(Series, np.ndarray): + 
pass + +class DeprecatedSparseSeries(DeprecatedSeries): + pass diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a649edfada739..f1d78dc34957b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -186,7 +186,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False): values = com._ensure_object(values) keys, counts = htable.value_count_object(values, mask) - result = Series(counts, index=keys) + result = Series(counts, index=com._values_from_object(keys)) if sort: result.sort() diff --git a/pandas/core/array.py b/pandas/core/array.py index c9a8a00b7f2d7..6847ba073b92a 100644 --- a/pandas/core/array.py +++ b/pandas/core/array.py @@ -34,3 +34,19 @@ globals()[_f] = getattr(np.random, _f) NA = np.nan + +#### a series-like ndarray #### + +class SNDArray(Array): + + def __new__(cls, data, index=None, name=None): + data = data.view(SNDArray) + data.index = index + data.name = name + + return data + + @property + def values(self): + return self.view(Array) + diff --git a/pandas/core/base.py b/pandas/core/base.py index e635844248371..04f48f85fa023 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -3,16 +3,24 @@ """ from pandas import compat import numpy as np +from pandas.core import common as com class StringMixin(object): """implements string methods so long as object defines a `__unicode__` method. Handles Python2/3 compatibility transparently.""" # side note - this could be made into a metaclass if more than one object nees + + #---------------------------------------------------------------------- + # Formatting + + def __unicode__(self): + raise NotImplementedError + def __str__(self): """ - Return a string representation for a particular object. + Return a string representation for a particular Object - Invoked by str(obj) in both py2/py3. + Invoked by str(df) in both py2/py3. Yields Bytestring in Py2, Unicode String in py3. 
""" diff --git a/pandas/core/common.py b/pandas/core/common.py index c34486fc28025..5765340f2906a 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -40,11 +40,34 @@ class AmbiguousIndexError(PandasError, KeyError): _np_version_under1p6 = LooseVersion(_np_version) < '1.6' _np_version_under1p7 = LooseVersion(_np_version) < '1.7' -_POSSIBLY_CAST_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]','O','int8','uint8','int16','uint16','int32','uint32','int64','uint64'] ]) +_POSSIBLY_CAST_DTYPES = set([np.dtype(t) + for t in ['M8[ns]', 'm8[ns]', 'O', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']]) + _NS_DTYPE = np.dtype('M8[ns]') _TD_DTYPE = np.dtype('m8[ns]') _INT64_DTYPE = np.dtype(np.int64) -_DATELIKE_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]'] ]) +_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', 'm8[ns]']]) + +# define abstract base classes to enable isinstance type checking on our objects +def create_pandas_abc_type(name, attr, comp): + @classmethod + def _check(cls, inst): + return getattr(inst, attr, None) in comp + dct = dict(__instancecheck__=_check, + __subclasscheck__=_check) + meta = type("ABCBase", (type,), dct) + return meta(name, tuple(), dct) + +ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",)) +ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",)) +ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp", ('sparse_series', 'sparse_time_series')) +ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp", ('sparse_array', 'sparse_series')) + +class _ABCGeneric(type): + def __instancecheck__(cls, inst): + return hasattr(inst, "_data") +ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {}) def isnull(obj): """Detect missing values (NaN in numeric arrays, None/NaN in object arrays) @@ -67,14 +90,12 @@ def _isnull_new(obj): if lib.isscalar(obj): return 
lib.checknull(obj) - from pandas.core.generic import PandasContainer - if isinstance(obj, np.ndarray): + if isinstance(obj, (ABCSeries, np.ndarray)): return _isnull_ndarraylike(obj) - elif isinstance(obj, PandasContainer): - # TODO: optimize for DataFrame, etc. + elif isinstance(obj, ABCGeneric): return obj.apply(isnull) elif isinstance(obj, list) or hasattr(obj, '__array__'): - return _isnull_ndarraylike(obj) + return _isnull_ndarraylike(np.asarray(obj)) else: return obj is None @@ -94,19 +115,18 @@ def _isnull_old(obj): if lib.isscalar(obj): return lib.checknull_old(obj) - from pandas.core.generic import PandasContainer - if isinstance(obj, np.ndarray): + if isinstance(obj, (ABCSeries, np.ndarray)): return _isnull_ndarraylike_old(obj) - elif isinstance(obj, PandasContainer): - # TODO: optimize for DataFrame, etc. + elif isinstance(obj, ABCGeneric): return obj.apply(_isnull_old) elif isinstance(obj, list) or hasattr(obj, '__array__'): - return _isnull_ndarraylike_old(obj) + return _isnull_ndarraylike_old(np.asarray(obj)) else: return obj is None _isnull = _isnull_new + def _use_inf_as_null(key): '''Option change callback for null/inf behaviour Choose which replacement for numpy.isnan / -numpy.isfinite is used. 
@@ -134,39 +154,42 @@ def _use_inf_as_null(key): def _isnull_ndarraylike(obj): - from pandas import Series - values = np.asarray(obj) - if values.dtype.kind in ('O', 'S', 'U'): + values = obj + dtype = values.dtype + + if dtype.kind in ('O', 'S', 'U'): # Working around NumPy ticket 1542 shape = values.shape - if values.dtype.kind in ('S', 'U'): + if dtype.kind in ('S', 'U'): result = np.zeros(values.shape, dtype=bool) else: result = np.empty(shape, dtype=bool) vec = lib.isnullobj(values.ravel()) result[:] = vec.reshape(shape) - if isinstance(obj, Series): - result = Series(result, index=obj.index, copy=False) - elif values.dtype == np.dtype('M8[ns]'): - # this is the NaT pattern - result = values.view('i8') == tslib.iNaT - elif values.dtype == np.dtype('m8[ns]'): + elif dtype in _DATELIKE_DTYPES: # this is the NaT pattern - result = values.view('i8') == tslib.iNaT + v = getattr(values, 'asi8', None) + if v is None: + v = values.view('i8') + result = v == tslib.iNaT else: - # -np.isfinite(obj) result = np.isnan(obj) + + if isinstance(obj, ABCSeries): + from pandas import Series + result = Series(result, index=obj.index, copy=False) + return result def _isnull_ndarraylike_old(obj): - from pandas import Series - values = np.asarray(obj) + values = obj + dtype = values.dtype - if values.dtype.kind in ('O', 'S', 'U'): + if dtype.kind in ('O', 'S', 'U'): # Working around NumPy ticket 1542 shape = values.shape @@ -177,13 +200,19 @@ def _isnull_ndarraylike_old(obj): vec = lib.isnullobj_old(values.ravel()) result[:] = vec.reshape(shape) - if isinstance(obj, Series): - result = Series(result, index=obj.index, copy=False) - elif values.dtype == np.dtype('M8[ns]'): + elif dtype in _DATELIKE_DTYPES: # this is the NaT pattern - result = values.view('i8') == tslib.iNaT + v = getattr(values, 'asi8', None) + if v is None: + v = values.view('i8') + result = v == tslib.iNaT else: result = -np.isfinite(obj) + + if isinstance(obj, ABCSeries): + from pandas import Series + result = 
Series(result, index=obj.index, copy=False) + return result @@ -231,9 +260,9 @@ def mask_missing(arr, values_to_mask): # if x is a string and mask is not, then we get a scalar # return value, which is not good - if not isinstance(mask,np.ndarray): + if not isinstance(mask, np.ndarray): m = mask - mask = np.empty(arr.shape,dtype=np.bool) + mask = np.empty(arr.shape, dtype=np.bool) mask.fill(m) else: mask = mask | (arr == x) @@ -338,11 +367,11 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info): ('float64', 'float64'): algos.take_1d_float64_float64, ('object', 'object'): algos.take_1d_object_object, ('bool', 'bool'): - _view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8), + _view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8), ('bool', 'object'): - _view_wrapper(algos.take_1d_bool_object, np.uint8, None), - ('datetime64[ns]','datetime64[ns]'): - _view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64) + _view_wrapper(algos.take_1d_bool_object, np.uint8, None), + ('datetime64[ns]', 'datetime64[ns]'): + _view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64) } @@ -365,12 +394,12 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info): ('float64', 'float64'): algos.take_2d_axis0_float64_float64, ('object', 'object'): algos.take_2d_axis0_object_object, ('bool', 'bool'): - _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8), + _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8), ('bool', 'object'): - _view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None), - ('datetime64[ns]','datetime64[ns]'): - _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64, - fill_wrap=np.int64) + _view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None), + ('datetime64[ns]', 'datetime64[ns]'): + _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64, + fill_wrap=np.int64) } @@ -393,12 +422,12 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, 
mask_info): ('float64', 'float64'): algos.take_2d_axis1_float64_float64, ('object', 'object'): algos.take_2d_axis1_object_object, ('bool', 'bool'): - _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8), + _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8), ('bool', 'object'): - _view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None), - ('datetime64[ns]','datetime64[ns]'): - _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64, - fill_wrap=np.int64) + _view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None), + ('datetime64[ns]', 'datetime64[ns]'): + _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64, + fill_wrap=np.int64) } @@ -421,12 +450,12 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info): ('float64', 'float64'): algos.take_2d_multi_float64_float64, ('object', 'object'): algos.take_2d_multi_object_object, ('bool', 'bool'): - _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8), + _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8), ('bool', 'object'): - _view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None), - ('datetime64[ns]','datetime64[ns]'): - _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64, - fill_wrap=np.int64) + _view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None), + ('datetime64[ns]', 'datetime64[ns]'): + _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64, + fill_wrap=np.int64) } @@ -667,7 +696,7 @@ def diff(arr, n, axis=0): lag = lag.copy() lag[mask] = 0 - result = res-lag + result = res - lag result[mask] = na out_arr[res_indexer] = result else: @@ -685,10 +714,11 @@ def _infer_dtype_from_scalar(val): # a 1-element ndarray if isinstance(val, pa.Array): if val.ndim != 0: - raise ValueError("invalid ndarray passed to _infer_dtype_from_scalar") + raise ValueError( + "invalid ndarray passed to _infer_dtype_from_scalar") dtype = val.dtype - val = val.item() + val = val.item() elif 
isinstance(val, compat.string_types): @@ -702,7 +732,7 @@ def _infer_dtype_from_scalar(val): elif isinstance(val, np.datetime64): # ugly hacklet - val = lib.Timestamp(val).value + val = lib.Timestamp(val).value dtype = np.dtype('M8[ns]') elif is_bool(val): @@ -727,11 +757,12 @@ def _maybe_cast_scalar(dtype, value): return tslib.iNaT return value + def _maybe_promote(dtype, fill_value=np.nan): # if we passed an array here, determine the fill value by dtype - if isinstance(fill_value,np.ndarray): - if issubclass(fill_value.dtype.type, (np.datetime64,np.timedelta64)): + if isinstance(fill_value, np.ndarray): + if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)): fill_value = tslib.iNaT else: @@ -742,7 +773,7 @@ def _maybe_promote(dtype, fill_value=np.nan): fill_value = np.nan # returns tuple of (dtype, fill_value) - if issubclass(dtype.type, (np.datetime64,np.timedelta64)): + if issubclass(dtype.type, (np.datetime64, np.timedelta64)): # for now: refuse to upcast datetime64 # (this is because datetime64 will not implicitly upconvert # to object correctly as of numpy 1.6.1) @@ -799,6 +830,7 @@ def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None): if mask.any(): other = _maybe_cast_scalar(result.dtype, other) + def changeit(): # try to directly set by expanding our array to full @@ -814,8 +846,10 @@ def changeit(): except: pass - # we are forced to change the dtype of the result as the input isn't compatible - r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True) + # we are forced to change the dtype of the result as the input + # isn't compatible + r, fill_value = _maybe_upcast( + result, fill_value=other, dtype=dtype, copy=True) np.putmask(r, mask, other) # we need to actually change the dtype here @@ -824,7 +858,8 @@ def changeit(): # if we are trying to do something unsafe # like put a bigger dtype in a smaller one, use the smaller one if change.dtype.itemsize < r.dtype.itemsize: - raise Exception("cannot 
change dtype of input to smaller size") + raise Exception( + "cannot change dtype of input to smaller size") change.dtype = r.dtype change[:] = r @@ -834,12 +869,12 @@ def changeit(): # if we have nans in the False portion of our mask then we need to upcast (possibily) # otherwise we DON't want to upcast (e.g. if we are have values, say integers in # the success portion then its ok to not upcast) - new_dtype, fill_value = _maybe_promote(result.dtype,other) + new_dtype, fill_value = _maybe_promote(result.dtype, other) if new_dtype != result.dtype: # we have a scalar or len 0 ndarray # and its nan and we are changing some values - if np.isscalar(other) or (isinstance(other,np.ndarray) and other.ndim < 1): + if np.isscalar(other) or (isinstance(other, np.ndarray) and other.ndim < 1): if isnull(other): return changeit() @@ -856,6 +891,7 @@ def changeit(): return result, False + def _maybe_upcast_indexer(result, indexer, other, dtype=None): """ a safe version of setitem that (potentially upcasts the result return the result and a changed flag @@ -863,9 +899,11 @@ def _maybe_upcast_indexer(result, indexer, other, dtype=None): other = _maybe_cast_scalar(result.dtype, other) original_dtype = result.dtype + def changeit(): # our type is wrong here, need to upcast - r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True) + r, fill_value = _maybe_upcast( + result, fill_value=other, dtype=dtype, copy=True) try: r[indexer] = other except: @@ -874,10 +912,10 @@ def changeit(): r[indexer] = fill_value # if we have changed to floats, might want to cast back if we can - r = _possibly_downcast_to_dtype(r,original_dtype) + r = _possibly_downcast_to_dtype(r, original_dtype) return r, True - new_dtype, fill_value = _maybe_promote(original_dtype,other) + new_dtype, fill_value = _maybe_promote(original_dtype, other) if new_dtype != result.dtype: return changeit() @@ -888,6 +926,7 @@ def changeit(): return result, False + def _maybe_upcast(values, 
fill_value=np.nan, dtype=None, copy=False): """ provide explicty type promotion and coercion @@ -922,13 +961,13 @@ def _possibly_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ - if not isinstance(result, np.ndarray): + if np.isscalar(result): return result try: - if issubclass(dtype.type,np.floating): + if issubclass(dtype.type, np.floating): return result.astype(dtype) - elif dtype == np.bool_ or issubclass(dtype.type,np.integer): + elif dtype == np.bool_ or issubclass(dtype.type, np.integer): if issubclass(result.dtype.type, np.number) and notnull(result).all(): new_result = result.astype(dtype) if (new_result == result).all(): @@ -938,6 +977,7 @@ def _possibly_downcast_to_dtype(result, dtype): return result + def _lcd_dtypes(a_dtype, b_dtype): """ return the lcd dtype to hold these types """ @@ -965,6 +1005,7 @@ def _lcd_dtypes(a_dtype, b_dtype): return np.float64 return np.object + def _fill_zeros(result, y, fill): """ if we have an integer value (or array in y) and we have 0's, fill them with the fill, @@ -973,7 +1014,7 @@ def _fill_zeros(result, y, fill): if fill is not None: if not isinstance(y, np.ndarray): dtype, value = _infer_dtype_from_scalar(y) - y = pa.empty(result.shape,dtype=dtype) + y = pa.empty(result.shape, dtype=dtype) y.fill(value) if is_integer_dtype(y): @@ -981,11 +1022,13 @@ def _fill_zeros(result, y, fill): mask = y.ravel() == 0 if mask.any(): shape = result.shape - result, changed = _maybe_upcast_putmask(result.ravel(),mask,fill) + result, changed = _maybe_upcast_putmask( + result.ravel(), mask, fill) result = result.reshape(shape) return result + def _interp_wrapper(f, wrap_dtype, na_override=None): def wrapper(arr, mask, limit=None): view = arr.view(wrap_dtype) @@ -1003,10 +1046,10 @@ def wrapper(arr, mask, limit=None): def pad_1d(values, limit=None, mask=None): - dtype = values.dtype.name + dtype = values.dtype.name _method = None if 
is_float_dtype(values): - _method = getattr(algos,'pad_inplace_%s' % dtype,None) + _method = getattr(algos, 'pad_inplace_%s' % dtype, None) elif is_datetime64_dtype(values): _method = _pad_1d_datetime elif values.dtype == np.object_: @@ -1023,10 +1066,10 @@ def pad_1d(values, limit=None, mask=None): def backfill_1d(values, limit=None, mask=None): - dtype = values.dtype.name + dtype = values.dtype.name _method = None if is_float_dtype(values): - _method = getattr(algos,'backfill_inplace_%s' % dtype,None) + _method = getattr(algos, 'backfill_inplace_%s' % dtype, None) elif is_datetime64_dtype(values): _method = _backfill_1d_datetime elif values.dtype == np.object_: @@ -1044,10 +1087,10 @@ def backfill_1d(values, limit=None, mask=None): def pad_2d(values, limit=None, mask=None): - dtype = values.dtype.name + dtype = values.dtype.name _method = None if is_float_dtype(values): - _method = getattr(algos,'pad_2d_inplace_%s' % dtype,None) + _method = getattr(algos, 'pad_2d_inplace_%s' % dtype, None) elif is_datetime64_dtype(values): _method = _pad_2d_datetime elif values.dtype == np.object_: @@ -1069,10 +1112,10 @@ def pad_2d(values, limit=None, mask=None): def backfill_2d(values, limit=None, mask=None): - dtype = values.dtype.name + dtype = values.dtype.name _method = None if is_float_dtype(values): - _method = getattr(algos,'backfill_2d_inplace_%s' % dtype,None) + _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype, None) elif is_datetime64_dtype(values): _method = _backfill_2d_datetime elif values.dtype == np.object_: @@ -1092,6 +1135,36 @@ def backfill_2d(values, limit=None, mask=None): pass +def interpolate_2d(values, method='pad', axis=0, limit=None, missing=None): + """ perform an actual interpolation of values, values will be make 2-d if needed + fills inplace, returns the result """ + + transf = (lambda x: x) if axis == 0 else (lambda x: x.T) + + # reshape a 1 dim if needed + ndim = values.ndim + if values.ndim == 1: + if axis != 0: + raise Exception("cannot 
interpolate on a ndim == 1 with axis != 0") + values = values.reshape(tuple((1,) + values.shape)) + + if missing is None: + mask = None + else: # todo create faster fill func without masking + mask = mask_missing(transf(values), missing) + + if method == 'pad': + pad_2d(transf(values), limit=limit, mask=mask) + else: + backfill_2d(transf(values), limit=limit, mask=mask) + + # reshape back + if ndim == 1: + values = values[0] + + return values + + def _consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: @@ -1103,27 +1176,54 @@ def _consensus_name_attr(objs): # Lots of little utilities +def _maybe_box(indexer, values, obj, key): + + # if we have multiples coming back, box em + if isinstance(values, np.ndarray): + return obj[indexer.get_loc(key)] + + # return the value + return values + + +def _values_from_object(o): + """ return my values or the object if we are say an ndarray """ + f = getattr(o, 'get_values', None) + if f is not None: + o = f() + return o + + def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True): """ if we have an object dtype, try to coerce dates and/or numers """ + # if we have passed in a list or scalar + if isinstance(values, (list, tuple)): + values = np.array(values, dtype=np.object_) + if not hasattr(values, 'dtype'): + values = np.array([values], dtype=np.object_) + # convert dates if convert_dates and values.dtype == np.object_: # we take an aggressive stance and convert to datetime64[ns] if convert_dates == 'coerce': - new_values = _possibly_cast_to_datetime(values, 'M8[ns]', coerce = True) + new_values = _possibly_cast_to_datetime( + values, 'M8[ns]', coerce=True) # if we are all nans then leave me alone if not isnull(new_values).all(): values = new_values else: - values = lib.maybe_convert_objects(values, convert_datetime=convert_dates) + values = lib.maybe_convert_objects( + values, convert_datetime=convert_dates) # convert to numeric if convert_numeric and values.dtype == np.object_: try: - 
new_values = lib.maybe_convert_numeric(values,set(),coerce_numeric=True) + new_values = lib.maybe_convert_numeric( + values, set(), coerce_numeric=True) # if we are all nans then leave me alone if not isnull(new_values).all(): @@ -1134,19 +1234,24 @@ def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True): return values + def _possibly_castable(arr): return arr.dtype not in _POSSIBLY_CAST_DTYPES + def _possibly_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ - if isinstance(values, (list,tuple)): + if isinstance(values, (list, tuple)): values = lib.list_to_object_array(values) - if getattr(values,'dtype',None) == np.object_: + if getattr(values, 'dtype', None) == np.object_: + if hasattr(values, 'values'): + values = values.values values = lib.maybe_convert_objects(values) return values + def _possibly_cast_to_timedelta(value, coerce=True): """ try to cast to timedelta64, if already a timedeltalike, then make sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards, @@ -1191,37 +1296,41 @@ def convert(td, type): return np.array([ convert(v,dtype) for v in value ], dtype='m8[ns]') # deal with numpy not being able to handle certain timedelta operations - if isinstance(value,np.ndarray) and value.dtype.kind == 'm': + if isinstance(value, (ABCSeries, np.ndarray)) and value.dtype.kind == 'm': if value.dtype != 'timedelta64[ns]': value = value.astype('timedelta64[ns]') return value - # we don't have a timedelta, but we want to try to convert to one (but don't force it) + # we don't have a timedelta, but we want to try to convert to one (but + # don't force it) if coerce: - - new_value = tslib.array_to_timedelta64(value.astype(object), coerce=False) + new_value = tslib.array_to_timedelta64( + _values_from_object(value).astype(object), coerce=False) if new_value.dtype == 'i8': - value = np.array(new_value,dtype='timedelta64[ns]') + value = np.array(new_value, dtype='timedelta64[ns]') return 
value -def _possibly_cast_to_datetime(value, dtype, coerce = False): + +def _possibly_cast_to_datetime(value, dtype, coerce=False): """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ if dtype is not None: if isinstance(dtype, compat.string_types): dtype = np.dtype(dtype) - is_datetime64 = is_datetime64_dtype(dtype) + is_datetime64 = is_datetime64_dtype(dtype) is_timedelta64 = is_timedelta64_dtype(dtype) if is_datetime64 or is_timedelta64: # force the dtype if needed if is_datetime64 and dtype != _NS_DTYPE: - raise TypeError("cannot convert datetimelike to dtype [%s]" % dtype) + raise TypeError( + "cannot convert datetimelike to dtype [%s]" % dtype) elif is_timedelta64 and dtype != _TD_DTYPE: - raise TypeError("cannot convert timedeltalike to dtype [%s]" % dtype) + raise TypeError( + "cannot convert timedeltalike to dtype [%s]" % dtype) if np.isscalar(value): if value == tslib.iNaT or isnull(value): @@ -1256,35 +1365,37 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): # don't change the value unless we find a datetime set v = value if not is_list_like(v): - v = [ v ] + v = [v] if len(v): inferred_type = lib.infer_dtype(v) - if inferred_type in ['datetime','datetime64']: + if inferred_type in ['datetime', 'datetime64']: try: value = tslib.array_to_datetime(np.array(v)) except: pass - elif inferred_type in ['timedelta','timedelta64']: + elif inferred_type in ['timedelta', 'timedelta64']: value = _possibly_cast_to_timedelta(value) return value def _is_bool_indexer(key): - if isinstance(key, np.ndarray) and key.dtype == np.object_: - key = np.asarray(key) - - if not lib.is_bool_array(key): - if isnull(key).any(): - raise ValueError('cannot index with vector containing ' - 'NA / NaN values') - return False - return True - elif isinstance(key, np.ndarray) and key.dtype == np.bool_: - return True + if isinstance(key, (ABCSeries, np.ndarray)): + if key.dtype == np.object_: + key = np.asarray(_values_from_object(key)) + 
+ if len(key) and not lib.is_bool_array(key): + if isnull(key).any(): + raise ValueError('cannot index with vector containing ' + 'NA / NaN values') + return False + return True + elif key.dtype == np.bool_: + return True elif isinstance(key, list): try: - return np.asarray(key).dtype == np.bool_ + arr = np.asarray(key) + return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False @@ -1438,6 +1549,7 @@ def banner(message): bar = '=' * 80 return '%s\n%s\n%s' % (bar, message, bar) + def _long_prod(vals): result = long(1) for x in vals: @@ -1446,12 +1558,14 @@ def _long_prod(vals): class groupby(dict): + """ A simple groupby different from the one in itertools. Does not require the sequence elements to be sorted by keys, however it is slower. """ + def __init__(self, seq, key=lambda x: x): for value in seq: k = key(value) @@ -1618,9 +1732,11 @@ def is_timedelta64_dtype(arr_or_dtype): tipo = arr_or_dtype.dtype.type return issubclass(tipo, np.timedelta64) + def needs_i8_conversion(arr_or_dtype): return is_datetime64_dtype(arr_or_dtype) or is_timedelta64_dtype(arr_or_dtype) + def is_float_dtype(arr_or_dtype): if isinstance(arr_or_dtype, np.dtype): tipo = arr_or_dtype.type @@ -1628,6 +1744,7 @@ def is_float_dtype(arr_or_dtype): tipo = arr_or_dtype.dtype.type return issubclass(tipo, np.floating) + def is_complex_dtype(arr_or_dtype): if isinstance(arr_or_dtype, np.dtype): tipo = arr_or_dtype.type @@ -1652,6 +1769,7 @@ def is_re_compilable(obj): def is_list_like(arg): return hasattr(arg, '__iter__') and not isinstance(arg, compat.string_types) + def _is_sequence(x): try: iter(x) @@ -1671,7 +1789,8 @@ def _is_sequence(x): def _astype_nansafe(arr, dtype, copy=True): - """ return a view if copy is False """ + """ return a view if copy is False, but + need to be very careful as the result shape could change! 
""" if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) @@ -1681,7 +1800,8 @@ def _astype_nansafe(arr, dtype, copy=True): elif dtype == np.int64: return arr.view(dtype) elif dtype != _NS_DTYPE: - raise TypeError("cannot astype a datetimelike from [%s] to [%s]" % (arr.dtype,dtype)) + raise TypeError( + "cannot astype a datetimelike from [%s] to [%s]" % (arr.dtype, dtype)) return arr.astype(_NS_DTYPE) elif is_timedelta64_dtype(arr): if dtype == np.int64: @@ -1730,9 +1850,11 @@ def _all_none(*args): class UTF8Recoder: + """ Iterator that reads an encoded stream and reencodes the input to UTF-8 """ + def __init__(self, f, encoding): self.reader = codecs.getreader(encoding)(f) @@ -1785,6 +1907,7 @@ def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds): return csv.writer(f, dialect=dialect, **kwds) else: class UnicodeReader: + """ A CSV reader which will iterate over lines in the CSV file "f", which is encoded in the given encoding. @@ -1808,6 +1931,7 @@ def __iter__(self): # pragma: no cover return self class UnicodeWriter: + """ A CSV writer which will write rows to CSV file "f", which is encoded in the given encoding. 
@@ -1866,7 +1990,8 @@ def _concat_compat(to_concat, axis=0): to_concat = [x for x in to_concat if x.shape[axis] > 0] # return the empty np array, if nothing to concatenate, #3121 - if not to_concat: return np.array([], dtype=object) + if not to_concat: + return np.array([], dtype=object) is_datetime64 = [x.dtype == _NS_DTYPE for x in to_concat] if all(is_datetime64): @@ -1888,25 +2013,28 @@ def _to_pydatetime(x): return x + def _where_compat(mask, arr1, arr2): if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE: - new_vals = np.where(mask, arr1.view(np.int64), arr2.view(np.int64)) + new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8')) return new_vals.view(_NS_DTYPE) import pandas.tslib as tslib if arr1.dtype == _NS_DTYPE: - arr1 = tslib.ints_to_pydatetime(arr1.view(np.int64)) + arr1 = tslib.ints_to_pydatetime(arr1.view('i8')) if arr2.dtype == _NS_DTYPE: - arr2 = tslib.ints_to_pydatetime(arr2.view(np.int64)) + arr2 = tslib.ints_to_pydatetime(arr2.view('i8')) return np.where(mask, arr1, arr2) + def sentinal_factory(): class Sentinal(object): pass return Sentinal() + def in_interactive_session(): """ check if we're running in an interactive shell @@ -1929,28 +2057,30 @@ def in_qtconsole(): """ try: ip = get_ipython() - front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or - ip.config.get('IPKernelApp',{}).get('parent_appname',"")) + front_end = (ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) if 'qtconsole' in front_end.lower(): return True except: return False return False + def in_ipnb(): """ check if we're inside an IPython Notebook """ try: ip = get_ipython() - front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or - ip.config.get('IPKernelApp',{}).get('parent_appname',"")) + front_end = (ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) if 'notebook' in front_end.lower(): 
return True except: return False return False + def in_ipython_frontend(): """ check if we're inside an an IPython zmq frontend @@ -2008,19 +2138,19 @@ def _pprint_seq(seq, _nest_lvl=0, **kwds): s = iter(seq) r = [] - for i in range(min(nitems,len(seq))): # handle sets, no slicing + for i in range(min(nitems, len(seq))): # handle sets, no slicing r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds)) body = ", ".join(r) if nitems < len(seq): - body+= ", ..." - elif isinstance(seq,tuple) and len(seq) == 1: + body += ", ..." + elif isinstance(seq, tuple) and len(seq) == 1: body += ',' return fmt % body -def _pprint_dict(seq, _nest_lvl=0,**kwds): +def _pprint_dict(seq, _nest_lvl=0, **kwds): """ internal. pprinter for iterables. you should probably use pprint_thing() rather then calling this directly. @@ -2068,10 +2198,10 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False, result - unicode object on py2, str on py3. Always Unicode. """ - def as_escaped_unicode(thing,escape_chars=escape_chars): + def as_escaped_unicode(thing, escape_chars=escape_chars): # Unicode is fine, else we try to decode using utf-8 and 'replace' # if that's not it either, we have no way of knowing and the user - #should deal with it himself. + # should deal with it himself. 
try: result = compat.text_type(thing) # we should try this first @@ -2100,7 +2230,7 @@ def as_escaped_unicode(thing,escape_chars=escape_chars): return compat.text_type(thing) elif (isinstance(thing, dict) and _nest_lvl < get_option("display.pprint_nest_depth")): - result = _pprint_dict(thing, _nest_lvl,quote_strings=True) + result = _pprint_dict(thing, _nest_lvl, quote_strings=True) elif _is_sequence(thing) and _nest_lvl < \ get_option("display.pprint_nest_depth"): result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars, @@ -2133,6 +2263,7 @@ def console_encode(object, **kwds): return pprint_thing_encoded(object, get_option("display.encoding")) + def load(path): # TODO remove in 0.13 """ Load pickled pandas object (or any other pickled object) from the specified @@ -2155,6 +2286,7 @@ def load(path): # TODO remove in 0.13 from pandas.io.pickle import read_pickle return read_pickle(path) + def save(obj, path): # TODO remove in 0.13 ''' Pickle (serialize) object to input file path diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py index 27c06e23b5a9e..b1bd104ce48a5 100644 --- a/pandas/core/expressions.py +++ b/pandas/core/expressions.py @@ -6,6 +6,7 @@ """ import numpy as np +from pandas.core.common import _values_from_object try: import numexpr as ne @@ -58,7 +59,7 @@ def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs): def _can_use_numexpr(op, op_str, a, b, dtype_check): """ return a boolean if we WILL be using numexpr """ if op_str is not None: - + # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: @@ -89,9 +90,9 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False, **eval_kwargs): a_value = a_value.values if hasattr(b_value,'values'): b_value = b_value.values - result = ne.evaluate('a_value %s b_value' % op_str, - local_dict={ 'a_value' : a_value, - 'b_value' : b_value }, + result = ne.evaluate('a_value %s b_value' % op_str, + local_dict={ 'a_value' : 
a_value, + 'b_value' : b_value }, casting='safe', **eval_kwargs) except (ValueError) as detail: if 'unknown type object' in str(detail): @@ -105,8 +106,8 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False, **eval_kwargs): return result -def _where_standard(cond, a, b, raise_on_error=True): - return np.where(cond, a, b) +def _where_standard(cond, a, b, raise_on_error=True): + return np.where(_values_from_object(cond), _values_from_object(a), _values_from_object(b)) def _where_numexpr(cond, a, b, raise_on_error = False): result = None @@ -123,8 +124,8 @@ def _where_numexpr(cond, a, b, raise_on_error = False): b_value = b_value.values result = ne.evaluate('where(cond_value,a_value,b_value)', local_dict={ 'cond_value' : cond_value, - 'a_value' : a_value, - 'b_value' : b_value }, + 'a_value' : a_value, + 'b_value' : b_value }, casting='safe') except (ValueError) as detail: if 'unknown type object' in str(detail): diff --git a/pandas/core/format.py b/pandas/core/format.py index 30856d371c084..e84916009bbe1 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -388,7 +388,7 @@ def write(buf, frame, column_format, strcols): def _format_col(self, i): formatter = self._get_formatter(i) - return format_array(self.frame.icol(i).values, formatter, + return format_array(self.frame.icol(i).get_values(), formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 20a2dab06368b..200e4ce9322fd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -25,7 +25,7 @@ from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast, _is_sequence, - _infer_dtype_from_scalar) + _infer_dtype_from_scalar, _values_from_object) from pandas.core.generic import NDFrame from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_NDFrameIndexer, _maybe_droplevels, @@ -36,6 +36,7 @@ 
create_block_manager_from_blocks) from pandas.core.series import Series, _radd_compat import pandas.core.expressions as expressions +from pandas.sparse.array import SparseArray from pandas.compat.scipy import scoreatpercentile as _quantile from pandas import compat from pandas.util.terminal import get_terminal_size @@ -189,16 +190,17 @@ class DataConflictError(Exception): # Factory helper methods -def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None, **eval_kwargs): +def _arith_method(op, name, str_rep=None, default_axis='columns', fill_zeros=None, **eval_kwargs): def na_op(x, y): try: - result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs) - result = com._fill_zeros(result,y,fill_zeros) + result = expressions.evaluate( + op, str_rep, x, y, raise_on_error=True, **eval_kwargs) + result = com._fill_zeros(result, y, fill_zeros) except TypeError: xrav = x.ravel() result = np.empty(x.size, dtype=x.dtype) - if isinstance(y, np.ndarray): + if isinstance(y, (np.ndarray, Series)): yrav = y.ravel() mask = notnull(xrav) & notnull(yrav) result[mask] = op(xrav[mask], yrav[mask]) @@ -206,7 +208,7 @@ def na_op(x, y): mask = notnull(xrav) result[mask] = op(xrav[mask], y) - result, changed = com._maybe_upcast_putmask(result,-mask,np.nan) + result, changed = com._maybe_upcast_putmask(result, -mask, np.nan) result = result.reshape(x.shape) return result @@ -245,7 +247,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return f -def _flex_comp_method(op, name, str_rep = None, default_axis='columns'): +def _flex_comp_method(op, name, str_rep=None, default_axis='columns'): def na_op(x, y): try: @@ -253,7 +255,7 @@ def na_op(x, y): except TypeError: xrav = x.ravel() result = np.empty(x.size, dtype=x.dtype) - if isinstance(y, np.ndarray): + if isinstance(y, (np.ndarray, Series)): yrav = y.ravel() mask = notnull(xrav) & notnull(yrav) result[mask] = op(np.array(list(xrav[mask])), @@ -323,7 +325,7 @@ def 
f(self, other): # straight boolean comparisions we want to allow all columns # (regardless of dtype to pass thru) - return self._combine_const(other, func, raise_on_error = False).fillna(True).astype(bool) + return self._combine_const(other, func, raise_on_error=False).fillna(True).astype(bool) f.__name__ = name @@ -335,6 +337,7 @@ def f(self, other): class DataFrame(NDFrame): + """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like @@ -371,16 +374,13 @@ class DataFrame(NDFrame): read_csv / read_table / read_clipboard """ _auto_consolidate = True - _het_axis = 1 - _info_axis = 'columns' - _col_klass = Series + _verbose_info = True - _AXIS_NUMBERS = { - 'index': 0, - 'columns': 1 - } + @property + def _constructor(self): + return DataFrame - _AXIS_NAMES = dict((v, k) for k, v in compat.iteritems(_AXIS_NUMBERS)) + _constructor_sliced = Series def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): @@ -391,7 +391,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data = data._data if isinstance(data, BlockManager): - mgr = self._init_mgr(data, index, columns, dtype=dtype, copy=copy) + mgr = self._init_mgr( + data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): @@ -403,12 +404,14 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data = data.copy() mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) - elif isinstance(data, np.ndarray): + elif isinstance(data, (np.ndarray, Series)): if data.dtype.names: data_columns, data = _rec_to_dict(data) if columns is None: columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) + elif getattr(data,'name',None): + mgr = 
self._init_dict({ data.name : data }, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) @@ -451,30 +454,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, else: raise PandasError('DataFrame constructor not properly called!') - NDFrame.__init__(self, mgr) - - @classmethod - def _from_axes(cls, data, axes): - # for construction from BlockManager - if isinstance(data, BlockManager): - return cls(data) - else: - columns, index = axes - return cls(data, index=index, columns=columns, copy=False) - - def _init_mgr(self, mgr, index, columns, dtype=None, copy=False): - if columns is not None: - mgr = mgr.reindex_axis(columns, axis=0, copy=False) - if index is not None: - mgr = mgr.reindex_axis(index, axis=1, copy=False) - # do not copy BlockManager unless explicitly done - if copy and dtype is None: - mgr = mgr.copy() - elif dtype is not None: - # avoid copy if we can - if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: - mgr = mgr.astype(dtype) - return mgr + NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): """ @@ -506,7 +486,7 @@ def _init_dict(self, data, index, columns, dtype=None): continue if dtype is None: - # #1783 + # 1783 v = np.empty(len(index), dtype=object) else: v = np.empty(len(index), dtype=dtype) @@ -536,6 +516,10 @@ def _init_ndarray(self, values, index, columns, dtype=None, else: values = values.reindex(index) + # zero len case (GH #2234) + if not len(values) and len(columns): + values = np.empty((0, 1), dtype=object) + values = _prep_ndarray(values, copy=copy) if dtype is not None: @@ -557,11 +541,7 @@ def _init_ndarray(self, values, index, columns, dtype=None, else: columns = _ensure_index(columns) - return create_block_manager_from_blocks([ values.T ], [ columns, index ]) - - def _wrap_array(self, arr, axes, copy=False): - index, columns = axes - return self._constructor(arr, index=index, columns=columns, 
copy=copy) + return create_block_manager_from_blocks([values.T], [columns, index]) @property def _verbose_info(self): @@ -569,14 +549,6 @@ def _verbose_info(self): '0.13. please use "max_info_rows"', FutureWarning) return get_option('display.max_info_rows') is None - @_verbose_info.setter - def _verbose_info(self, value): - warnings.warn('The _verbose_info property will be removed in version ' - '0.13. please use "max_info_rows"', FutureWarning) - - value = None if value else 1000000 - set_option('display.max_info_rows', value) - @property def axes(self): return [self.index, self.columns] @@ -585,8 +557,6 @@ def axes(self): def shape(self): return (len(self.index), len(self.columns)) - #---------------------------------------------------------------------- - # Class behavior def _repr_fits_vertical_(self): """ Check length against max_rows. @@ -594,7 +564,7 @@ def _repr_fits_vertical_(self): max_rows = get_option("display.max_rows") return len(self) <= max_rows - def _repr_fits_horizontal_(self,ignore_width=False): + def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. 
In case off non-interactive session, no @@ -611,15 +581,15 @@ def _repr_fits_horizontal_(self,ignore_width=False): # exceed max columns if ((max_columns and nb_columns > max_columns) or - ((not ignore_width) and width and nb_columns > (width // 2))): + ((not ignore_width) and width and nb_columns > (width // 2))): return False if (ignore_width # used by repr_html under IPython notebook - or not com.in_interactive_session()): # scripts ignore terminal dims + or not com.in_interactive_session()): # scripts ignore terminal dims return True if (get_option('display.width') is not None or - com.in_ipython_frontend()): + com.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: @@ -634,9 +604,9 @@ def _repr_fits_horizontal_(self,ignore_width=False): # and to_string on entire frame may be expensive d = self - if not (max_rows is None): # unlimited rows + if not (max_rows is None): # unlimited rows # min of two, where one may be None - d=d.iloc[:min(max_rows,len(d))] + d = d.iloc[:min(max_rows, len(d))] else: return True @@ -671,7 +641,7 @@ def __unicode__(self): # of terminal, then use expand_repr if (fits_vertical and expand_repr and - len(self.columns) <= max_columns): + len(self.columns) <= max_columns): self.to_string(buf=buf, line_width=width) else: max_info_rows = get_option('display.max_info_rows') @@ -705,7 +675,8 @@ def _repr_html_(self): fits_vertical = self._repr_fits_vertical_() fits_horizontal = False if fits_vertical: - fits_horizontal = self._repr_fits_horizontal_(ignore_width=ipnbh) + fits_horizontal = self._repr_fits_horizontal_( + ignore_width=ipnbh) if fits_horizontal and fits_vertical: return ('<div style="max-height:1000px;' @@ -725,15 +696,6 @@ def _repr_html_(self): else: return None - def __iter__(self): - """ - Iterate over columns of the frame. 
- """ - return iter(self.columns) - - def keys(self): - return self.columns - def iteritems(self): """Iterator over (column, series) pairs""" if self.columns.is_unique and hasattr(self, '_item_cache'): @@ -767,9 +729,7 @@ def iterrows(self): """ columns = self.columns for k, v in zip(self.index, self.values): - s = v.view(Series) - s.index = columns - s.name = k + s = Series(v, index=columns, name=k) yield k, s def itertuples(self, index=True): @@ -789,13 +749,9 @@ def itertuples(self, index=True): items = iteritems def __len__(self): - """Returns length of index""" + """Returns length of info axis, but here we use the index """ return len(self.index) - def __contains__(self, key): - """True if DataFrame has this column""" - return key in self.columns - #---------------------------------------------------------------------- # Arithmetic methods @@ -827,7 +783,8 @@ def __contains__(self, key): # currently causes a floating point exception to occur - so sticking with unaccelerated for now # __mod__ = _arith_method(operator.mod, '__mod__', '%', default_axis=None, fill_zeros=np.nan) - __mod__ = _arith_method(operator.mod, '__mod__', default_axis=None, fill_zeros=np.nan) + __mod__ = _arith_method( + operator.mod, '__mod__', default_axis=None, fill_zeros=np.nan) __radd__ = _arith_method(_radd_compat, '__radd__', default_axis=None) __rmul__ = _arith_method(operator.mul, '__rmul__', default_axis=None) @@ -853,19 +810,11 @@ def __contains__(self, key): __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__', default_axis=None, fill_zeros=np.inf) - def __neg__(self): - arr = operator.neg(self.values) - return self._wrap_array(arr, self.axes, copy=False) - - def __invert__(self): - arr = operator.inv(self.values) - return self._wrap_array(arr, self.axes, copy=False) - # Comparison methods __eq__ = _comp_method(operator.eq, '__eq__', '==') __ne__ = _comp_method(operator.ne, '__ne__', '!=') - __lt__ = _comp_method(operator.lt, '__lt__', '<' ) - __gt__ = 
_comp_method(operator.gt, '__gt__', '>' ) + __lt__ = _comp_method(operator.lt, '__lt__', '<') + __gt__ = _comp_method(operator.gt, '__gt__', '>') __le__ = _comp_method(operator.le, '__le__', '<=') __ge__ = _comp_method(operator.ge, '__ge__', '>=') @@ -1062,9 +1011,11 @@ def from_records(cls, data, index=None, exclude=None, columns=None, # reorder according to the columns if len(columns) and len(arr_columns): - indexer = _ensure_index(arr_columns).get_indexer(columns) - arr_columns = _ensure_index([ arr_columns[i] for i in indexer ]) - arrays = [ arrays[i] for i in indexer ] + indexer = _ensure_index( + arr_columns).get_indexer(columns) + arr_columns = _ensure_index( + [arr_columns[i] for i in indexer]) + arrays = [arrays[i] for i in indexer] elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) @@ -1147,7 +1098,7 @@ def to_records(self, index=True, convert_datetime64=True): else: ix_vals = [self.index.values] - arrays = ix_vals+ [self[c].values for c in self.columns] + arrays = ix_vals + [self[c].values for c in self.columns] count = 0 index_names = list(self.index.names) @@ -1260,7 +1211,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, from pandas.io.parsers import read_table return read_table(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, - encoding=encoding,tupleize_cols=False) + encoding=encoding, tupleize_cols=False) def to_sparse(self, fill_value=None, kind='block'): """ @@ -1321,7 +1272,7 @@ def to_panel(self): new_blocks = [] for block in selfsorted._data.blocks: newb = block2d_to_blocknd(block.values.T, block.items, shape, - [ major_labels, minor_labels ], + [major_labels, minor_labels], ref_items=selfsorted.columns) new_blocks.append(newb) @@ -1395,11 +1346,12 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None, formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, - quoting=quoting,na_rep=na_rep, + 
quoting=quoting, na_rep=na_rep, float_format=float_format, cols=cols, header=header, index=index, - index_label=index_label,mode=mode, - chunksize=chunksize,engine=kwds.get("engine"), + index_label=index_label, mode=mode, + chunksize=chunksize, engine=kwds.get( + "engine"), tupleize_cols=tupleize_cols) formatter.save() @@ -1463,8 +1415,9 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='', if need_save: excel_writer.save() - def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", - byteorder=None): + def to_stata( + self, fname, convert_dates=None, write_index=True, encoding="latin-1", + byteorder=None): """ A class for writing Stata binary dta files from array-like objects @@ -1494,7 +1447,8 @@ def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin- >>> writer.write_file() """ from pandas.io.stata import StataWriter - writer = StataWriter(fname,self,convert_dates=convert_dates, encoding=encoding, byteorder=byteorder) + writer = StataWriter( + fname, self, convert_dates=convert_dates, encoding=encoding, byteorder=byteorder) writer.write_file() def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs): @@ -1512,7 +1466,8 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs): append: If table exists, insert data. Create if does not exist. 
""" from pandas.io.sql import write_frame - write_frame(self, name, con, flavor=flavor, if_exists=if_exists, **kwargs) + write_frame( + self, name, con, flavor=flavor, if_exists=if_exists, **kwargs) @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, @@ -1662,10 +1617,12 @@ def info(self, verbose=True, buf=None, max_cols=None): # hack if max_cols is None: - max_cols = get_option('display.max_info_columns',len(self.columns)+1) + max_cols = get_option( + 'display.max_info_columns', len(self.columns) + 1) if verbose and len(self.columns) <= max_cols: - lines.append('Data columns (total %d columns):' % len(self.columns)) + lines.append('Data columns (total %d columns):' % + len(self.columns)) space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4 counts = self.count() if len(cols) != len(counts): @@ -1684,135 +1641,20 @@ def info(self, verbose=True, buf=None, max_cols=None): @property def dtypes(self): - return self.apply(lambda x: x.dtype) - - def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): - """ - Attempt to infer better dtype for object columns - - Parameters - ---------- - convert_dates : if True, attempt to soft convert_dates, if 'coerce', force conversion (and non-convertibles get NaT) - convert_numeric : if True attempt to coerce to numerbers (including strings), non-convertibles get NaN - copy : boolean, return a copy if True (True by default) - - Returns - ------- - converted : DataFrame - """ - return self._constructor(self._data.convert(convert_dates=convert_dates, - convert_numeric=convert_numeric, - copy=copy)) - - #---------------------------------------------------------------------- - # properties for index and columns - - columns = lib.AxisProperty(0) - index = lib.AxisProperty(1) - - def as_matrix(self, columns=None): - """ - Convert the frame to its Numpy-array matrix representation. 
Columns - are presented in sorted order unless a specific list of columns is - provided. - - NOTE: the dtype will be a lower-common-denominator dtype (implicit upcasting) - that is to say if the dtypes (even of numeric types) are mixed, the one that accomodates all will be chosen - use this with care if you are not dealing with the blocks - - e.g. if the dtypes are float16,float32 -> float32 - float16,float32,float64 -> float64 - int32,uint8 -> int32 - - Parameters - ---------- - columns : array-like - Specific column order + return self.apply(lambda x: x.dtype, reduce=False) - Returns - ------- - values : ndarray - If the DataFrame is heterogeneous and contains booleans or objects, - the result will be of dtype=object - """ - self._consolidate_inplace() - return self._data.as_matrix(columns).T - - values = property(fget=as_matrix) - - def as_blocks(self, columns=None): - """ - Convert the frame to a dict of dtype -> DataFrames that each has a homogeneous dtype. - are presented in sorted order unless a specific list of columns is - provided. - - NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) - - Parameters - ---------- - columns : array-like - Specific column order - - Returns - ------- - values : a list of DataFrames - """ - self._consolidate_inplace() - - bd = dict() - for b in self._data.blocks: - b = b.reindex_items_from(columns or b.items) - bd[str(b.dtype)] = DataFrame(BlockManager([ b ], [ b.items, self.index ])) - return bd - - blocks = property(fget=as_blocks) + @property + def ftypes(self): + return self.apply(lambda x: x.ftype, reduce=False) def transpose(self): - """ - Returns a DataFrame with the rows/columns switched. 
If the DataFrame is - homogeneously-typed, the data is not copied - """ - return self._constructor(data=self.values.T, index=self.columns, - columns=self.index, copy=False) + return super(DataFrame, self).transpose(1, 0) T = property(transpose) - def swapaxes(self, i, j): - """ - Like ndarray.swapaxes, equivalent to transpose - - Returns - ------- - swapped : DataFrame - View on original data (no copy) - """ - if i in (0, 1) and j in (0, 1): - if i == j: - return self - return self._constructor(data=self.values.T, index=self.columns, - columns=self.index, copy=False) - else: - raise ValueError('Axis numbers must be in (0, 1)') - #---------------------------------------------------------------------- # Picklability - def __getstate__(self): - return self._data - - def __setstate__(self, state): - # old DataFrame pickle - if isinstance(state, BlockManager): - self._data = state - elif isinstance(state[0], dict): # pragma: no cover - self._unpickle_frame_compat(state) - else: # pragma: no cover - # old pickling format, for compatibility - self._unpickle_matrix_compat(state) - - # ordinarily created in NDFrame - self._item_cache = {} - # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array @@ -1846,15 +1688,6 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover self._data = dm._data #---------------------------------------------------------------------- - # Array interface - - def __array__(self, dtype=None): - return self.values - - def __array_wrap__(self, result): - return self._constructor(result, index=self.index, - columns=self.columns, copy=False) - #---------------------------------------------------------------------- # Getting and setting elements @@ -1873,7 +1706,7 @@ def get_value(self, index, col): """ series = self._get_item_cache(col) engine = self.index._engine - return engine.get_value(series, index) + return engine.get_value(series.values, index) def set_value(self, 
index, col, value): """ @@ -1894,7 +1727,7 @@ def set_value(self, index, col, value): try: series = self._get_item_cache(col) engine = self.index._engine - engine.set_value(series, index, value) + engine.set_value(series.values, index, value) return self except KeyError: new_index, new_columns = self._expand_axes((index, col)) @@ -1911,10 +1744,10 @@ def set_value(self, index, col, value): return result.set_value(index, col, value) def irow(self, i, copy=False): - return self._ixs(i,axis=0) + return self._ixs(i, axis=0) def icol(self, i): - return self._ixs(i,axis=1) + return self._ixs(i, axis=1) def _ixs(self, i, axis=0, copy=False): """ @@ -1968,11 +1801,12 @@ def _ixs(self, i, axis=0, copy=False): return self.take(i, axis=1, convert=True) values = self._data.iget(i) - return self._col_klass.from_array(values, index=self.index, - name=label) + return self._constructor_sliced.from_array( + values, index=self.index, + name=label, fastpath=True) def iget_value(self, i, j): - return self.iat[i,j] + return self.iat[i, j] def __getitem__(self, key): @@ -1981,7 +1815,7 @@ def __getitem__(self, key): if indexer is not None: return self._getitem_slice(indexer) - if isinstance(key, (np.ndarray, list)): + if isinstance(key, (Series, np.ndarray, list)): # either boolean or fancy integer index return self._getitem_array(key) elif isinstance(key, DataFrame): @@ -2023,7 +1857,7 @@ def _getitem_array(self, key): def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) - if isinstance(loc, (slice, np.ndarray)): + if isinstance(loc, (slice, Series, np.ndarray)): new_columns = self.columns[loc] result_columns = _maybe_droplevels(new_columns, key) if self._is_mixed_type: @@ -2051,15 +1885,9 @@ def _getitem_frame(self, key): return self.where(key) def _slice(self, slobj, axis=0, raise_on_error=False): - if axis == 0: - mgr_axis = 1 - else: - mgr_axis = 0 - - self._consolidate_inplace() - new_data = self._data.get_slice(slobj, axis=mgr_axis, - raise_on_error=raise_on_error) 
- + axis = self._get_block_manager_axis(axis) + new_data = self._data.get_slice( + slobj, axis=axis, raise_on_error=raise_on_error) return self._constructor(new_data) def _box_item_values(self, key, values): @@ -2067,32 +1895,11 @@ def _box_item_values(self, key, values): if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: - return Series.from_array(values, index=self.index, name=items) - - def __getattr__(self, name): - """After regular attribute access, try looking up the name of a column. - This allows simpler access to columns for interactive use.""" - if name in self.columns: - return self[name] - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) - - def __setattr__(self, name, value): - """After regular attribute access, try looking up the name of a column. - This allows simpler access to columns for interactive use.""" - if name == '_data': - super(DataFrame, self).__setattr__(name, value) - else: - try: - existing = getattr(self, name) - if isinstance(existing, Index): - super(DataFrame, self).__setattr__(name, value) - elif name in self.columns: - self[name] = value - else: - object.__setattr__(self, name, value) - except (AttributeError, TypeError): - object.__setattr__(self, name, value) + return self._box_col_values(values, items) + + def _box_col_values(self, values, items): + """ provide boxed values for a column """ + return self._constructor_sliced.from_array(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): # see if we can slice the rows @@ -2100,7 +1907,7 @@ def __setitem__(self, key, value): if indexer is not None: return self._setitem_slice(indexer, value) - if isinstance(key, (np.ndarray, list)): + if isinstance(key, (Series, np.ndarray, list)): self._setitem_array(key, value) elif isinstance(key, DataFrame): self._setitem_frame(key, value) @@ -2138,7 +1945,8 @@ def _setitem_frame(self, key, value): if self._is_mixed_type: if 
not self._is_numeric_mixed_type: - raise ValueError('Cannot do boolean setting on mixed-type frame') + raise ValueError( + 'Cannot do boolean setting on mixed-type frame') self.where(-key, value, inplace=True) @@ -2168,7 +1976,8 @@ def insert(self, loc, column, value, allow_duplicates=False): value : int, Series, or array-like """ value = self._sanitize_column(column, value) - self._data.insert(loc, column, value, allow_duplicates=allow_duplicates) + self._data.insert( + loc, column, value, allow_duplicates=allow_duplicates) def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new @@ -2231,22 +2040,12 @@ def _sanitize_column(self, key, value): else: # upcast the scalar dtype, value = _infer_dtype_from_scalar(value) - value = np.array(np.repeat(value, len(self.index)), dtype=dtype) + value = np.array( + np.repeat(value, len(self.index)), dtype=dtype) value = com._possibly_cast_to_datetime(value, dtype) return np.atleast_2d(np.asarray(value)) - def pop(self, item): - """ - Return column and drop from frame. Raise KeyError if not found. 
- - Returns - ------- - column : Series - """ - return NDFrame.pop(self, item) - - # to support old APIs @property def _series(self): return self._data.get_series_dict() @@ -2438,237 +2237,42 @@ def lookup(self, row_labels, col_labels): #---------------------------------------------------------------------- # Reindexing and alignment - def align(self, other, join='outer', axis=None, level=None, copy=True, - fill_value=NA, method=None, limit=None, fill_axis=0): - """ - Align two DataFrame object on their index and columns with the - specified join method for each axis Index - - Parameters - ---------- - other : DataFrame or Series - join : {'outer', 'inner', 'left', 'right'}, default 'outer' - axis : {0, 1, None}, default None - Align on index (0), columns (1), or both (None) - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - copy : boolean, default True - Always returns new objects. If copy=False and no reindexing is - required then original objects are returned. - fill_value : scalar, default np.NaN - Value to use for missing values. 
Defaults to NaN, but can be any - "compatible" value - method : str, default None - limit : int, default None - fill_axis : {0, 1}, default 0 - Filling axis, method and limit - - Returns - ------- - (left, right) : (DataFrame, type of other) - Aligned objects - """ - if axis is not None: - axis = self._get_axis_number(axis) - if isinstance(other, DataFrame): - return self._align_frame(other, join=join, axis=axis, level=level, - copy=copy, fill_value=fill_value, - method=method, limit=limit, - fill_axis=fill_axis) - elif isinstance(other, Series): - return self._align_series(other, join=join, axis=axis, level=level, - copy=copy, fill_value=fill_value, - method=method, limit=limit, - fill_axis=fill_axis) - else: # pragma: no cover - raise TypeError('unsupported type: %s' % type(other)) - - def _align_frame(self, other, join='outer', axis=None, level=None, - copy=True, fill_value=NA, method=None, limit=None, - fill_axis=0): - # defaults - join_index, join_columns = None, None - ilidx, iridx = None, None - clidx, cridx = None, None - - if axis is None or axis == 0: - if not self.index.equals(other.index): - join_index, ilidx, iridx = \ - self.index.join(other.index, how=join, level=level, - return_indexers=True) - - if axis is None or axis == 1: - if not self.columns.equals(other.columns): - join_columns, clidx, cridx = \ - self.columns.join(other.columns, how=join, level=level, - return_indexers=True) - - left = self._reindex_with_indexers(join_index, ilidx, - join_columns, clidx, copy, - fill_value=fill_value) - right = other._reindex_with_indexers(join_index, iridx, - join_columns, cridx, copy, - fill_value=fill_value) - - if method is not None: - left = left.fillna(axis=fill_axis, method=method, limit=limit) - right = right.fillna(axis=fill_axis, method=method, limit=limit) - - return left, right - - def _align_series(self, other, join='outer', axis=None, level=None, - copy=True, fill_value=None, method=None, limit=None, - fill_axis=0): - fdata = self._data - if 
axis == 0: - join_index = self.index - lidx, ridx = None, None - if not self.index.equals(other.index): - join_index, lidx, ridx = self.index.join(other.index, how=join, - return_indexers=True) - - if lidx is not None: - fdata = fdata.reindex_indexer(join_index, lidx, axis=1) - elif axis == 1: - join_index = self.columns - lidx, ridx = None, None - if not self.columns.equals(other.index): - join_index, lidx, ridx = \ - self.columns.join(other.index, how=join, - return_indexers=True) - - if lidx is not None: - fdata = fdata.reindex_indexer(join_index, lidx, axis=0) - else: - raise ValueError('Must specify axis=0 or 1') - - if copy and fdata is self._data: - fdata = fdata.copy() - - left_result = DataFrame(fdata) - right_result = other if ridx is None else other.reindex(join_index) - - fill_na = notnull(fill_value) or (method is not None) - if fill_na: - return (left_result.fillna(fill_value, method=method, limit=limit, - axis=fill_axis), - right_result.fillna(fill_value, method=method, - limit=limit)) - else: - return left_result, right_result - - def reindex(self, index=None, columns=None, method=None, level=None, - fill_value=NA, limit=None, copy=True, takeable=False): - """Conform DataFrame to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. A new object - is produced unless the new index is equivalent to the current one and - copy=False - - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. 
Preferably an Index object to - avoid duplicating data - columns : array-like, optional - Same usage as index argument - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - fill_value : scalar, default np.NaN - Value to use for missing values. Defaults to NaN, but can be any - "compatible" value - limit : int, default None - Maximum size gap to forward or backward fill - takeable : the labels are locations (and not labels) - - Examples - -------- - >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C']) - - Returns - ------- - reindexed : same type as calling instance - """ - self._consolidate_inplace() + def _reindex_axes(self, axes, level, limit, method, fill_value, copy, takeable=False): frame = self - if (index is not None and columns is not None - and method is None and level is None - and not self._is_mixed_type): - return self._reindex_multi(index, columns, copy, fill_value) - + columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, copy, level, - fill_value, limit, takeable) + fill_value, limit, takeable=takeable) + index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, - fill_value, limit, takeable) + fill_value, limit, takeable=takeable) return frame - def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, - limit=None, fill_value=NA): - """Conform DataFrame to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. 
A new object - is produced unless the new index is equivalent to the current one and - copy=False + def _reindex_index(self, new_index, method, copy, level, fill_value=NA, + limit=None, takeable=False): + new_index, indexer = self.index.reindex(new_index, method, level, + limit=limit, copy_if_needed=True, + takeable=takeable) + return self._reindex_with_indexers({0: [new_index, indexer]}, + copy=copy, fill_value=fill_value) - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. Preferably an Index object to - avoid duplicating data - axis : {0, 1} - 0 -> index (rows) - 1 -> columns - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - limit : int, default None - Maximum size gap to forward or backward fill + def _reindex_columns(self, new_columns, copy, level, fill_value=NA, + limit=None, takeable=False): + new_columns, indexer = self.columns.reindex(new_columns, level=level, + limit=limit, copy_if_needed=True, + takeable=takeable) + return self._reindex_with_indexers({1: [new_columns, indexer]}, + copy=copy, fill_value=fill_value) - Examples - -------- - >>> df.reindex_axis(['A', 'B', 'C'], axis=1) + def _reindex_multi(self, axes, copy, fill_value): + """ we are guaranteed non-Nones in the axes! 
""" - See also - -------- - DataFrame.reindex, DataFrame.reindex_like - - Returns - ------- - reindexed : same type as calling instance - """ - self._consolidate_inplace() - axis = self._get_axis_number(axis) - if axis == 0: - return self._reindex_index(labels, method, copy, level, - fill_value=fill_value, - limit=limit) - elif axis == 1: - return self._reindex_columns(labels, copy, level, - fill_value=fill_value, - limit=limit) - else: # pragma: no cover - raise ValueError('Must specify axis=0 or 1') - - def _reindex_multi(self, new_index, new_columns, copy, fill_value): - new_index, row_indexer = self.index.reindex(new_index) - new_columns, col_indexer = self.columns.reindex(new_columns) + new_index, row_indexer = self.index.reindex(axes['index']) + new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer @@ -2677,56 +2281,12 @@ def _reindex_multi(self, new_index, new_columns, copy, fill_value): return self._constructor(new_values, index=new_index, columns=new_columns) elif row_indexer is not None: - return self._reindex_with_indexers(new_index, row_indexer, - None, None, copy, fill_value) + return self._reindex_with_indexers({0: [new_index, row_indexer]}, copy=copy, fill_value=fill_value) elif col_indexer is not None: - return self._reindex_with_indexers(None, None, - new_columns, col_indexer, - copy, fill_value) + return self._reindex_with_indexers({1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) else: return self.copy() if copy else self - def _reindex_index(self, new_index, method, copy, level, fill_value=NA, - limit=None, takeable=False): - new_index, indexer = self.index.reindex(new_index, method, level, - limit=limit, copy_if_needed=True, - takeable=takeable) - return self._reindex_with_indexers(new_index, indexer, None, None, - copy, fill_value) - - def _reindex_columns(self, new_columns, copy, level, fill_value=NA, - limit=None, 
takeable=False): - new_columns, indexer = self.columns.reindex(new_columns, level=level, - limit=limit, copy_if_needed=True, - takeable=takeable) - return self._reindex_with_indexers(None, None, new_columns, indexer, - copy, fill_value) - - def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, - copy, fill_value): - new_data = self._data - if row_indexer is not None: - row_indexer = com._ensure_int64(row_indexer) - new_data = new_data.reindex_indexer(index, row_indexer, axis=1, - fill_value=fill_value) - elif index is not None and index is not new_data.axes[1]: - new_data = new_data.copy(deep=copy) - new_data.axes[1] = index - - if col_indexer is not None: - # TODO: speed up on homogeneous DataFrame objects - col_indexer = com._ensure_int64(col_indexer) - new_data = new_data.reindex_indexer(columns, col_indexer, axis=0, - fill_value=fill_value) - elif columns is not None and columns is not new_data.axes[0]: - new_data = new_data.reindex_items(columns, copy=copy, - fill_value=fill_value) - - if copy and new_data is self._data: - new_data = new_data.copy() - - return DataFrame(new_data) - def reindex_like(self, other, method=None, copy=True, limit=None, fill_value=NA): """ @@ -2754,8 +2314,6 @@ def reindex_like(self, other, method=None, copy=True, limit=None, method=method, copy=copy, limit=limit, fill_value=fill_value) - truncate = generic.truncate - def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ @@ -2883,7 +2441,8 @@ def _maybe_cast(values, labels=None): mask = labels == -1 values = values.take(labels) if mask.any(): - values, changed = com._maybe_upcast_putmask(values,mask,np.nan) + values, changed = com._maybe_upcast_putmask( + values, mask, np.nan) return values @@ -2968,7 +2527,8 @@ def take(self, indices, axis=0, convert=True): # check/convert indicies here if convert: axis = self._get_axis_number(axis) - indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) + indices = 
_maybe_convert_indices( + indices, len(self._get_axis(axis))) if self._is_mixed_type: if axis == 0: @@ -2987,46 +2547,12 @@ def take(self, indices, axis=0, convert=True): else: new_columns = self.columns.take(indices) new_index = self.index - return DataFrame(new_values, index=new_index, - columns=new_columns) + return self._constructor(new_values, index=new_index, + columns=new_columns) #---------------------------------------------------------------------- # Reindex-based selection methods - def filter(self, items=None, like=None, regex=None): - """ - Restrict frame's columns to set of items or wildcard - - Parameters - ---------- - items : list-like - List of columns to restrict to (must not all be present) - like : string - Keep columns where "arg in col == True" - regex : string (regular expression) - Keep columns with re.search(regex, col) == True - - Notes - ----- - Arguments are mutually exclusive, but this is not checked for - - Returns - ------- - DataFrame with filtered columns - """ - import re - if items is not None: - return self.reindex(columns=[r for r in items if r in self]) - elif like: - matchf = lambda x: (like in x if isinstance(x, compat.string_types) - else like in str(x)) - return self.select(matchf, axis=1) - elif regex: - matcher = re.compile(regex) - return self.select(lambda x: matcher.search(x) is not None, axis=1) - else: - raise ValueError('items was None!') - def dropna(self, axis=0, how='any', thresh=None, subset=None): """ Return object with labels on given axis omitted where alternately any @@ -3143,13 +2669,13 @@ def _m8_to_i8(x): if np.iterable(cols) and not isinstance(cols, compat.string_types): if isinstance(cols, tuple): if cols in self.columns: - values = [self[cols]] + values = [self[cols].values] else: values = [_m8_to_i8(self[x].values) for x in cols] else: values = [_m8_to_i8(self[x].values) for x in cols] else: - values = [self[cols]] + values = [self[cols].values] keys = lib.fast_zip_fillna(values) duplicated = 
lib.duplicated(keys, take_last=take_last) @@ -3374,373 +2900,6 @@ def reorder_levels(self, order, axis=0): result.columns = result.columns.reorder_levels(order) return result - #---------------------------------------------------------------------- - # Filling NA's - - def fillna(self, value=None, method=None, axis=0, inplace=False, - limit=None, downcast=None): - """ - Fill NA/NaN values using the specified method - - Parameters - ---------- - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - value : scalar or dict - Value to use to fill holes (e.g. 0), alternately a dict of values - specifying which value to use for each column (columns not in the - dict will not be filled). This value cannot be a list. - axis : {0, 1}, default 0 - 0: fill column-by-column - 1: fill row-by-row - inplace : boolean, default False - If True, fill the DataFrame in place. Note: this will modify any - other views on this DataFrame, like if you took a no-copy slice of - an existing DataFrame, for example a column in a DataFrame. 
Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - downcast : dict, default is None, a dict of item->dtype of what to - downcast if possible - - See also - -------- - reindex, asfreq - - Returns - ------- - filled : DataFrame - """ - if isinstance(value, (list, tuple)): - raise TypeError('"value" parameter must be a scalar or dict, but ' - 'you passed a "{0}"'.format(type(value).__name__)) - self._consolidate_inplace() - - axis = self._get_axis_number(axis) - if value is None: - if method is None: - raise ValueError('must specify a fill method or value') - if self._is_mixed_type and axis == 1: - if inplace: - raise NotImplementedError() - return self.T.fillna(method=method, limit=limit).T - - method = com._clean_fill_method(method) - new_data = self._data.interpolate(method = method, - axis = axis, - limit = limit, - inplace = inplace, - coerce = True) - else: - if method is not None: - raise ValueError('cannot specify both a fill method and value') - # Float type values - if len(self.columns) == 0: - return self - if isinstance(value, (dict, Series)): - if axis == 1: - raise NotImplementedError('Currently only can fill ' - 'with dict/Series column ' - 'by column') - - result = self if inplace else self.copy() - for k, v in compat.iteritems(value): - if k not in result: - continue - result[k].fillna(v, inplace=True) - return result - else: - new_data = self._data.fillna(value, inplace=inplace, - downcast=downcast) - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - - def ffill(self, axis=0, inplace=False, limit=None): - return self.fillna(method='ffill', axis=axis, inplace=inplace, - limit=limit) - - def bfill(self, axis=0, inplace=False, limit=None): - return self.fillna(method='bfill', axis=axis, inplace=inplace, - limit=limit) - - def replace(self, to_replace=None, value=None, inplace=False, limit=None, - regex=False, 
method=None, axis=None): - """ - Replace values given in 'to_replace' with 'value'. - - Parameters - ---------- - to_replace : str, regex, list, dict, Series, numeric, or None - - * str or regex: - - - str: string exactly matching `to_replace` will be replaced - with `value` - - regex: regexs matching `to_replace` will be replaced with - `value` - - * list of str, regex, or numeric: - - - First, if `to_replace` and `value` are both lists, they - **must** be the same length. - - Second, if ``regex=True`` then all of the strings in **both** - lists will be interpreted as regexs otherwise they will match - directly. This doesn't matter much for `value` since there - are only a few possible substitution regexes you can use. - - str and regex rules apply as above. - - * dict: - - - Nested dictionaries, e.g., {'a': {'b': nan}}, are read as - follows: look in column 'a' for the value 'b' and replace it - with nan. You can nest regular expressions as well. Note that - column names (the top-level dictionary keys in a nested - dictionary) **cannot** be regular expressions. - - Keys map to column names and values map to substitution - values. You can treat this as a special case of passing two - lists except that you are specifying the column to search in. - - * None: - - - This means that the ``regex`` argument must be a string, - compiled regular expression, or list, dict, ndarray or Series - of such elements. If `value` is also ``None`` then this - **must** be a nested dictionary or ``Series``. - - See the examples section for examples of each of these. - value : scalar, dict, list, str, regex, default None - Value to use to fill holes (e.g. 0), alternately a dict of values - specifying which value to use for each column (columns not in the - dict will not be filled). Regular expressions, strings and lists or - dicts of such objects are also allowed. - inplace : boolean, default False - If True, fill the DataFrame in place. 
Note: this will modify any - other views on this DataFrame, like if you took a no-copy slice of - an existing DataFrame, for example a column in a DataFrame. Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - regex : bool or same types as `to_replace`, default False - Whether to interpret `to_replace` and/or `value` as regular - expressions. If this is ``True`` then `to_replace` *must* be a - string. Otherwise, `to_replace` must be ``None`` because this - parameter will be interpreted as a regular expression or a list, - dict, or array of regular expressions. - - See also - -------- - reindex, asfreq, fillna - - Returns - ------- - filled : DataFrame - - Raises - ------ - AssertionError - * If `regex` is not a ``bool`` and `to_replace` is not ``None``. - TypeError - * If `to_replace` is a ``dict`` and `value` is not a ``list``, - ``dict``, ``ndarray``, or ``Series`` - * If `to_replace` is ``None`` and `regex` is not compilable into a - regular expression or is a list, dict, ndarray, or Series. - ValueError - * If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but - they are not the same length. - - Notes - ----- - * Regex substitution is performed under the hood with ``re.sub``. The - rules for substitution for ``re.sub`` are the same. - * Regular expressions will only substitute on strings, meaning you - cannot provide, for example, a regular expression matching floating - point numbers and expect the columns in your frame that have a - numeric dtype to be matched. However, if those floating point numbers - *are* strings, then you can do this. - * This method has *a lot* of options. You are encouraged to experiment - and play with this method to gain intuition about how it works. 
- - """ - if not com.is_bool(regex) and to_replace is not None: - raise AssertionError("'to_replace' must be 'None' if 'regex' is " - "not a bool") - if method is not None: - warnings.warn('the "method" argument is deprecated and will be removed in' - 'v0.13; this argument has no effect') - - if axis is not None: - warnings.warn('the "axis" argument is deprecated and will be removed in' - 'v0.13; this argument has no effect') - - self._consolidate_inplace() - - if value is None: - if not isinstance(to_replace, (dict, Series)): - if not isinstance(regex, (dict, Series)): - raise TypeError('If "to_replace" and "value" are both None' - ' then regex must be a mapping') - to_replace = regex - regex = True - - items = list(to_replace.items()) - keys, values = zip(*items) - - are_mappings = [isinstance(v, (dict, Series)) for v in values] - - if any(are_mappings): - if not all(are_mappings): - raise TypeError("If a nested mapping is passed, all values" - " of the top level mapping must be " - "mappings") - # passed a nested dict/Series - to_rep_dict = {} - value_dict = {} - - for k, v in items: - to_rep_dict[k] = list(v.keys()) - value_dict[k] = list(v.values()) - - to_replace, value = to_rep_dict, value_dict - else: - to_replace, value = keys, values - - return self.replace(to_replace, value, inplace=inplace, - limit=limit, regex=regex) - else: - if not len(self.columns): - return self - - new_data = self._data - if isinstance(to_replace, (dict, Series)): - if isinstance(value, (dict, Series)): # {'A' : NA} -> {'A' : 0} - new_data = self._data - for c, src in compat.iteritems(to_replace): - if c in value and c in self: - new_data = new_data.replace(src, value[c], - filter=[c], - inplace=inplace, - regex=regex) - - elif not isinstance(value, (list, np.ndarray)): # {'A': NA} -> 0 - new_data = self._data - for k, src in compat.iteritems(to_replace): - if k in self: - new_data = new_data.replace(src, value, - filter=[k], - inplace=inplace, - regex=regex) - else: - raise 
TypeError('Fill value must be scalar, dict, or ' - 'Series') - - elif isinstance(to_replace, (list, np.ndarray)): - # [NA, ''] -> [0, 'missing'] - if isinstance(value, (list, np.ndarray)): - if len(to_replace) != len(value): - raise ValueError('Replacement lists must match ' - 'in length. Expecting %d got %d ' % - (len(to_replace), len(value))) - - new_data = self._data.replace_list(to_replace, value, - inplace=inplace, - regex=regex) - - else: # [NA, ''] -> 0 - new_data = self._data.replace(to_replace, value, - inplace=inplace, regex=regex) - elif to_replace is None: - if not (com.is_re_compilable(regex) or - isinstance(regex, (list, dict, np.ndarray, Series))): - raise TypeError("'regex' must be a string or a compiled " - "regular expression or a list or dict of " - "strings or regular expressions, you " - "passed a {0}".format(type(regex))) - return self.replace(regex, value, inplace=inplace, limit=limit, - regex=True) - else: - - # dest iterable dict-like - if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1} - new_data = self._data - - for k, v in compat.iteritems(value): - if k in self: - new_data = new_data.replace(to_replace, v, - filter=[k], - inplace=inplace, - regex=regex) - - elif not isinstance(value, (list, np.ndarray)): # NA -> 0 - new_data = self._data.replace(to_replace, value, - inplace=inplace, regex=regex) - else: - raise TypeError('Invalid "to_replace" type: ' - '{0}'.format(type(to_replace))) # pragma: no cover - - new_data = new_data.convert(copy=not inplace, convert_numeric=False) - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - - def interpolate(self, to_replace, method='pad', axis=0, inplace=False, - limit=None): - """Interpolate values according to different methods. 
- - Parameters - ---------- - to_replace : dict, Series - method : str - axis : int - inplace : bool - limit : int, default None - - Returns - ------- - frame : interpolated - - See Also - -------- - reindex, replace, fillna - """ - warn('DataFrame.interpolate will be removed in v0.13, please use ' - 'either DataFrame.fillna or DataFrame.replace instead', - FutureWarning) - if self._is_mixed_type and axis == 1: - return self.T.replace(to_replace, method=method, limit=limit).T - - method = com._clean_fill_method(method) - - if isinstance(to_replace, (dict, Series)): - if axis == 0: - return self.replace(to_replace, method=method, inplace=inplace, - limit=limit, axis=axis) - elif axis == 1: - obj = self.T - if inplace: - obj.replace(to_replace, method=method, limit=limit, - inplace=inplace, axis=0) - return obj.T - return obj.replace(to_replace, method=method, limit=limit, - inplace=inplace, axis=0).T - else: - raise ValueError('Invalid value for axis') - else: - new_data = self._data.interpolate(method=method, axis=axis, - limit=limit, inplace=inplace, - missing=to_replace, coerce=False) - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - #---------------------------------------------------------------------- # Rename @@ -3832,11 +2991,6 @@ def _arith_op(left, right): return self._constructor(result, index=new_index, columns=new_columns, copy=False) - def _indexed_same(self, other): - same_index = self.index.equals(other.index) - same_columns = self.columns.equals(other.columns) - return same_index and same_columns - def _combine_series(self, other, func, fill_value=None, axis=None, level=None): if axis is not None: @@ -3880,10 +3034,11 @@ def _combine_match_columns(self, other, func, fill_value=None): if fill_value is not None: raise NotImplementedError - new_data = left._data.eval(func, right, axes = [left.columns, self.index]) + new_data = left._data.eval( + func, right, axes=[left.columns, self.index]) return 
self._constructor(new_data) - def _combine_const(self, other, func, raise_on_error = True): + def _combine_const(self, other, func, raise_on_error=True): if self.empty: return self @@ -3896,7 +3051,7 @@ def _compare_frame(self, other, func, str_rep): 'DataFrame objects') def _compare(a, b): - return dict([ (col,func(a[col], b[col])) for col in a.columns ]) + return dict([(col, func(a[col], b[col])) for col in a.columns]) new_data = expressions.evaluate(_compare, str_rep, self, other) return self._constructor(data=new_data, index=self.index, @@ -3907,7 +3062,7 @@ def _flex_compare_frame(self, other, func, str_rep, level): self, other = self.align(other, 'outer', level=level) def _compare(a, b): - return dict([ (col,func(a[col], b[col])) for col in a.columns ]) + return dict([(col, func(a[col], b[col])) for col in a.columns]) new_data = expressions.evaluate(_compare, str_rep, self, other) return self._constructor(data=new_data, index=self.index, @@ -3973,7 +3128,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): # if we have different dtypes, possibily promote new_dtype = this_dtype if this_dtype != other_dtype: - new_dtype = com._lcd_dtypes(this_dtype,other_dtype) + new_dtype = com._lcd_dtypes(this_dtype, other_dtype) series = series.astype(new_dtype) otherSeries = otherSeries.astype(new_dtype) @@ -4025,8 +3180,8 @@ def combine_first(self, other): combined : DataFrame """ def combiner(x, y, needs_i8_conversion=False): - x_values = x.values if hasattr(x,'values') else x - y_values = y.values if hasattr(y,'values') else y + x_values = x.values if hasattr(x, 'values') else x + y_values = y.values if hasattr(y, 'values') else y if needs_i8_conversion: mask = isnull(x) x_values = x_values.view('i8') @@ -4086,7 +3241,8 @@ def update(self, other, join='left', overwrite=True, filter_func=None, else: mask = notnull(this) - self[col] = expressions.where(mask, this, that, raise_on_error=True) + self[col] = expressions.where( + mask, this, that, 
raise_on_error=True) #---------------------------------------------------------------------- # Misc methods @@ -4325,7 +3481,7 @@ def shift(self, periods=1, freq=None, **kwds): #---------------------------------------------------------------------- # Function application - def apply(self, func, axis=0, broadcast=False, raw=False, + def apply(self, func, axis=0, broadcast=False, raw=False, reduce=True, args=(), **kwds): """ Applies function along input axis of DataFrame. Objects passed to @@ -4343,6 +3499,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, broadcast : bool, default False For aggregation functions, return object of same size with values propagated + reduce : bool, default True, try to apply reduction procedures raw : boolean, default False If False, convert each row or column into a Series. If raw=True the passed function will receive ndarray objects instead. If you are @@ -4386,8 +3543,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, # How to determine this better? 
is_reduction = False try: - is_reduction = not isinstance(f(_EMPTY_SERIES), - np.ndarray) + is_reduction = not isinstance(f(_EMPTY_SERIES), Series) except Exception: pass @@ -4399,7 +3555,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, if raw and not self._is_mixed_type: return self._apply_raw(f, axis) else: - return self._apply_standard(f, axis) + return self._apply_standard(f, axis, reduce=reduce) else: return self._apply_broadcast(f, axis) @@ -4416,21 +3572,26 @@ def _apply_raw(self, func, axis): else: return Series(result, index=self._get_agg_axis(axis)) - def _apply_standard(self, func, axis, ignore_failures=False): - try: + def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): - if self._is_mixed_type: # maybe a hack for now - raise AssertionError('Must be mixed type DataFrame') - values = self.values - dummy = Series(NA, index=self._get_axis(axis), - dtype=values.dtype) + # try to reduce first (by default) + # this only matters if the reduction in values is of different dtype + # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce + if reduce: + try: - labels = self._get_agg_axis(axis) - result = lib.reduce(values, func, axis=axis, dummy=dummy, - labels=labels) - return Series(result, index=self._get_agg_axis(axis)) - except Exception: - pass + if self._is_mixed_type: # maybe a hack for now + raise AssertionError('Must be mixed type DataFrame') + values = self.values.ravel() + dummy = Series(NA, index=self._get_axis(axis), + dtype=values.dtype) + + labels = self._get_agg_axis(axis) + result = lib.reduce(values, func, axis=axis, dummy=dummy, + labels=labels) + return Series(result, index=self._get_agg_axis(axis)) + except Exception: + pass if axis == 0: series_gen = (self.icol(i) for i in range(len(self.columns))) @@ -4476,7 +3637,6 @@ def _apply_standard(self, func, axis, ignore_failures=False): pass raise e - if len(results) > 0 and _is_sequence(results[0]): if not isinstance(results[0], Series): index = res_columns @@ -4537,8 +3697,8 @@ def applymap(self, func): # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if com.is_datetime64_dtype(x): - x = lib.map_infer(x, lib.Timestamp) - return lib.map_infer(x, func) + x = lib.map_infer(_values_from_object(x), lib.Timestamp) + return lib.map_infer(_values_from_object(x), func) return self.apply(infer) #---------------------------------------------------------------------- @@ -4772,7 +3932,7 @@ def cov(self, min_periods=None): baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) - baseCov = baseCov.reshape((len(cols),len(cols))) + baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True, minp=min_periods) @@ -5268,6 +4428,7 @@ def idxmax(self, axis=0, skipna=True): return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): + """ let's be explict about this """ if axis_num == 0: return self.columns elif axis_num == 1: @@ -5328,7 +4489,7 @@ def clip(self, lower=None, upper=None): # 
GH 2747 (arguments were reversed) if lower is not None and upper is not None: - lower, upper = min(lower,upper), max(lower,upper) + lower, upper = min(lower, upper), max(lower, upper) return self.apply(lambda x: x.clip(lower=lower, upper=upper)) @@ -5538,78 +4699,8 @@ def combineMult(self, other): """ return self.mul(other, fill_value=1.) - def where(self, cond, other=NA, inplace=False, try_cast=False, raise_on_error=True): - """ - Return a DataFrame with the same shape as self and whose corresponding - entries are from self where cond is True and otherwise are from other. - - Parameters - ---------- - cond : boolean DataFrame or array - other : scalar or DataFrame - inplace : boolean, default False - Whether to perform the operation in place on the data - try_cast : boolean, default False - try to cast the result back to the input type (if possible), - raise_on_error : boolean, default True - Whether to raise on invalid data types (e.g. trying to where on - strings) - - Returns - ------- - wh : DataFrame - """ - if isinstance(cond, DataFrame): - # this already checks for index/column equality - cond = cond.reindex(self.index, columns=self.columns) - else: - if not hasattr(cond, 'shape'): - raise ValueError('where requires an ndarray like object for its ' - 'condition') - if cond.shape != self.shape: - raise ValueError('Array conditional must be same shape as self') - cond = self._constructor(cond, index=self.index, - columns=self.columns) - - if inplace: - cond = -(cond.fillna(True).astype(bool)) - else: - cond = cond.fillna(False).astype(bool) - - if isinstance(other, DataFrame): - _, other = self.align(other, join='left', fill_value=NA) - elif isinstance(other,np.ndarray): - if other.shape != self.shape: - raise ValueError('other must be the same shape as self ' - 'when an ndarray') - other = self._constructor(other, self.index, self.columns) - - if inplace: - # we may have different type blocks come out of putmask, so - # reconstruct the block manager - 
self._data = self._data.putmask(cond,other,inplace=True) - - else: - new_data = self._data.where(other, cond, - raise_on_error=raise_on_error, - try_cast=try_cast) - - return self._constructor(new_data) - - def mask(self, cond): - """ - Returns copy of self whose values are replaced with nan if the - inverted condition is True - - Parameters - ---------- - cond: boolean DataFrame or array - - Returns - ------- - wh: DataFrame - """ - return self.where(~cond, NA) +DataFrame._setup_axes( + ['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True) _EMPTY_SERIES = Series([]) @@ -5700,6 +4791,7 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): return create_block_manager_from_arrays(arrays, arr_names, axes) + def extract_index(data): from pandas.core.index import _union_indexes @@ -5763,9 +4855,12 @@ def convert(v): # we could have a 1-dim or 2-dim list here # this is equiv of np.asarray, but does object conversion # and platform dtype preservation - if com.is_list_like(values[0]) or hasattr(values[0],'len'): - values = np.array([ convert(v) for v in values]) - else: + try: + if com.is_list_like(values[0]) or hasattr(values[0], 'len'): + values = np.array([convert(v) for v in values]) + else: + values = convert(values) + except: values = convert(values) else: @@ -5783,7 +4878,7 @@ def convert(v): def _rec_to_dict(arr): - if isinstance(arr, np.ndarray): + if isinstance(arr, (np.ndarray, Series)): columns = list(arr.dtype.names) sdict = dict((k, arr[k]) for k in columns) elif isinstance(arr, DataFrame): @@ -5829,7 +4924,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) - elif isinstance(data, np.ndarray): + elif isinstance(data, (np.ndarray, Series)): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns @@ -5855,18 +4950,23 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, 
dtype=None): from pandas.core.index import _get_combined_index if columns is None: - columns = _get_combined_index([s.index for s in data]) + columns = _get_combined_index([s.index for s in data if getattr(s,'index',None) is not None ]) indexer_cache = {} aligned_values = [] for s in data: - index = s.index + index = getattr(s,'index',None) + if index is None: + index = _default_index(len(s)) + if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - aligned_values.append(com.take_1d(s.values, indexer)) + + values = _values_from_object(s) + aligned_values.append(com.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -5910,13 +5010,13 @@ def _convert_object_array(content, columns, coerce_float=False, dtype=None): def _get_names_from_index(data): index = lrange(len(data)) - has_some_name = any([s.name is not None for s in data]) + has_some_name = any([getattr(s,'name',None) is not None for s in data]) if not has_some_name: return index count = 0 for i, s in enumerate(data): - n = s.name + n = getattr(s,'name',None) if n is not None: index[i] = n else: @@ -5960,6 +5060,7 @@ def _homogenize(data, index, dtype=None): return homogenized + def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() @@ -6015,11 +5116,11 @@ def boxplot(self, column=None, by=None, ax=None, fontsize=None, Can be any valid input to groupby by : string or sequence Column in the DataFrame to group by - ax : matplotlib axis object, default None + ax : matplotlib axis object, default None fontsize : int or string - rot : int, default None + rot : int, default None Rotation for ticks - grid : boolean, default None (matlab style default) + grid : boolean, default None (matlab style default) Axis grid lines Returns diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ece7d460c0d33..91c5804d48a78 100644 --- a/pandas/core/generic.py +++ 
b/pandas/core/generic.py @@ -1,57 +1,77 @@ # pylint: disable=W0231,E1101 import warnings -from pandas import compat +import operator +import weakref import numpy as np import pandas.lib as lib -from pandas.core.base import PandasObject -from pandas.core.index import MultiIndex +from pandas.core.base import PandasObject +from pandas.core.index import Index, MultiIndex, _ensure_index import pandas.core.indexing as indexing from pandas.core.indexing import _maybe_convert_indices from pandas.tseries.index import DatetimeIndex +from pandas.core.internals import BlockManager import pandas.core.common as com +from pandas import compat from pandas.compat import map, zip +from pandas.core.common import (isnull, notnull, is_list_like, + _values_from_object, + _infer_dtype_from_scalar, _maybe_promote) +class NDFrame(PandasObject): -class PandasError(Exception): - pass - + """ + N-dimensional analogue of DataFrame. Store multi-dimensional in a + size-mutable, labeled data structure -class PandasContainer(PandasObject): + Parameters + ---------- + data : BlockManager + axes : list + copy : boolean, default False + """ + _internal_names = [ + '_data', 'name', '_cacher', '_subtyp', '_index', '_default_kind', '_default_fill_value'] + _internal_names_set = set(_internal_names) + _prop_attributes = [] - _AXIS_NUMBERS = { - 'index': 0, - 'columns': 1 - } + def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): - _AXIS_ALIASES = {} - _AXIS_NAMES = dict((v, k) for k, v in compat.iteritems(_AXIS_NUMBERS)) + if not fastpath: + if dtype is not None: + data = data.astype(dtype) + elif copy: + data = data.copy() - def to_pickle(self, path): - """ - Pickle (serialize) object to input file path + if axes is not None: + for i, ax in enumerate(axes): + data = data.reindex_axis(ax, axis=i) - Parameters - ---------- - path : string - File path - """ - from pandas.io.pickle import to_pickle - return to_pickle(self, path) + object.__setattr__(self, '_data', data) + 
object.__setattr__(self, '_item_cache', {}) - def save(self, path): # TODO remove in 0.13 - from pandas.io.pickle import to_pickle - warnings.warn("save is deprecated, use to_pickle", FutureWarning) - return to_pickle(self, path) + def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): + """ passed a manager and a axes dict """ + for a, axe in axes.items(): + if axe is not None: + mgr = mgr.reindex_axis( + axe, axis=self._get_block_manager_axis(a), copy=False) + + # do not copy BlockManager unless explicitly done + if copy and dtype is None: + mgr = mgr.copy() + elif dtype is not None: + # avoid copy if we can + if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: + mgr = mgr.astype(dtype) + return mgr - def load(self, path): # TODO remove in 0.13 - from pandas.io.pickle import read_pickle - warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) - return read_pickle(path) + #---------------------------------------------------------------------- + # Construction - def __hash__(self): - raise TypeError('{0!r} objects are mutable, thus they cannot be' - ' hashed'.format(self.__class__.__name__)) + @property + def _constructor(self): + raise NotImplementedError def __unicode__(self): # unicode representation based upon iterating over self @@ -59,9 +79,134 @@ def __unicode__(self): prepr = '[%s]' % ','.join(map(com.pprint_thing, self)) return '%s(%s)' % (self.__class__.__name__, prepr) + @property + def _constructor_sliced(self): + raise NotImplementedError #---------------------------------------------------------------------- - # Axis name business + # Axis + + @classmethod + def _setup_axes( + cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, + axes_are_reversed=False, build_axes=True, ns=None): + """ provide axes setup for the major PandasObjects + + axes : the names of the axes in order (lowest to highest) + info_axis_num : the axis of the selector dimension (int) + stat_axis_num : the number of axis for the 
default stats (int) + aliases : other names for a single axis (dict) + slicers : how axes slice to others (dict) + axes_are_reversed : boolean whether to treat passed axes as reversed (DataFrame) + build_axes : setup the axis properties (default True) + """ + + cls._AXIS_ORDERS = axes + cls._AXIS_NUMBERS = dict((a, i) for i, a in enumerate(axes)) + cls._AXIS_LEN = len(axes) + cls._AXIS_ALIASES = aliases or dict() + cls._AXIS_IALIASES = dict((v, k) + for k, v in cls._AXIS_ALIASES.items()) + cls._AXIS_NAMES = dict(enumerate(axes)) + cls._AXIS_SLICEMAP = slicers or None + cls._AXIS_REVERSED = axes_are_reversed + + # typ + setattr(cls, '_typ', cls.__name__.lower()) + + # indexing support + cls._ix = None + + if info_axis is not None: + cls._info_axis_number = info_axis + cls._info_axis_name = axes[info_axis] + + if stat_axis is not None: + cls._stat_axis_number = stat_axis + cls._stat_axis_name = axes[stat_axis] + + # setup the actual axis + if build_axes: + + def set_axis(a, i): + setattr(cls, a, lib.AxisProperty(i)) + + if axes_are_reversed: + m = cls._AXIS_LEN - 1 + for i, a in cls._AXIS_NAMES.items(): + set_axis(a, m - i) + else: + for i, a in cls._AXIS_NAMES.items(): + set_axis(a, i) + + # addtl parms + if isinstance(ns, dict): + for k, v in ns.items(): + setattr(cls, k, v) + + def _construct_axes_dict(self, axes=None, **kwargs): + """ return an axes dictionary for myself """ + d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)]) + d.update(kwargs) + return d + + @staticmethod + def _construct_axes_dict_from(self, axes, **kwargs): + """ return an axes dictionary for the passed axes """ + d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) + d.update(kwargs) + return d + + def _construct_axes_dict_for_slice(self, axes=None, **kwargs): + """ return an axes dictionary for myself """ + d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a)) + for a in (axes or self._AXIS_ORDERS)]) + d.update(kwargs) + return d + + def 
_construct_axes_from_arguments(self, args, kwargs, require_all=False): + """ construct and returns axes if supplied in args/kwargs + if require_all, raise if all axis arguments are not supplied + return a tuple of (axes, kwargs) """ + + # construct the args + args = list(args) + for a in self._AXIS_ORDERS: + + # if we have an alias for this axis + alias = self._AXIS_IALIASES.get(a) + if alias is not None: + if a in kwargs: + if alias in kwargs: + raise Exception( + "arguments are multually exclusive for [%s,%s]" % (a, alias)) + continue + if alias in kwargs: + kwargs[a] = kwargs.pop(alias) + continue + + # look for a argument by position + if a not in kwargs: + try: + kwargs[a] = args.pop(0) + except (IndexError): + if require_all: + raise AssertionError( + "not enough arguments specified!") + + axes = dict([(a, kwargs.get(a)) for a in self._AXIS_ORDERS]) + return axes, kwargs + + @classmethod + def _from_axes(cls, data, axes): + # for construction from BlockManager + if isinstance(data, BlockManager): + return cls(data) + else: + if cls._AXIS_REVERSED: + axes = axes[::-1] + d = cls._construct_axes_dict_from(cls, axes, copy=False) + return cls(data, **d) def _get_axis_number(self, axis): axis = self._AXIS_ALIASES.get(axis, axis) @@ -91,448 +236,308 @@ def _get_axis(self, axis): name = self._get_axis_name(axis) return getattr(self, name) - #---------------------------------------------------------------------- - # Indexers - @classmethod - def _create_indexer(cls, name, indexer): - """ create an indexer like _name in the class """ - iname = '_%s' % name - setattr(cls,iname,None) + def _get_block_manager_axis(self, axis): + """ map the axis to the block_manager axis """ + axis = self._get_axis_number(axis) + if self._AXIS_REVERSED: + m = self._AXIS_LEN - 1 + return m - axis + return axis - def _indexer(self): - if getattr(self,iname,None) is None: - setattr(self,iname,indexer(self, name)) - return getattr(self,iname) + @property + def _info_axis(self): + return 
getattr(self, self._info_axis_name) - setattr(cls,name,property(_indexer)) + @property + def _stat_axis(self): + return getattr(self, self._stat_axis_name) - def abs(self): - """ - Return an object with absolute value taken. Only applicable to objects - that are all numeric + @property + def shape(self): + return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) - Returns - ------- - abs: type of caller - """ - return np.abs(self) + @property + def axes(self): + """ we do it this way because if we have reversed axes, then + the block manager shows then reversed """ + return [self._get_axis(a) for a in self._AXIS_ORDERS] - def get(self, key, default=None): - """ - Get item from object for given key (DataFrame column, Panel slice, - etc.). Returns default value if not found + @property + def ndim(self): + return self._data.ndim - Parameters - ---------- - key : object + def _expand_axes(self, key): + new_axes = [] + for k, ax in zip(key, self.axes): + if k not in ax: + if type(k) != ax.dtype.type: + ax = ax.astype('O') + new_axes.append(ax.insert(len(ax), k)) + else: + new_axes.append(ax) - Returns - ------- - value : type of items contained in object - """ - try: - return self[key] - except KeyError: - return default + return new_axes - def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, - group_keys=True, squeeze=False): + def _set_axis(self, axis, labels): + self._data.set_axis(axis, labels) + self._clear_item_cache() + + def transpose(self, *args, **kwargs): """ - Group series using mapper (dict or key function, apply given function - to group, return result as series) or by a series of columns + Permute the dimensions of the Object Parameters ---------- - by : mapping function / list of functions, dict, Series, or tuple / - list of column names. - Called on each element of the object index to determine the groups. 
- If a dict or Series is passed, the Series or dict VALUES will be - used to determine the groups - axis : int, default 0 - level : int, level name, or sequence of such, default None - If the axis is a MultiIndex (hierarchical), group by a particular - level or levels - as_index : boolean, default True - For aggregated output, return object with group labels as the - index. Only relevant for DataFrame input. as_index=False is - effectively "SQL-style" grouped output - sort : boolean, default True - Sort group keys. Get better performance by turning this off - group_keys : boolean, default True - When calling apply, add group keys to index to identify pieces - squeeze : boolean, default False - reduce the dimensionaility of the return type if possible, otherwise - return a consistent type + axes : int or name (or alias) + copy : boolean, default False + Make a copy of the underlying data. Mixed-dtype data will + always result in a copy Examples -------- - # DataFrame result - >>> data.groupby(func, axis=0).mean() - - # DataFrame result - >>> data.groupby(['col1', 'col2'])['col3'].mean() - - # DataFrame with hierarchical index - >>> data.groupby(['col1', 'col2']).mean() + >>> p.transpose(2, 0, 1) + >>> p.transpose(2, 0, 1, copy=True) Returns ------- - GroupBy object + y : same as input """ - from pandas.core.groupby import groupby - axis = self._get_axis_number(axis) - return groupby(self, by, axis=axis, level=level, as_index=as_index, - sort=sort, group_keys=group_keys, - squeeze=squeeze) - def asfreq(self, freq, method=None, how=None, normalize=False): - """ - Convert all TimeSeries inside to specified frequency using DateOffset - objects. Optionally provide fill method to pad/backfill missing values. 
+ # construct the args + axes, kwargs = self._construct_axes_from_arguments( + args, kwargs, require_all=True) + axes_names = tuple([self._get_axis_name(axes[a]) + for a in self._AXIS_ORDERS]) + axes_numbers = tuple([self._get_axis_number(axes[a]) + for a in self._AXIS_ORDERS]) - Parameters - ---------- - freq : DateOffset object, or string - method : {'backfill', 'bfill', 'pad', 'ffill', None} - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill methdo - how : {'start', 'end'}, default end - For PeriodIndex only, see PeriodIndex.asfreq - normalize : bool, default False - Whether to reset output index to midnight + # we must have unique axes + if len(axes) != len(set(axes)): + raise ValueError('Must specify %s unique axes' % self._AXIS_LEN) + + new_axes = self._construct_axes_dict_from( + self, [self._get_axis(x) for x in axes_names]) + new_values = self.values.transpose(axes_numbers) + if kwargs.get('copy') or (len(args) and args[-1]): + new_values = new_values.copy() + return self._constructor(new_values, **new_axes) + + def swapaxes(self, axis1, axis2, copy=True): + """ + Interchange axes and swap values axes appropriately Returns ------- - converted : type of caller + y : same as input """ - from pandas.tseries.resample import asfreq - return asfreq(self, freq, method=method, how=how, - normalize=normalize) + i = self._get_axis_number(axis1) + j = self._get_axis_number(axis2) - def at_time(self, time, asof=False): - """ - Select values at particular time of day (e.g. 
9:30AM) + if i == j: + if copy: + return self.copy() + return self - Parameters - ---------- - time : datetime.time or string + mapping = {i: j, j: i} - Returns - ------- - values_at_time : type of caller + new_axes = (self._get_axis(mapping.get(k, k)) + for k in range(self._AXIS_LEN)) + new_values = self.values.swapaxes(i, j) + if copy: + new_values = new_values.copy() + + return self._constructor(new_values, *new_axes) + + def pop(self, item): """ + Return item and drop from frame. Raise KeyError if not found. + """ + result = self[item] + del self[item] + return result + + def squeeze(self): + """ squeeze length 1 dimensions """ try: - indexer = self.index.indexer_at_time(time, asof=asof) - return self.take(indexer, convert=False) - except AttributeError: - raise TypeError('Index must be DatetimeIndex') + return self.ix[tuple([slice(None) if len(a) > 1 else a[0] for a in self.axes])] + except: + return self - def between_time(self, start_time, end_time, include_start=True, - include_end=True): + def swaplevel(self, i, j, axis=0): """ - Select values between particular times of the day (e.g., 9:00-9:30 AM) + Swap levels i and j in a MultiIndex on a particular axis Parameters ---------- - start_time : datetime.time or string - end_time : datetime.time or string - include_start : boolean, default True - include_end : boolean, default True + i, j : int, string (can be mixed) + Level of index to be swapped. Can pass level name as string. 
Returns ------- - values_between_time : type of caller + swapped : type of caller (new object) """ - try: - indexer = self.index.indexer_between_time( - start_time, end_time, include_start=include_start, - include_end=include_end) - return self.take(indexer, convert=False) - except AttributeError: - raise TypeError('Index must be DatetimeIndex') + axis = self._get_axis_number(axis) + result = self.copy() + labels = result._data.axes[axis] + result._data.set_axis(axis, labels.swaplevel(i, j)) + return result - def resample(self, rule, how=None, axis=0, fill_method=None, - closed=None, label=None, convention='start', - kind=None, loffset=None, limit=None, base=0): + def rename_axis(self, mapper, axis=0, copy=True): """ - Convenience method for frequency conversion and resampling of regular - time-series data. + Alter index and / or columns using input function or functions. + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Parameters ---------- - rule : the offset string or object representing target conversion - how : string, method for down- or re-sampling, default to 'mean' for - downsampling - axis : int, optional, default 0 - fill_method : string, fill_method for upsampling, default None - closed : {'right', 'left'} - Which side of bin interval is closed - label : {'right', 'left'} - Which bin edge label to label bucket with - convention : {'start', 'end', 's', 'e'} - kind: "period"/"timestamp" - loffset: timedelta - Adjust the resampled time labels - limit: int, default None - Maximum size gap to when reindexing with fill_method - base : int, default 0 - For frequencies that evenly subdivide 1 day, the "origin" of the - aggregated intervals. For example, for '5min' frequency, base could - range from 0 through 4. 
Defaults to 0 - """ - from pandas.tseries.resample import TimeGrouper - axis = self._get_axis_number(axis) - sampler = TimeGrouper(rule, label=label, closed=closed, how=how, - axis=axis, kind=kind, loffset=loffset, - fill_method=fill_method, convention=convention, - limit=limit, base=base) - return sampler.resample(self) + mapper : dict-like or function, optional + axis : int, default 0 + copy : boolean, default True + Also copy underlying data - def first(self, offset): + Returns + ------- + renamed : type of caller """ - Convenience method for subsetting initial periods of time series data - based on a date offset - - Parameters - ---------- - offset : string, DateOffset, dateutil.relativedelta - - Examples - -------- - ts.last('10D') -> First 10 days - - Returns - ------- - subset : type of caller - """ - from pandas.tseries.frequencies import to_offset - if not isinstance(self.index, DatetimeIndex): - raise NotImplementedError - - if len(self.index) == 0: - return self - - offset = to_offset(offset) - end_date = end = self.index[0] + offset - - # Tick-like, e.g. 
3 weeks - if not offset.isAnchored() and hasattr(offset, '_inc'): - if end_date in self.index: - end = self.index.searchsorted(end_date, side='left') - - return self.ix[:end] - - def last(self, offset): - """ - Convenience method for subsetting final periods of time series data - based on a date offset - - Parameters - ---------- - offset : string, DateOffset, dateutil.relativedelta - - Examples - -------- - ts.last('5M') -> Last 5 months + # should move this at some point + from pandas.core.series import _get_rename_function - Returns - ------- - subset : type of caller - """ - from pandas.tseries.frequencies import to_offset - if not isinstance(self.index, DatetimeIndex): - raise NotImplementedError + mapper_f = _get_rename_function(mapper) - if len(self.index) == 0: - return self + if axis == 0: + new_data = self._data.rename_items(mapper_f, copydata=copy) + else: + new_data = self._data.rename_axis(mapper_f, axis=axis) + if copy: + new_data = new_data.copy() - offset = to_offset(offset) + return self._constructor(new_data) - start_date = start = self.index[-1] - offset - start = self.index.searchsorted(start_date, side='right') - return self.ix[start:] + #---------------------------------------------------------------------- + # Comparisons - def select(self, crit, axis=0): - """ - Return data corresponding to axis labels matching criteria + def _indexed_same(self, other): + return all([self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS]) - Parameters - ---------- - crit : function - To be called on each index (label). 
Should return True or False - axis : int + def __neg__(self): + arr = operator.neg(_values_from_object(self)) + return self._wrap_array(arr, self.axes, copy=False) - Returns - ------- - selection : type of caller - """ - axis_name = self._get_axis_name(axis) - axis = self._get_axis(axis) + def __invert__(self): + arr = operator.inv(_values_from_object(self)) + return self._wrap_array(arr, self.axes, copy=False) - if len(axis) > 0: - new_axis = axis[np.asarray([bool(crit(label)) for label in axis])] - else: - new_axis = axis + #---------------------------------------------------------------------- + # Iteration - return self.reindex(**{axis_name: new_axis}) + def __hash__(self): + raise TypeError('{0!r} objects are mutable, thus they cannot be' + ' hashed'.format(self.__class__.__name__)) - def drop(self, labels, axis=0, level=None): + def __iter__(self): """ - Return new object with labels in requested axis removed - - Parameters - ---------- - labels : array-like - axis : int - level : int or name, default None - For MultiIndex - - Returns - ------- - dropped : type of caller + Iterate over infor axis """ - axis_name = self._get_axis_name(axis) - axis, axis_ = self._get_axis(axis), axis - - if axis.is_unique: - if level is not None: - if not isinstance(axis, MultiIndex): - raise AssertionError('axis must be a MultiIndex') - new_axis = axis.drop(labels, level=level) - else: - new_axis = axis.drop(labels) - dropped = self.reindex(**{axis_name: new_axis}) - try: - dropped.axes[axis_].set_names(axis.names, inplace=True) - except AttributeError: - pass - return dropped + return iter(self._info_axis) - else: - if level is not None: - if not isinstance(axis, MultiIndex): - raise AssertionError('axis must be a MultiIndex') - indexer = -lib.ismember(axis.get_level_values(level), - set(labels)) - else: - indexer = -axis.isin(labels) + def keys(self): + """ return the info axis names """ + return self._info_axis - slicer = [slice(None)] * self.ndim - 
slicer[self._get_axis_number(axis_name)] = indexer + def iteritems(self): + for h in self._info_axis: + yield h, self[h] - return self.ix[tuple(slicer)] + # originally used to get around 2to3's changes to iteritems. + # Now unnecessary. + def iterkv(self, *args, **kwargs): + warnings.warn("iterkv is deprecated and will be removed in a future " + "release, use ``iteritems`` instead.", DeprecationWarning) + return self.iteritems(*args, **kwargs) - def sort_index(self, axis=0, ascending=True): - """ - Sort object by labels (along an axis) + def __len__(self): + """Returns length of info axis """ + return len(self._info_axis) - Parameters - ---------- - axis : {0, 1} - Sort index/rows versus columns - ascending : boolean, default True - Sort ascending vs. descending + def __contains__(self, key): + """True if the key is in the info axis """ + return key in self._info_axis - Returns - ------- - sorted_obj : type of caller - """ - axis = self._get_axis_number(axis) - axis_name = self._get_axis_name(axis) - labels = self._get_axis(axis) + @property + def empty(self): + return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS) - sort_index = labels.argsort() - if not ascending: - sort_index = sort_index[::-1] + def __nonzero__(self): + return not self.empty + __bool__ = __nonzero__ - new_axis = labels.take(sort_index) - return self.reindex(**{axis_name: new_axis}) + #---------------------------------------------------------------------- + # Array Interface - def reindex(self, *args, **kwds): - raise NotImplementedError + def _wrap_array(self, arr, axes, copy=False): + d = self._construct_axes_dict_from(self, axes, copy=copy) + return self._constructor(arr, **d) - def tshift(self, periods=1, freq=None, **kwds): - """ - Shift the time index, using the index's frequency if available + def __array__(self, dtype=None): + return _values_from_object(self) - Parameters - ---------- - periods : int - Number of periods to move, can be positive or negative - freq : 
DateOffset, timedelta, or time rule string, default None - Increment to use from datetools module or time rule (e.g. 'EOM') + def __array_wrap__(self, result): + d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) + return self._constructor(result, **d) - Notes - ----- - If freq is not specified then tries to use the freq or inferred_freq - attributes of the index. If neither of those attributes exist, a - ValueError is thrown + def to_dense(self): + # compat + return self - Returns - ------- - shifted : Series - """ - if freq is None: - freq = getattr(self.index, 'freq', None) + #---------------------------------------------------------------------- + # Picklability - if freq is None: - freq = getattr(self.index, 'inferred_freq', None) + def __getstate__(self): + return self._data - if freq is None: - msg = 'Freq was not given and was not set in the index' - raise ValueError(msg) + def __setstate__(self, state): - return self.shift(periods, freq, **kwds) + if isinstance(state, BlockManager): + self._data = state + elif isinstance(state, dict): + typ = state.get('_typ') + if typ is not None: - def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, - **kwds): - """ - Percent change over given number of periods + # set in the order of internal names + # to avoid definitional recursion + # e.g. say fill_value needing _data to be + # defined + for k in self._internal_names: + if k in state: + v = state[k] + object.__setattr__(self, k, v) - Parameters - ---------- - periods : int, default 1 - Periods to shift for forming percent change - fill_method : str, default 'pad' - How to handle NAs before computing percent changes - limit : int, default None - The number of consecutive NAs to fill before stopping - freq : DateOffset, timedelta, or offset alias string, optional - Increment to use from time series API (e.g. 
'M' or BDay()) + for k, v in state.items(): + if k not in self._internal_names: + object.__setattr__(self, k, v) - Returns - ------- - chg : Series or DataFrame - """ - if fill_method is None: - data = self - else: - data = self.fillna(method=fill_method, limit=limit) - rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1 - if freq is None: - mask = com.isnull(self.values) - np.putmask(rs.values, mask, np.nan) - return rs + else: + self._unpickle_series_compat(state) + elif isinstance(state[0], dict): + if len(state) == 5: + self._unpickle_sparse_frame_compat(state) + else: + self._unpickle_frame_compat(state) + elif len(state) == 4: + self._unpickle_panel_compat(state) + elif len(state) == 2: + self._unpickle_series_compat(state) + else: # pragma: no cover + # old pickling format, for compatibility + self._unpickle_matrix_compat(state) - def to_hdf(self, path_or_buf, key, **kwargs): - """ activate the HDFStore """ - from pandas.io import pytables - return pytables.to_hdf(path_or_buf, key, self, **kwargs) + self._item_cache = {} - def to_clipboard(self): - """ - Attempt to write text representation of object to the system clipboard + #---------------------------------------------------------------------- + # IO - Notes - ----- - Requirements for your platform - - Linux: xclip, or xsel (with gtk or PyQt4 modules) - - Windows: - - OS X: - """ - from pandas.io import clipboard - clipboard.to_clipboard(self) + #---------------------------------------------------------------------- + # I/O Methods def to_json(self, path_or_buf=None, orient=None, date_format='epoch', double_precision=10, force_ascii=True, date_unit='ms'): @@ -566,8 +571,8 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', - columns : dict like {column -> {index -> value}} - values : just the values array - date_format : string, default 'epoch' - type of date conversion, 'epoch' for timestamp, 'iso' for ISO8601 + date_format : type of date conversion (epoch = epoch 
milliseconds, iso = ISO8601) + default is epoch double_precision : The number of decimal places to use when encoding floating point values, default 10. force_ascii : force encoded string to be ASCII, default True. @@ -578,8 +583,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', Returns ------- - result : a JSON compatible string written to the path_or_buf; - if the path_or_buf is none, return a StringIO of the result + same type as input object with filtered info axis """ @@ -592,98 +596,138 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', force_ascii=force_ascii, date_unit=date_unit) -# install the indexerse -for _name, _indexer in indexing.get_indexers_list(): - PandasContainer._create_indexer(_name,_indexer) - - -class NDFrame(PandasContainer): - """ - N-dimensional analogue of DataFrame. Store multi-dimensional in a - size-mutable, labeled data structure - - Parameters - ---------- - data : BlockManager - axes : list - copy : boolean, default False - """ - # kludge - _default_stat_axis = 0 + def to_hdf(self, path_or_buf, key, **kwargs): + """ activate the HDFStore - def __init__(self, data, axes=None, copy=False, dtype=None): - if dtype is not None: - data = data.astype(dtype) - elif copy: - data = data.copy() + Parameters + ---------- + path_or_buf : the path (string) or buffer to put the store + key : string, an indentifier for the group in the store + mode : optional, {'a', 'w', 'r', 'r+'}, default 'a' + + ``'r'`` + Read-only; no data can be modified. + ``'w'`` + Write; a new file is created (an existing file with the same + name would be deleted). + ``'a'`` + Append; an existing file is opened for reading and writing, + and if the file does not exist it is created. + ``'r+'`` + It is similar to ``'a'``, but the file must already exist. 
+ complevel : int, 1-9, default 0 + If a complib is specified compression will be applied + where possible + complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None + If complevel is > 0 apply compression to objects written + in the store wherever possible + fletcher32 : bool, default False + If applying compression use the fletcher32 checksum - if axes is not None: - for i, ax in enumerate(axes): - data = data.reindex_axis(ax, axis=i) + """ - object.__setattr__(self, '_data', data) - object.__setattr__(self, '_item_cache', {}) + from pandas.io import pytables + return pytables.to_hdf(path_or_buf, key, self, **kwargs) - def astype(self, dtype, copy = True, raise_on_error = True): + def to_pickle(self, path): """ - Cast object to input numpy.dtype - Return a copy when copy = True (be really careful with this!) + Pickle (serialize) object to input file path Parameters ---------- - dtype : numpy.dtype or Python type - raise_on_error : raise on invalid input + path : string + File path + """ + from pandas.io.pickle import to_pickle + return to_pickle(self, path) - Returns - ------- - casted : type of caller + def save(self, path): # TODO remove in 0.13 + import warnings + from pandas.io.pickle import to_pickle + warnings.warn("save is deprecated, use to_pickle", FutureWarning) + return to_pickle(self, path) + + def load(self, path): # TODO remove in 0.13 + import warnings + from pandas.io.pickle import read_pickle + warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) + return read_pickle(path) + + def to_clipboard(self): """ + Attempt to write text representation of object to the system clipboard - mgr = self._data.astype(dtype, copy = copy, raise_on_error = raise_on_error) - return self._constructor(mgr) + Notes + ----- + Requirements for your platform + - Linux: xclip, or xsel (with gtk or PyQt4 modules) + - Windows: + - OS X: + """ + from pandas.io import clipboard + clipboard.to_clipboard(self) - @property - def axes(self): - return 
self._data.axes + #---------------------------------------------------------------------- + # Fancy Indexing - @property - def values(self): - return self._data.as_matrix() + @classmethod + def _create_indexer(cls, name, indexer): + """ create an indexer like _name in the class """ + iname = '_%s' % name + setattr(cls, iname, None) - @property - def empty(self): - return not all(len(ax) > 0 for ax in self.axes) + def _indexer(self): + if getattr(self, iname, None) is None: + setattr(self, iname, indexer(self, name)) + return getattr(self, iname) - def __nonzero__(self): - return not self.empty + setattr(cls, name, property(_indexer)) - # Python 3 compat - __bool__ = __nonzero__ + def get(self, key, default=None): + """ + Get item from object for given key (DataFrame column, Panel slice, + etc.). Returns default value if not found - @property - def ndim(self): - return self._data.ndim + Parameters + ---------- + key : object - def _set_axis(self, axis, labels): - self._data.set_axis(axis, labels) - self._clear_item_cache() + Returns + ------- + value : type of items contained in object + """ + try: + return self[key] + except KeyError: + return default def __getitem__(self, item): return self._get_item_cache(item) def _get_item_cache(self, item): cache = self._item_cache - try: - return cache[item] - except Exception: + res = cache.get(item) + if res is None: values = self._data.get(item) res = self._box_item_values(item, values) cache[item] = res - return res + res._cacher = (item,weakref.ref(self)) + return res def _box_item_values(self, key, values): raise NotImplementedError + def _maybe_cache_changed(self, item, value): + """ the object has called back to us saying + maybe it has changed """ + self._data.set(item, value) + + def _maybe_update_cacher(self): + """ see if we need to update our parent cacher """ + cacher = getattr(self,'_cacher',None) + if cacher is not None: + cacher[1]()._maybe_cache_changed(cacher[0],self) + def _clear_item_cache(self): 
self._item_cache.clear() @@ -719,106 +763,1535 @@ def __delitem__(self, key): # exception: self._data.delete(key) - try: - del self._item_cache[key] - except KeyError: - pass + try: + del self._item_cache[key] + except KeyError: + pass + + def take(self, indices, axis=0, convert=True): + """ + Analogous to ndarray.take + + Parameters + ---------- + indices : list / array of ints + axis : int, default 0 + convert : translate neg to pos indices (default) + + Returns + ------- + taken : type of caller + """ + + # check/convert indicies here + if convert: + axis = self._get_axis_number(axis) + indices = _maybe_convert_indices( + indices, len(self._get_axis(axis))) + + if axis == 0: + labels = self._get_axis(axis) + new_items = labels.take(indices) + new_data = self._data.reindex_axis(new_items, axis=0) + else: + new_data = self._data.take(indices, axis=axis, verify=False) + return self._constructor(new_data) + + def select(self, crit, axis=0): + """ + Return data corresponding to axis labels matching criteria + + Parameters + ---------- + crit : function + To be called on each index (label). Should return True or False + axis : int + + Returns + ------- + selection : type of caller + """ + axis = self._get_axis_number(axis) + axis_name = self._get_axis_name(axis) + axis_values = self._get_axis(axis) + + if len(axis_values) > 0: + new_axis = axis_values[ + np.asarray([bool(crit(label)) for label in axis_values])] + else: + new_axis = axis_values + + return self.reindex(**{axis_name: new_axis}) + + def reindex_like(self, other, method=None, copy=True, limit=None): + """ return an object with matching indicies to myself + + Parameters + ---------- + other : Object + method : string or None + copy : boolean, default True + limit : int, default None + Maximum size gap to forward or backward fill + + Notes + ----- + Like calling s.reindex(index=other.index, columns=other.columns, + method=...) 
+ + Returns + ------- + reindexed : same as input + """ + d = other._construct_axes_dict(method=method) + return self.reindex(**d) + + def drop(self, labels, axis=0, level=None): + """ + Return new object with labels in requested axis removed + + Parameters + ---------- + labels : array-like + axis : int + level : int or name, default None + For MultiIndex + + Returns + ------- + dropped : type of caller + """ + axis_name = self._get_axis_name(axis) + axis, axis_ = self._get_axis(axis), axis + + if axis.is_unique: + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError('axis must be a MultiIndex') + new_axis = axis.drop(labels, level=level) + else: + new_axis = axis.drop(labels) + dropped = self.reindex(**{axis_name: new_axis}) + try: + dropped.axes[axis_].set_names(axis.names, inplace=True) + except AttributeError: + pass + return dropped + + else: + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError('axis must be a MultiIndex') + indexer = -lib.ismember(axis.get_level_values(level), + set(labels)) + else: + indexer = -axis.isin(labels) + + slicer = [slice(None)] * self.ndim + slicer[self._get_axis_number(axis_name)] = indexer + + return self.ix[tuple(slicer)] + + def add_prefix(self, prefix): + """ + Concatenate prefix string with panel items names. 
+ + Parameters + ---------- + prefix : string + + Returns + ------- + with_prefix : type of caller + """ + new_data = self._data.add_prefix(prefix) + return self._constructor(new_data) + + def add_suffix(self, suffix): + """ + Concatenate suffix string with panel items names + + Parameters + ---------- + suffix : string + + Returns + ------- + with_suffix : type of caller + """ + new_data = self._data.add_suffix(suffix) + return self._constructor(new_data) + + def sort_index(self, axis=0, ascending=True): + """ + Sort object by labels (along an axis) + + Parameters + ---------- + axis : {0, 1} + Sort index/rows versus columns + ascending : boolean, default True + Sort ascending vs. descending + + Returns + ------- + sorted_obj : type of caller + """ + axis = self._get_axis_number(axis) + axis_name = self._get_axis_name(axis) + labels = self._get_axis(axis) + + sort_index = labels.argsort() + if not ascending: + sort_index = sort_index[::-1] + + new_axis = labels.take(sort_index) + return self.reindex(**{axis_name: new_axis}) + + def reindex(self, *args, **kwargs): + """Conform DataFrame to new index with optional filling logic, placing + NA/NaN in locations having no value in the previous index. A new object + is produced unless the new index is equivalent to the current one and + copy=False + + Parameters + ---------- + axes : array-like, optional (can be specified in order, or as keywords) + New labels / index to conform to. 
Preferably an Index object to + avoid duplicating data + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed DataFrame + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + copy : boolean, default True + Return a new object, even if the passed indexes are the same + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + fill_value : scalar, default np.NaN + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value + limit : int, default None + Maximum size gap to forward or backward fill + takeable : boolean, default False + treat the passed as positional values + + Examples + -------- + >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C']) + + Returns + ------- + reindexed : same type as calling instance + """ + + # construct the args + axes, kwargs = self._construct_axes_from_arguments(args, kwargs) + method = kwargs.get('method') + level = kwargs.get('level') + copy = kwargs.get('copy', True) + limit = kwargs.get('limit') + fill_value = kwargs.get('fill_value', np.nan) + takeable = kwargs.get('takeable', False) + + self._consolidate_inplace() + + # check if we are a multi reindex + if self._needs_reindex_multi(axes, method, level): + try: + return self._reindex_multi(axes, copy, fill_value) + except: + pass + + # perform the reindex on the axes + if copy and not com._count_not_none(*axes.values()): + return self.copy() + + return self._reindex_axes(axes, level, limit, method, fill_value, copy, takeable=takeable) + + def _reindex_axes(self, axes, level, limit, method, fill_value, copy, takeable=False): + """ perform the reinxed for all the axes """ + obj = self + for a in self._AXIS_ORDERS: + labels = axes[a] + if labels is None: + continue + + # convert to an index if we are not a multi-selection + if level is None: + labels = 
_ensure_index(labels) + + axis = self._get_axis_number(a) + new_index, indexer = self._get_axis(a).reindex( + labels, level=level, limit=limit, takeable=takeable) + obj = obj._reindex_with_indexers( + {axis: [labels, indexer]}, method, fill_value, copy) + + return obj + + def _needs_reindex_multi(self, axes, method, level): + """ check if we do need a multi reindex """ + return (com._count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type + + def _reindex_multi(self, axes, copy, fill_value): + return NotImplemented + + def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, + limit=None, fill_value=np.nan): + """Conform input object to new index with optional filling logic, placing + NA/NaN in locations having no value in the previous index. A new object + is produced unless the new index is equivalent to the current one and + copy=False + + Parameters + ---------- + index : array-like, optional + New labels / index to conform to. 
Preferably an Index object to + avoid duplicating data + axis : allowed axis for the input + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed DataFrame + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + copy : boolean, default True + Return a new object, even if the passed indexes are the same + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + limit : int, default None + Maximum size gap to forward or backward fill + + Examples + -------- + >>> df.reindex_axis(['A', 'B', 'C'], axis=1) + + See also + -------- + DataFrame.reindex, DataFrame.reindex_like + + Returns + ------- + reindexed : same type as calling instance + """ + self._consolidate_inplace() + + axis_name = self._get_axis_name(axis) + axis_values = self._get_axis(axis_name) + new_index, indexer = axis_values.reindex(labels, method, level, + limit=limit, copy_if_needed=True) + return self._reindex_with_indexers({axis: [new_index, indexer]}, method, fill_value, copy) + + def _reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, copy=False): + + # reindex doing multiple operations on different axes if indiciated + new_data = self._data + for axis in sorted(reindexers.keys()): + index, indexer = reindexers[axis] + baxis = self._get_block_manager_axis(axis) + + # reindex the axis + if method is not None: + new_data = new_data.reindex_axis( + index, method=method, axis=baxis, + fill_value=fill_value, copy=copy) + + elif indexer is not None: + # TODO: speed up on homogeneous DataFrame objects + indexer = com._ensure_int64(indexer) + new_data = new_data.reindex_indexer(index, indexer, axis=baxis, + fill_value=fill_value) + + elif baxis == 0 and index is not None and index is not new_data.axes[baxis]: + new_data = new_data.reindex_items(index, copy=copy, + fill_value=fill_value) + + elif baxis > 0 
and index is not None and index is not new_data.axes[baxis]: + new_data = new_data.copy(deep=copy) + new_data.set_axis(baxis, index) + + if copy and new_data is self._data: + new_data = new_data.copy() + + return self._constructor(new_data) + + def _reindex_axis(self, new_index, fill_method, axis, copy): + new_data = self._data.reindex_axis(new_index, axis=axis, + method=fill_method, copy=copy) + + if new_data is self._data and not copy: + return self + else: + return self._constructor(new_data) + + def filter(self, items=None, like=None, regex=None, axis=None): + """ + Restrict the info axis to set of items or wildcard + + Parameters + ---------- + items : list-like + List of info axis to restrict to (must not all be present) + like : string + Keep info axis where "arg in col == True" + regex : string (regular expression) + Keep info axis with re.search(regex, col) == True + + Notes + ----- + Arguments are mutually exclusive, but this is not checked for + + """ + import re + + if axis is None: + axis = self._info_axis_name + axis_name = self._get_axis_name(axis) + axis_values = self._get_axis(axis_name) + + if items is not None: + return self.reindex(**{axis_name: [r for r in items if r in axis_values]}) + elif like: + matchf = lambda x: (like in x if isinstance(x, compat.string_types) + else like in str(x)) + return self.select(matchf, axis=axis_name) + elif regex: + matcher = re.compile(regex) + return self.select(lambda x: matcher.search(x) is not None, axis=axis_name) + else: + raise ValueError('items was None!') + + #---------------------------------------------------------------------- + # Attribute access + + def _propogate_attributes(self, other): + """ propogate attributes from other to self""" + for name in self._prop_attributes: + object.__setattr__(self, name, getattr(other, name, None)) + return self + + def __getattr__(self, name): + """After regular attribute access, try looking up the name of a the info + This allows simpler access to columns for 
interactive use.""" + if name in self._info_axis: + return self[name] + raise AttributeError("'%s' object has no attribute '%s'" % + (type(self).__name__, name)) + + def __setattr__(self, name, value): + """After regular attribute access, try looking up the name of the info + This allows simpler access to columns for interactive use.""" + if name in self._internal_names_set: + object.__setattr__(self, name, value) + else: + try: + existing = getattr(self, name) + if isinstance(existing, Index): + object.__setattr__(self, name, value) + elif name in self._info_axis: + self[name] = value + else: + object.__setattr__(self, name, value) + except (AttributeError, TypeError): + object.__setattr__(self, name, value) + + #---------------------------------------------------------------------- + # Getting and setting elements + + #---------------------------------------------------------------------- + # Consolidation of internals + + def _consolidate_inplace(self): + f = lambda: self._data.consolidate() + self._data = self._protect_consolidate(f) + + def consolidate(self, inplace=False): + """ + Compute NDFrame with "consolidated" internals (data of each dtype + grouped together in a single ndarray). 
Mainly an internal API function, + but available here to the savvy user + + Parameters + ---------- + inplace : boolean, default False + If False return new object, otherwise modify existing object + + Returns + ------- + consolidated : type of caller + """ + if inplace: + self._consolidate_inplace() + else: + f = lambda: self._data.consolidate() + cons_data = self._protect_consolidate(f) + if cons_data is self._data: + cons_data = cons_data.copy() + return self._constructor(cons_data) + + @property + def _is_mixed_type(self): + f = lambda: self._data.is_mixed_type + return self._protect_consolidate(f) + + @property + def _is_numeric_mixed_type(self): + f = lambda: self._data.is_numeric_mixed_type + return self._protect_consolidate(f) + + def _protect_consolidate(self, f): + blocks_before = len(self._data.blocks) + result = f() + if len(self._data.blocks) != blocks_before: + self._clear_item_cache() + return result + + #---------------------------------------------------------------------- + # Internal Interface Methods + + def as_matrix(self, columns=None): + """ + Convert the frame to its Numpy-array matrix representation. Columns + are presented in sorted order unless a specific list of columns is + provided. + + NOTE: the dtype will be a lower-common-denominator dtype (implicit upcasting) + that is to say if the dtypes (even of numeric types) are mixed, the one that accomodates all will be chosen + use this with care if you are not dealing with the blocks + + e.g. 
if the dtypes are float16,float32 -> float32 + float16,float32,float64 -> float64 + int32,uint8 -> int32 + + + Returns + ------- + values : ndarray + If the DataFrame is heterogeneous and contains booleans or objects, + the result will be of dtype=object + """ + self._consolidate_inplace() + if self._AXIS_REVERSED: + return self._data.as_matrix(columns).T + return self._data.as_matrix(columns) + + @property + def values(self): + return self.as_matrix() + + @property + def _get_values(self): + # compat + return self.as_matrix() + + def get_values(self): + """ same as values (but handles sparseness conversions) """ + return self.as_matrix() + + def get_dtype_counts(self): + """ return the counts of dtypes in this frame """ + from pandas import Series + return Series(self._data.get_dtype_counts()) + + def get_ftype_counts(self): + """ return the counts of ftypes in this frame """ + from pandas import Series + return Series(self._data.get_ftype_counts()) + + def as_blocks(self, columns=None): + """ + Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. + are presented in sorted order unless a specific list of columns is + provided. + + NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) + + Parameters + ---------- + columns : array-like + Specific column order + + Returns + ------- + values : a list of Object + """ + self._consolidate_inplace() + + bd = dict() + for b in self._data.blocks: + b = b.reindex_items_from(columns or b.items) + bd[str(b.dtype)] = self._constructor( + BlockManager([b], [b.items, self.index])) + return bd + + @property + def blocks(self): + return self.as_blocks() + + def astype(self, dtype, copy=True, raise_on_error=True): + """ + Cast object to input numpy.dtype + Return a copy when copy = True (be really careful with this!) 
+ + Parameters + ---------- + dtype : numpy.dtype or Python type + raise_on_error : raise on invalid input + + Returns + ------- + casted : type of caller + """ + + mgr = self._data.astype( + dtype, copy=copy, raise_on_error=raise_on_error) + return self._constructor(mgr)._propogate_attributes(self) + + def copy(self, deep=True): + """ + Make a copy of this object + + Parameters + ---------- + deep : boolean, default True + Make a deep copy, i.e. also copy data + + Returns + ------- + copy : type of caller + """ + data = self._data + if deep: + data = data.copy() + return self._constructor(data) + + def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): + """ + Attempt to infer better dtype for object columns + + Parameters + ---------- + convert_dates : if True, attempt to soft convert_dates, if 'coerce', force conversion (and non-convertibles get NaT) + convert_numeric : if True attempt to coerce to numerbers (including strings), non-convertibles get NaN + copy : Boolean, if True, return copy, default is True + + Returns + ------- + converted : asm as input object + """ + return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, copy=copy)) + + #---------------------------------------------------------------------- + # Filling NA's + + def fillna(self, value=None, method=None, axis=0, inplace=False, + limit=None, downcast=None): + """ + Fill NA/NaN values using the specified method + + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + value : scalar or dict + Value to use to fill holes (e.g. 0), alternately a dict of values + specifying which value to use for each column (columns not in the + dict will not be filled). This value cannot be a list. 
+ axis : {0, 1}, default 0 + 0: fill column-by-column + 1: fill row-by-row + inplace : boolean, default False + If True, fill the DataFrame in place. Note: this will modify any + other views on this DataFrame, like if you took a no-copy slice of + an existing DataFrame, for example a column in a DataFrame. Returns + a reference to the filled object, which is self if inplace=True + limit : int, default None + Maximum size gap to forward or backward fill + downcast : dict, default is None, a dict of item->dtype of what to + downcast if possible + + See also + -------- + reindex, asfreq + + Returns + ------- + filled : DataFrame + """ + if isinstance(value, (list, tuple)): + raise TypeError('"value" parameter must be a scalar or dict, but ' + 'you passed a "{0}"'.format(type(value).__name__)) + self._consolidate_inplace() + + axis = self._get_axis_number(axis) + if axis + 1 > self._AXIS_LEN: + raise ValueError( + "invalid axis passed for object type {0}".format(type(self))) + + if value is None: + if method is None: + raise ValueError('must specify a fill method or value') + if self._is_mixed_type and axis == 1: + if inplace: + raise NotImplementedError() + return self.T.fillna(method=method, limit=limit).T + + method = com._clean_fill_method(method) + new_data = self._data.interpolate(method=method, + axis=axis, + limit=limit, + inplace=inplace, + coerce=True) + else: + if method is not None: + raise ValueError('cannot specify both a fill method and value') + + if len(self._get_axis(axis)) == 0: + return self + if isinstance(value, (dict, com.ABCSeries)): + if axis == 1: + raise NotImplementedError('Currently only can fill ' + 'with dict/Series column ' + 'by column') + + result = self if inplace else self.copy() + for k, v in compat.iteritems(value): + if k not in result: + continue + obj = result[k] + obj.fillna(v, inplace=True) + obj._maybe_update_cacher() + return result + else: + new_data = self._data.fillna(value, inplace=inplace, + downcast=downcast) + + if 
inplace: + self._data = new_data + else: + return self._constructor(new_data) + + def ffill(self, axis=0, inplace=False, limit=None): + return self.fillna(method='ffill', axis=axis, inplace=inplace, + limit=limit) + + def bfill(self, axis=0, inplace=False, limit=None): + return self.fillna(method='bfill', axis=axis, inplace=inplace, + limit=limit) + + def replace(self, to_replace=None, value=None, inplace=False, limit=None, + regex=False, method=None, axis=None): + """ + Replace values given in 'to_replace' with 'value'. + + Parameters + ---------- + to_replace : str, regex, list, dict, Series, numeric, or None + + * str or regex: + + - str: string exactly matching `to_replace` will be replaced + with `value` + - regex: regexs matching `to_replace` will be replaced with + `value` + + * list of str, regex, or numeric: + + - First, if `to_replace` and `value` are both lists, they + **must** be the same length. + - Second, if ``regex=True`` then all of the strings in **both** + lists will be interpreted as regexs otherwise they will match + directly. This doesn't matter much for `value` since there + are only a few possible substitution regexes you can use. + - str and regex rules apply as above. + + * dict: + + - Nested dictionaries, e.g., {'a': {'b': nan}}, are read as + follows: look in column 'a' for the value 'b' and replace it + with nan. You can nest regular expressions as well. Note that + column names (the top-level dictionary keys in a nested + dictionary) **cannot** be regular expressions. + - Keys map to column names and values map to substitution + values. You can treat this as a special case of passing two + lists except that you are specifying the column to search in. + + * None: + + - This means that the ``regex`` argument must be a string, + compiled regular expression, or list, dict, ndarray or Series + of such elements. If `value` is also ``None`` then this + **must** be a nested dictionary or ``Series``. 
+ + See the examples section for examples of each of these. + value : scalar, dict, list, str, regex, default None + Value to use to fill holes (e.g. 0), alternately a dict of values + specifying which value to use for each column (columns not in the + dict will not be filled). Regular expressions, strings and lists or + dicts of such objects are also allowed. + inplace : boolean, default False + If True, fill the DataFrame in place. Note: this will modify any + other views on this DataFrame, like if you took a no-copy slice of + an existing DataFrame, for example a column in a DataFrame. Returns + a reference to the filled object, which is self if inplace=True + limit : int, default None + Maximum size gap to forward or backward fill + regex : bool or same types as `to_replace`, default False + Whether to interpret `to_replace` and/or `value` as regular + expressions. If this is ``True`` then `to_replace` *must* be a + string. Otherwise, `to_replace` must be ``None`` because this + parameter will be interpreted as a regular expression or a list, + dict, or array of regular expressions. + + See also + -------- + reindex, asfreq, fillna + + Returns + ------- + filled : DataFrame + + Raises + ------ + AssertionError + * If `regex` is not a ``bool`` and `to_replace` is not ``None``. + TypeError + * If `to_replace` is a ``dict`` and `value` is not a ``list``, + ``dict``, ``ndarray``, or ``Series`` + * If `to_replace` is ``None`` and `regex` is not compilable into a + regular expression or is a list, dict, ndarray, or Series. + ValueError + * If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but + they are not the same length. + + Notes + ----- + * Regex substitution is performed under the hood with ``re.sub``. The + rules for substitution for ``re.sub`` are the same. 
+ * Regular expressions will only substitute on strings, meaning you + cannot provide, for example, a regular expression matching floating + point numbers and expect the columns in your frame that have a + numeric dtype to be matched. However, if those floating point numbers + *are* strings, then you can do this. + * This method has *a lot* of options. You are encouraged to experiment + and play with this method to gain intuition about how it works. + + """ + if not com.is_bool(regex) and to_replace is not None: + raise AssertionError("'to_replace' must be 'None' if 'regex' is " + "not a bool") + if method is not None: + from warnings import warn + warn('the "method" argument is deprecated and will be removed in' + 'v0.13; this argument has no effect') + + if axis is not None: + from warnings import warn + warn('the "axis" argument is deprecated and will be removed in' + 'v0.13; this argument has no effect') + + self._consolidate_inplace() + + def is_dictlike(x): + return isinstance(x, (dict, com.ABCSeries)) + + if value is None: + if not is_dictlike(to_replace): + if not is_dictlike(regex): + raise TypeError('If "to_replace" and "value" are both None' + ' then regex must be a mapping') + to_replace = regex + regex = True + + items = to_replace.items() + keys, values = zip(*items) + + are_mappings = [is_dictlike(v) for v in values] + + if any(are_mappings): + if not all(are_mappings): + raise TypeError("If a nested mapping is passed, all values" + " of the top level mapping must be " + "mappings") + # passed a nested dict/Series + to_rep_dict = {} + value_dict = {} + + for k, v in items: + to_rep_dict[k] = v.keys() + value_dict[k] = v.values() + + to_replace, value = to_rep_dict, value_dict + else: + to_replace, value = keys, values + + return self.replace(to_replace, value, inplace=inplace, + limit=limit, regex=regex) + else: + + # need a non-zero len on all axes + for a in self._AXIS_ORDERS: + if not len(self._get_axis(a)): + return self + + new_data = self._data 
+ if is_dictlike(to_replace): + if is_dictlike(value): # {'A' : NA} -> {'A' : 0} + new_data = self._data + for c, src in compat.iteritems(to_replace): + if c in value and c in self: + new_data = new_data.replace(src, value[c], + filter=[c], + inplace=inplace, + regex=regex) + + # {'A': NA} -> 0 + elif not isinstance(value, (list, np.ndarray)): + new_data = self._data + for k, src in compat.iteritems(to_replace): + if k in self: + new_data = new_data.replace(src, value, + filter=[k], + inplace=inplace, + regex=regex) + else: + raise TypeError('Fill value must be scalar, dict, or ' + 'Series') + + elif isinstance(to_replace, (list, np.ndarray)): + # [NA, ''] -> [0, 'missing'] + if isinstance(value, (list, np.ndarray)): + if len(to_replace) != len(value): + raise ValueError('Replacement lists must match ' + 'in length. Expecting %d got %d ' % + (len(to_replace), len(value))) + + new_data = self._data.replace_list(to_replace, value, + inplace=inplace, + regex=regex) + + else: # [NA, ''] -> 0 + new_data = self._data.replace(to_replace, value, + inplace=inplace, regex=regex) + elif to_replace is None: + if not (com.is_re_compilable(regex) or + isinstance(regex, (list, np.ndarray)) or is_dictlike(regex)): + raise TypeError("'regex' must be a string or a compiled " + "regular expression or a list or dict of " + "strings or regular expressions, you " + "passed a {0}".format(type(regex))) + return self.replace(regex, value, inplace=inplace, limit=limit, + regex=True) + else: + + # dest iterable dict-like + if is_dictlike(value): # NA -> {'A' : 0, 'B' : -1} + new_data = self._data + + for k, v in compat.iteritems(value): + if k in self: + new_data = new_data.replace(to_replace, v, + filter=[k], + inplace=inplace, + regex=regex) + + elif not isinstance(value, (list, np.ndarray)): # NA -> 0 + new_data = self._data.replace(to_replace, value, + inplace=inplace, regex=regex) + else: + raise TypeError('Invalid "to_replace" type: ' + '{0}'.format(type(to_replace))) # pragma: no 
cover + + new_data = new_data.convert(copy=not inplace, convert_numeric=False) + + if inplace: + self._data = new_data + else: + return self._constructor(new_data) + + def interpolate(self, to_replace, method='pad', axis=0, inplace=False, + limit=None): + """Interpolate values according to different methods. + + Parameters + ---------- + to_replace : dict, Series + method : str + axis : int + inplace : bool + limit : int, default None + + Returns + ------- + frame : interpolated + + See Also + -------- + reindex, replace, fillna + """ + from warnings import warn + warn('DataFrame.interpolate will be removed in v0.13, please use ' + 'either DataFrame.fillna or DataFrame.replace instead', + FutureWarning) + if self._is_mixed_type and axis == 1: + return self.T.replace(to_replace, method=method, limit=limit).T + + method = com._clean_fill_method(method) + + if isinstance(to_replace, (dict, com.ABCSeries)): + if axis == 0: + return self.replace(to_replace, method=method, inplace=inplace, + limit=limit, axis=axis) + elif axis == 1: + obj = self.T + if inplace: + obj.replace(to_replace, method=method, limit=limit, + inplace=inplace, axis=0) + return obj.T + return obj.replace(to_replace, method=method, limit=limit, + inplace=inplace, axis=0).T + else: + raise ValueError('Invalid value for axis') + else: + new_data = self._data.interpolate(method=method, axis=axis, + limit=limit, inplace=inplace, + missing=to_replace, coerce=False) + + if inplace: + self._data = new_data + else: + return self._constructor(new_data) + + #---------------------------------------------------------------------- + # Action Methods + + def abs(self): + """ + Return an object with absolute value taken. 
Only applicable to objects + that are all numeric + + Returns + ------- + abs: type of caller + """ + obj = np.abs(self) + obj = com._possibly_cast_to_timedelta(obj, coerce=False) + return obj + + def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, + group_keys=True, squeeze=False): + """ + Group series using mapper (dict or key function, apply given function + to group, return result as series) or by a series of columns + + Parameters + ---------- + by : mapping function / list of functions, dict, Series, or tuple / + list of column names. + Called on each element of the object index to determine the groups. + If a dict or Series is passed, the Series or dict VALUES will be + used to determine the groups + axis : int, default 0 + level : int, level name, or sequence of such, default None + If the axis is a MultiIndex (hierarchical), group by a particular + level or levels + as_index : boolean, default True + For aggregated output, return object with group labels as the + index. Only relevant for DataFrame input. as_index=False is + effectively "SQL-style" grouped output + sort : boolean, default True + Sort group keys. 
Get better performance by turning this off + group_keys : boolean, default True + When calling apply, add group keys to index to identify pieces + squeeze : boolean, default False + reduce the dimensionaility of the return type if possible, otherwise + return a consistent type + + Examples + -------- + # DataFrame result + >>> data.groupby(func, axis=0).mean() + + # DataFrame result + >>> data.groupby(['col1', 'col2'])['col3'].mean() + + # DataFrame with hierarchical index + >>> data.groupby(['col1', 'col2']).mean() + + Returns + ------- + GroupBy object + + """ + + from pandas.core.groupby import groupby + axis = self._get_axis_number(axis) + return groupby(self, by, axis=axis, level=level, as_index=as_index, + sort=sort, group_keys=group_keys, squeeze=squeeze) + + def asfreq(self, freq, method=None, how=None, normalize=False): + """ + Convert all TimeSeries inside to specified frequency using DateOffset + objects. Optionally provide fill method to pad/backfill missing values. + + Parameters + ---------- + freq : DateOffset object, or string + method : {'backfill', 'bfill', 'pad', 'ffill', None} + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill methdo + how : {'start', 'end'}, default end + For PeriodIndex only, see PeriodIndex.asfreq + normalize : bool, default False + Whether to reset output index to midnight + + Returns + ------- + converted : type of caller + """ + from pandas.tseries.resample import asfreq + return asfreq(self, freq, method=method, how=how, + normalize=normalize) + + def at_time(self, time, asof=False): + """ + Select values at particular time of day (e.g. 
9:30AM) + + Parameters + ---------- + time : datetime.time or string + + Returns + ------- + values_at_time : type of caller + """ + try: + indexer = self.index.indexer_at_time(time, asof=asof) + return self.take(indexer, convert=False) + except AttributeError: + raise TypeError('Index must be DatetimeIndex') + + def between_time(self, start_time, end_time, include_start=True, + include_end=True): + """ + Select values between particular times of the day (e.g., 9:00-9:30 AM) + + Parameters + ---------- + start_time : datetime.time or string + end_time : datetime.time or string + include_start : boolean, default True + include_end : boolean, default True + + Returns + ------- + values_between_time : type of caller + """ + try: + indexer = self.index.indexer_between_time( + start_time, end_time, include_start=include_start, + include_end=include_end) + return self.take(indexer, convert=False) + except AttributeError: + raise TypeError('Index must be DatetimeIndex') + + def resample(self, rule, how=None, axis=0, fill_method=None, + closed=None, label=None, convention='start', + kind=None, loffset=None, limit=None, base=0): + """ + Convenience method for frequency conversion and resampling of regular + time-series data. + + Parameters + ---------- + rule : the offset string or object representing target conversion + how : string, method for down- or re-sampling, default to 'mean' for + downsampling + axis : int, optional, default 0 + fill_method : string, fill_method for upsampling, default None + closed : {'right', 'left'} + Which side of bin interval is closed + label : {'right', 'left'} + Which bin edge label to label bucket with + convention : {'start', 'end', 's', 'e'} + kind: "period"/"timestamp" + loffset: timedelta + Adjust the resampled time labels + limit: int, default None + Maximum size gap to when reindexing with fill_method + base : int, default 0 + For frequencies that evenly subdivide 1 day, the "origin" of the + aggregated intervals. 
For example, for '5min' frequency, base could + range from 0 through 4. Defaults to 0 + """ + from pandas.tseries.resample import TimeGrouper + axis = self._get_axis_number(axis) + sampler = TimeGrouper(rule, label=label, closed=closed, how=how, + axis=axis, kind=kind, loffset=loffset, + fill_method=fill_method, convention=convention, + limit=limit, base=base) + return sampler.resample(self) + + def first(self, offset): + """ + Convenience method for subsetting initial periods of time series data + based on a date offset + + Parameters + ---------- + offset : string, DateOffset, dateutil.relativedelta + + Examples + -------- + ts.last('10D') -> First 10 days + + Returns + ------- + subset : type of caller + """ + from pandas.tseries.frequencies import to_offset + if not isinstance(self.index, DatetimeIndex): + raise NotImplementedError + + if len(self.index) == 0: + return self + + offset = to_offset(offset) + end_date = end = self.index[0] + offset + + # Tick-like, e.g. 3 weeks + if not offset.isAnchored() and hasattr(offset, '_inc'): + if end_date in self.index: + end = self.index.searchsorted(end_date, side='left') + + return self.ix[:end] + + def last(self, offset): + """ + Convenience method for subsetting final periods of time series data + based on a date offset + + Parameters + ---------- + offset : string, DateOffset, dateutil.relativedelta + + Examples + -------- + ts.last('5M') -> Last 5 months + + Returns + ------- + subset : type of caller + """ + from pandas.tseries.frequencies import to_offset + if not isinstance(self.index, DatetimeIndex): + raise NotImplementedError + + if len(self.index) == 0: + return self + + offset = to_offset(offset) + + start_date = start = self.index[-1] - offset + start = self.index.searchsorted(start_date, side='right') + return self.ix[start:] + + def align(self, other, join='outer', axis=None, level=None, copy=True, + fill_value=np.nan, method=None, limit=None, fill_axis=0): + """ + Align two object on their axes with 
the + specified join method for each axis Index + + Parameters + ---------- + other : DataFrame or Series + join : {'outer', 'inner', 'left', 'right'}, default 'outer' + axis : allowed axis of the other object, default None + Align on index (0), columns (1), or both (None) + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + copy : boolean, default True + Always returns new objects. If copy=False and no reindexing is + required then original objects are returned. + fill_value : scalar, default np.NaN + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value + method : str, default None + limit : int, default None + fill_axis : {0, 1}, default 0 + Filling axis, method and limit + + Returns + ------- + (left, right) : (type of input, type of other) + Aligned objects + """ + from pandas import DataFrame, Series + + if isinstance(other, DataFrame): + return self._align_frame(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, + method=method, limit=limit, + fill_axis=fill_axis) + elif isinstance(other, Series): + return self._align_series(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, + method=method, limit=limit, + fill_axis=fill_axis) + else: # pragma: no cover + raise TypeError('unsupported type: %s' % type(other)) + + def _align_frame(self, other, join='outer', axis=None, level=None, + copy=True, fill_value=np.nan, method=None, limit=None, + fill_axis=0): + # defaults + join_index, join_columns = None, None + ilidx, iridx = None, None + clidx, cridx = None, None + + if axis is None or axis == 0: + if not self.index.equals(other.index): + join_index, ilidx, iridx = \ + self.index.join(other.index, how=join, level=level, + return_indexers=True) + + if axis is None or axis == 1: + if not self.columns.equals(other.columns): + join_columns, clidx, cridx = \ + self.columns.join(other.columns, how=join, level=level, + 
return_indexers=True) + + left = self._reindex_with_indexers({0: [join_index, ilidx], + 1: [join_columns, clidx]}, + copy=copy, fill_value=fill_value) + right = other._reindex_with_indexers({0: [join_index, iridx], + 1: [join_columns, cridx]}, + copy=copy, fill_value=fill_value) + + if method is not None: + left = left.fillna(axis=fill_axis, method=method, limit=limit) + right = right.fillna(axis=fill_axis, method=method, limit=limit) + + return left, right + + def _align_series(self, other, join='outer', axis=None, level=None, + copy=True, fill_value=None, method=None, limit=None, + fill_axis=0): + from pandas import DataFrame + + fdata = self._data + if axis == 0: + join_index = self.index + lidx, ridx = None, None + if not self.index.equals(other.index): + join_index, lidx, ridx = self.index.join(other.index, how=join, + return_indexers=True) + + if lidx is not None: + fdata = fdata.reindex_indexer(join_index, lidx, axis=1) + elif axis == 1: + join_index = self.columns + lidx, ridx = None, None + if not self.columns.equals(other.index): + join_index, lidx, ridx = \ + self.columns.join(other.index, how=join, + return_indexers=True) + + if lidx is not None: + fdata = fdata.reindex_indexer(join_index, lidx, axis=0) + else: + raise ValueError('Must specify axis=0 or 1') + + if copy and fdata is self._data: + fdata = fdata.copy() + + left_result = DataFrame(fdata) + right_result = other if ridx is None else other.reindex(join_index) + + fill_na = notnull(fill_value) or (method is not None) + if fill_na: + return (left_result.fillna(fill_value, method=method, limit=limit, + axis=fill_axis), + right_result.fillna(fill_value, method=method, + limit=limit)) + else: + return left_result, right_result + + def where(self, cond, other=np.nan, inplace=False, try_cast=False, raise_on_error=True): + """ + Return an object of same shape as self and whose corresponding + entries are from self where cond is True and otherwise are from other. 
+ + Parameters + ---------- + cond : boolean DataFrame or array + other : scalar or DataFrame + inplace : boolean, default False + Whether to perform the operation in place on the data + try_cast : boolean, default False + try to cast the result back to the input type (if possible), + raise_on_error : boolean, default True + Whether to raise on invalid data types (e.g. trying to where on + strings) + + Returns + ------- + wh : DataFrame + """ + if isinstance(cond, NDFrame): + cond = cond.reindex(**self._construct_axes_dict()) + else: + if not hasattr(cond, 'shape'): + raise ValueError('where requires an ndarray like object for its ' + 'condition') + if cond.shape != self.shape: + raise ValueError( + 'Array conditional must be same shape as self') + cond = self._constructor(cond, **self._construct_axes_dict()) + + if inplace: + cond = -(cond.fillna(True).astype(bool)) + else: + cond = cond.fillna(False).astype(bool) + + # try to align + try_quick = True + if hasattr(other, 'align'): + + # align with me + if other.ndim <= self.ndim: + + _, other = self.align(other, join='left', fill_value=np.nan) + + # slice me out of the other + else: + raise NotImplemented + + elif is_list_like(other): + + if self.ndim == 1: + + # try to set the same dtype as ourselves + new_other = np.array(other, dtype=self.dtype) + if not (new_other == np.array(other)).all(): + other = np.array(other) + + # we can't use our existing dtype + # because of incompatibilities + try_quick = False + else: + other = new_other + else: + + other = np.array(other) + + if isinstance(other, np.ndarray): + + if other.shape != self.shape: - # originally used to get around 2to3's changes to iteritems. - # Now unnecessary. 
- def iterkv(self, *args, **kwargs): - warnings.warn("iterkv is deprecated and will be removed in a future " - "release, use ``iteritems`` instead.", DeprecationWarning) - return self.iteritems(*args, **kwargs) + if self.ndim == 1: - def get_dtype_counts(self): - """ return the counts of dtypes in this frame """ - from pandas import Series - return Series(self._data.get_dtype_counts()) + icond = cond.values - def pop(self, item): - """ - Return item and drop from frame. Raise KeyError if not found. - """ - result = self[item] - del self[item] - return result + # GH 2745 / GH 4192 + # treat like a scalar + if len(other) == 1: + other = np.array(other[0]) - def squeeze(self): - """ squeeze length 1 dimensions """ - try: - return self.ix[tuple([ slice(None) if len(a) > 1 else a[0] for a in self.axes ])] - except: - return self + # GH 3235 + # match True cond to other + elif len(cond[icond]) == len(other): - def _expand_axes(self, key): - new_axes = [] - for k, ax in zip(key, self.axes): - if k not in ax: - if type(k) != ax.dtype.type: - ax = ax.astype('O') - new_axes.append(ax.insert(len(ax), k)) + # try to not change dtype at first (if try_quick) + if try_quick: + + try: + new_other = _values_from_object(self).copy() + new_other[icond] = other + other = new_other + except: + try_quick = False + + # let's create a new (if we failed at the above + # or not try_quick + if not try_quick: + + dtype, fill_value = _maybe_promote(other.dtype) + new_other = np.empty(len(icond), dtype=dtype) + new_other.fill(fill_value) + com._maybe_upcast_putmask(new_other, icond, other) + other = new_other + + else: + raise ValueError( + 'Length of replacements must equal series length') + + else: + raise ValueError('other must be the same shape as self ' + 'when an ndarray') + + # we are the same shape, so create an actual object for alignment else: - new_axes.append(ax) + other = self._constructor(other, **self._construct_axes_dict()) - return new_axes + if inplace: + # we may have 
different type blocks come out of putmask, so + # reconstruct the block manager + self._data = self._data.putmask(cond, other, inplace=True) - #---------------------------------------------------------------------- - # Consolidation of internals + else: + new_data = self._data.where( + other, cond, raise_on_error=raise_on_error, try_cast=try_cast) - def _consolidate_inplace(self): - f = lambda: self._data.consolidate() - self._data = self._protect_consolidate(f) + return self._constructor(new_data) - def consolidate(self, inplace=False): + def mask(self, cond): """ - Compute NDFrame with "consolidated" internals (data of each dtype - grouped together in a single ndarray). Mainly an internal API function, - but available here to the savvy user + Returns copy of self whose values are replaced with nan if the + inverted condition is True Parameters ---------- - inplace : boolean, default False - If False return new object, otherwise modify existing object + cond: boolean object or array Returns ------- - consolidated : type of caller + wh: same as input """ - if inplace: - self._consolidate_inplace() - else: - f = lambda: self._data.consolidate() - cons_data = self._protect_consolidate(f) - if cons_data is self._data: - cons_data = cons_data.copy() - return self._constructor(cons_data) + return self.where(~cond, np.nan) - @property - def _is_mixed_type(self): - f = lambda: self._data.is_mixed_type - return self._protect_consolidate(f) - - @property - def _is_numeric_mixed_type(self): - f = lambda: self._data.is_numeric_mixed_type - return self._protect_consolidate(f) - - def _protect_consolidate(self, f): - blocks_before = len(self._data.blocks) - result = f() - if len(self._data.blocks) != blocks_before: - self._clear_item_cache() - return result + def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, + **kwds): + """ + Percent change over given number of periods - def _reindex_axis(self, new_index, fill_method, axis, copy): - new_data = 
self._data.reindex_axis(new_index, axis=axis, - method=fill_method, copy=copy) + Parameters + ---------- + periods : int, default 1 + Periods to shift for forming percent change + fill_method : str, default 'pad' + How to handle NAs before computing percent changes + limit : int, default None + The number of consecutive NAs to fill before stopping + freq : DateOffset, timedelta, or offset alias string, optional + Increment to use from time series API (e.g. 'M' or BDay()) - if new_data is self._data and not copy: - return self + Returns + ------- + chg : Series or DataFrame + """ + if fill_method is None: + data = self else: - return self._constructor(new_data) + data = self.fillna(method=fill_method, limit=limit) + rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1 + if freq is None: + mask = com.isnull(_values_from_object(self)) + np.putmask(rs.values, mask, np.nan) + return rs def cumsum(self, axis=None, skipna=True): """ @@ -837,13 +2310,13 @@ def cumsum(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, 0.) 
@@ -856,9 +2329,6 @@ def cumsum(self, axis=None, skipna=True): result = y.cumsum(axis) return self._wrap_array(result, self.axes, copy=False) - def _wrap_array(self, array, axes, copy=False): - raise NotImplementedError - def cumprod(self, axis=None, skipna=True): """ Return cumulative product over requested axis as DataFrame @@ -876,13 +2346,13 @@ def cumprod(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, 1.) @@ -911,13 +2381,13 @@ def cummax(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, -np.inf) @@ -947,13 +2417,13 @@ def cummin(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, np.inf) @@ -966,136 +2436,76 @@ def cummin(self, axis=None, skipna=True): result = np.minimum.accumulate(y, axis) return self._wrap_array(result, self.axes, copy=False) - def copy(self, deep=True): - """ - Make a copy of this object - - Parameters - ---------- - deep : boolean, default True - Make a deep copy, i.e. 
also copy data - - Returns - ------- - copy : type of caller - """ - data = self._data - if deep: - data = data.copy() - return self._constructor(data) - - def swaplevel(self, i, j, axis=0): + def tshift(self, periods=1, freq=None, **kwds): """ - Swap levels i and j in a MultiIndex on a particular axis + Shift the time index, using the index's frequency if available Parameters ---------- - i, j : int, string (can be mixed) - Level of index to be swapped. Can pass level name as string. - - Returns - ------- - swapped : type of caller (new object) - """ - axis = self._get_axis_number(axis) - result = self.copy() - labels = result._data.axes[axis] - result._data.set_axis(axis, labels.swaplevel(i, j)) - return result - - def add_prefix(self, prefix): - """ - Concatenate prefix string with panel items names. + periods : int + Number of periods to move, can be positive or negative + freq : DateOffset, timedelta, or time rule string, default None + Increment to use from datetools module or time rule (e.g. 'EOM') - Parameters - ---------- - prefix : string + Notes + ----- + If freq is not specified then tries to use the freq or inferred_freq + attributes of the index. 
If neither of those attributes exist, a + ValueError is thrown Returns ------- - with_prefix : type of caller + shifted : Series """ - new_data = self._data.add_prefix(prefix) - return self._constructor(new_data) + if freq is None: + freq = getattr(self.index, 'freq', None) - def add_suffix(self, suffix): - """ - Concatenate suffix string with panel items names + if freq is None: + freq = getattr(self.index, 'inferred_freq', None) - Parameters - ---------- - suffix : string + if freq is None: + msg = 'Freq was not given and was not set in the index' + raise ValueError(msg) - Returns - ------- - with_suffix : type of caller - """ - new_data = self._data.add_suffix(suffix) - return self._constructor(new_data) + return self.shift(periods, freq, **kwds) - def rename_axis(self, mapper, axis=0, copy=True): - """ - Alter index and / or columns using input function or functions. - Function / dict values must be unique (1-to-1). Labels not contained in - a dict / Series will be left as-is. + def truncate(self, before=None, after=None, copy=True): + """Function truncate a sorted DataFrame / Series before and/or after + some particular dates. 
Parameters ---------- - mapper : dict-like or function, optional - axis : int, default 0 - copy : boolean, default True - Also copy underlying data - - See also - -------- - DataFrame.rename + before : date + Truncate before date + after : date + Truncate after date Returns ------- - renamed : type of caller + truncated : type of caller """ - # should move this at some point - from pandas.core.series import _get_rename_function - - mapper_f = _get_rename_function(mapper) - - axis = self._get_axis_number(axis) - if axis == 0: - new_data = self._data.rename_items(mapper_f, copydata=copy) - else: - new_data = self._data.rename_axis(mapper_f, axis=axis) - if copy: - new_data = new_data.copy() - return self._constructor(new_data) + # if we have a date index, convert to dates, otherwise + # treat like a slice + if self.index.is_all_dates: + from pandas.tseries.tools import to_datetime + before = to_datetime(before) + after = to_datetime(after) - def take(self, indices, axis=0, convert=True): - """ - Analogous to ndarray.take + if before is not None and after is not None: + if before > after: + raise AssertionError('Truncate: %s must be after %s' % + (before, after)) - Parameters - ---------- - indices : list / array of ints - axis : int, default 0 - convert : translate neg to pos indices (default) + result = self.ix[before:after] - Returns - ------- - taken : type of caller - """ + if isinstance(self.index, MultiIndex): + result.index = self.index.truncate(before, after) - # check/convert indicies here - if convert: - axis = self._get_axis_number(axis) - indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) + if copy: + result = result.copy() - if axis == 0: - labels = self._get_axis(axis) - new_items = labels.take(indices) - new_data = self._data.reindex_axis(new_items, axis=0) - else: - new_data = self._data.take(indices, axis=axis, verify=False) - return self._constructor(new_data) + return result def tz_convert(self, tz, axis=0, copy=True): """ @@ 
-1170,44 +2580,6 @@ def tz_localize(self, tz, axis=0, copy=True): return new_obj -# Good for either Series or DataFrame - - -def truncate(self, before=None, after=None, copy=True): - """Function truncate a sorted DataFrame / Series before and/or after - some particular dates. - - Parameters - ---------- - before : date - Truncate before date - after : date - Truncate after date - copy : boolean, default True - - Returns - ------- - truncated : type of caller - """ - - # if we have a date index, convert to dates, otherwise - # treat like a slice - if self.index.is_all_dates: - from pandas.tseries.tools import to_datetime - before = to_datetime(before) - after = to_datetime(after) - - if before is not None and after is not None: - if before > after: - raise AssertionError('Truncate: %s must be after %s' % - (before, after)) - - result = self.ix[before:after] - - if isinstance(self.index, MultiIndex): - result.index = self.index.truncate(before, after) - - if copy: - result = result.copy() - - return result +# install the indexerse +for _name, _indexer in indexing.get_indexers_list(): + NDFrame._create_indexer(_name, _indexer) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index e12795682460c..d85ef1abd0fbc 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -230,7 +230,7 @@ def name(self): @property def _selection_list(self): - if not isinstance(self._selection, (list, tuple, np.ndarray)): + if not isinstance(self._selection, (list, tuple, Series, np.ndarray)): return [self._selection] return self._selection @@ -279,7 +279,7 @@ def get_group(self, name, obj=None): obj = self.obj inds = self.indices[name] - return obj.take(inds, axis=self.axis) + return obj.take(inds, axis=self.axis, convert=False) def __iter__(self): """ @@ -377,7 +377,11 @@ def median(self): except GroupByError: raise except Exception: # pragma: no cover - f = lambda x: x.median(axis=self.axis) + + def f(x): + if isinstance(x, np.ndarray): + x = Series(x) + return 
x.median(axis=self.axis) return self._python_agg_general(f) def std(self, ddof=1): @@ -894,9 +898,9 @@ def _aggregate_series_fast(self, obj, func): group_index, _, ngroups = self.group_info # avoids object / Series creation overhead - dummy = obj[:0].copy() + dummy = obj._get_values(slice(None,0)).to_dense() indexer = _algos.groupsort_indexer(group_index, ngroups)[0] - obj = obj.take(indexer) + obj = obj.take(indexer, convert=False) group_index = com.take_nd(group_index, indexer, allow_fill=False) grouper = lib.SeriesGrouper(obj, func, group_index, ngroups, dummy) @@ -904,19 +908,18 @@ def _aggregate_series_fast(self, obj, func): return result, counts def _aggregate_series_pure_python(self, obj, func): + group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) result = None - group_index, _, ngroups = self.group_info - splitter = get_splitter(obj, group_index, ngroups, axis=self.axis) for label, group in splitter: res = func(group) if result is None: - if isinstance(res, np.ndarray) or isinstance(res, list): + if isinstance(res, (Series, np.ndarray)) or isinstance(res, list): raise ValueError('Function does not reduce') result = np.empty(ngroups, dtype='O') @@ -1035,6 +1038,7 @@ def apply(self, f, data, axis=0, keep_internal=False): # group might be modified group_axes = _get_axes(group) res = f(group) + if not _is_indexed_like(res, group_axes): mutated = True @@ -1198,7 +1202,7 @@ def __init__(self, index, grouper=None, name=None, level=None, self.name = factor.name # no level passed - if not isinstance(self.grouper, np.ndarray): + if not isinstance(self.grouper, (Series, np.ndarray)): self.grouper = self.index.map(self.grouper) if not (hasattr(self.grouper,"__len__") and \ len(self.grouper) == len(self.index)): @@ -1283,7 +1287,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True): # what are we after, exactly? 
match_axis_length = len(keys) == len(group_axis) any_callable = any(callable(g) or isinstance(g, dict) for g in keys) - any_arraylike = any(isinstance(g, (list, tuple, np.ndarray)) + any_arraylike = any(isinstance(g, (list, tuple, Series, np.ndarray)) for g in keys) try: @@ -1348,7 +1352,7 @@ def _convert_grouper(axis, grouper): return grouper.values else: return grouper.reindex(axis).values - elif isinstance(grouper, (list, np.ndarray)): + elif isinstance(grouper, (list, Series, np.ndarray)): if len(grouper) != len(axis): raise AssertionError('Grouper and axis must be same length') return grouper @@ -1508,7 +1512,7 @@ def _aggregate_named(self, func, *args, **kwargs): for name, group in self: group.name = name output = func(group, *args, **kwargs) - if isinstance(output, np.ndarray): + if isinstance(output, (Series, np.ndarray)): raise Exception('Must produce aggregated value') result[name] = self._try_cast(output, group) @@ -1796,7 +1800,7 @@ def _aggregate_generic(self, func, *args, **kwargs): obj = self._obj_with_exclusions result = {} - if axis != obj._het_axis: + if axis != obj._info_axis_number: try: for name, data in self: # for name in self.indices: @@ -1826,9 +1830,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): cannot_agg = [] for item in obj: try: - colg = SeriesGroupBy(obj[item], selection=item, + data = obj[item] + colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) - result[item] = colg.aggregate(func, *args, **kwargs) + result[item] = self._try_cast(colg.aggregate(func, *args, **kwargs), data) except ValueError: cannot_agg.append(item) continue @@ -1884,7 +1889,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): else: key_index = Index(keys, name=key_names[0]) - if isinstance(values[0], np.ndarray): + if isinstance(values[0], (np.ndarray, Series)): if isinstance(values[0], Series): applied_index = self.obj._get_axis(self.axis) all_indexed_same = _all_indexes_same([x.index for x in values]) @@ 
-2115,7 +2120,7 @@ def __getitem__(self, key): if self._selection is not None: raise Exception('Column(s) %s already selected' % self._selection) - if isinstance(key, (list, tuple, np.ndarray)) or not self.as_index: + if isinstance(key, (list, tuple, Series, np.ndarray)) or not self.as_index: return DataFrameGroupBy(self.obj, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, @@ -2345,7 +2350,7 @@ def __iter__(self): yield i, self._chop(sdata, slice(start, end)) def _get_sorted_data(self): - return self.data.take(self.sort_idx, axis=self.axis) + return self.data.take(self.sort_idx, axis=self.axis, convert=False) def _chop(self, sdata, slice_obj): return sdata[slice_obj] @@ -2361,7 +2366,7 @@ class ArraySplitter(DataSplitter): class SeriesSplitter(DataSplitter): def _chop(self, sdata, slice_obj): - return sdata._get_values(slice_obj) + return sdata._get_values(slice_obj).to_dense() class FrameSplitter(DataSplitter): diff --git a/pandas/core/index.py b/pandas/core/index.py index 7be19302d88d5..73aff7bcab953 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -14,6 +14,7 @@ from pandas.util.decorators import cache_readonly, deprecate from pandas.core.common import isnull import pandas.core.common as com +from pandas.core.common import _values_from_object from pandas.core.config import get_option import warnings @@ -85,7 +86,15 @@ class Index(FrozenNDArray): _engine_type = _index.ObjectEngine - def __new__(cls, data, dtype=None, copy=False, name=None, **kwargs): + def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, + **kwargs): + + # no class inference! 
+ if fastpath: + subarr = data.view(cls) + subarr.name = name + return subarr + from pandas.tseries.period import PeriodIndex if isinstance(data, np.ndarray): if issubclass(data.dtype.type, np.datetime64): @@ -129,10 +138,9 @@ def __new__(cls, data, dtype=None, copy=False, name=None, **kwargs): return Int64Index(subarr.astype('i8'), copy=copy, name=name) elif inferred != 'string': if (inferred.startswith('datetime') or - tslib.is_timestamp_array(subarr)): + tslib.is_timestamp_array(subarr)): from pandas.tseries.index import DatetimeIndex - return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) - + return DatetimeIndex(data, copy=copy, name=name, **kwargs) elif inferred == 'period': return PeriodIndex(subarr, name=name, **kwargs) @@ -306,6 +314,9 @@ def _mpl_repr(self): def values(self): return np.asarray(self) + def get_values(self): + return self.values + @property def is_monotonic(self): return self._engine.is_monotonic @@ -407,6 +418,15 @@ def __getitem__(self, key): return Index(result, name=self.name) + def _getitem_slice(self, key): + """ getitem for a bool/sliceable, fallback to standard getitem """ + try: + arr_idx = self.view(np.ndarray) + result = arr_idx[key] + return self.__class__(result, name=self.name, fastpath=True) + except: + return self.__getitem__(key) + def append(self, other): """ Append a collection of Index options together @@ -776,21 +796,23 @@ def get_loc(self, key): ------- loc : int if unique index, possibly slice or mask if not """ - return self._engine.get_loc(key) + return self._engine.get_loc(_values_from_object(key)) def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. 
Only use this if you know what you're doing """ + s = _values_from_object(series) + k = _values_from_object(key) try: - return self._engine.get_value(series, key) + return self._engine.get_value(s, k) except KeyError as e1: if len(self) > 0 and self.inferred_type == 'integer': raise try: - return tslib.get_value_box(series, key) + return tslib.get_value_box(s, key) except IndexError: raise except TypeError: @@ -812,7 +834,7 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(arr, key, value) + self._engine.set_value(_values_from_object(arr), _values_from_object(key), value) def get_level_values(self, level): """ @@ -1402,12 +1424,45 @@ class Int64Index(Index): _engine_type = _index.Int64Engine - def __new__(cls, data, dtype=None, copy=False, name=None): + def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): + + if fastpath: + subarr = data.view(cls) + subarr.name = name + return subarr + if not isinstance(data, np.ndarray): if np.isscalar(data): raise ValueError('Index(...) must be called with a collection ' 'of some kind, %s was passed' % repr(data)) + if not isinstance(data, np.ndarray): + if np.isscalar(data): + raise ValueError('Index(...) 
must be called with a collection ' + 'of some kind, %s was passed' % repr(data)) + + # other iterable of some kind + if not isinstance(data, (list, tuple)): + data = list(data) + data = np.asarray(data) + + if issubclass(data.dtype.type, compat.string_types): + raise TypeError('String dtype not supported, you may need ' + 'to explicitly cast to int') + elif issubclass(data.dtype.type, np.integer): + # don't force the upcast as we may be dealing + # with a platform int + if dtype is None or not issubclass(np.dtype(dtype).type, np.integer): + dtype = np.int64 + + subarr = np.array(data, dtype=dtype, copy=copy) + else: + subarr = np.array(data, dtype=np.int64, copy=copy) + if len(data) > 0: + if (subarr != data).any(): + raise TypeError('Unsafe NumPy casting, you must ' + 'explicitly cast') + # other iterable of some kind if not isinstance(data, (list, tuple)): data = list(data) @@ -1805,8 +1860,10 @@ def get_value(self, series, key): from pandas.core.series import Series # Label-based + s = _values_from_object(series) + k = _values_from_object(key) try: - return self._engine.get_value(series, key) + return self._engine.get_value(s, k) except KeyError as e1: try: # TODO: what if a level contains tuples?? 
@@ -1819,7 +1876,7 @@ def get_value(self, series, key): pass try: - return _index.get_value_at(series, key) + return _index.get_value_at(s, k) except IndexError: raise except TypeError: @@ -2067,6 +2124,8 @@ def __getitem__(self, key): return result + _getitem_slice = __getitem__ + def take(self, indexer, axis=None): """ Analogous to ndarray.take @@ -2480,7 +2539,7 @@ def get_loc(self, key): if isinstance(key, tuple): if len(key) == self.nlevels: if self.is_unique: - return self._engine.get_loc(key) + return self._engine.get_loc(_values_from_object(key)) else: return slice(*self.slice_locs(key, key)) else: @@ -2546,7 +2605,7 @@ def _drop_levels(indexer, levels): if not any(isinstance(k, slice) for k in key): if len(key) == self.nlevels: if self.is_unique: - return self._engine.get_loc(key), None + return self._engine.get_loc(_values_from_object(key)), None else: indexer = slice(*self.slice_locs(key, key)) return indexer, self[indexer] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a4d2cffc3dd23..11818a4fea7c8 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -6,6 +6,8 @@ from pandas.compat import range, zip import pandas.compat as compat import pandas.core.common as com +from pandas.core.common import (_is_bool_indexer, + ABCSeries, ABCDataFrame, ABCPanel) import pandas.lib as lib import numpy as np @@ -30,6 +32,7 @@ class IndexingError(Exception): class _NDFrameIndexer(object): + _exception = KeyError def __init__(self, obj, name): self.obj = obj @@ -100,27 +103,25 @@ def _convert_tuple(self, key): return tuple(keyidx) def _setitem_with_indexer(self, indexer, value): - from pandas import Panel, DataFrame, Series # also has the side effect of consolidating in-place - # mmm, spaghetti if self.obj._is_mixed_type: if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) - if isinstance(value, Series): + if isinstance(value, ABCSeries): value = self._align_series(indexer, value) - het_axis = self.obj._het_axis - 
het_idx = indexer[het_axis] + info_axis = self.obj._info_axis_number + info_idx = indexer[info_axis] - if com.is_integer(het_idx): - het_idx = [het_idx] + if com.is_integer(info_idx): + info_idx = [info_idx] - plane_indexer = indexer[:het_axis] + indexer[het_axis + 1:] - item_labels = self.obj._get_axis(het_axis) + plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:] + item_labels = self.obj._get_axis(info_axis) def setter(item, v): data = self.obj[item] @@ -129,12 +130,12 @@ def setter(item, v): result, changed = com._maybe_upcast_indexer(values,plane_indexer,v,dtype=getattr(data,'dtype',None)) self.obj[item] = result - labels = item_labels[het_idx] + labels = item_labels[info_idx] if _is_list_like(value): # we have an equal len Frame - if isinstance(value, DataFrame) and value.ndim > 1: + if isinstance(value, ABCDataFrame) and value.ndim > 1: for item in labels: @@ -175,19 +176,16 @@ def setter(item, v): if isinstance(indexer, tuple): indexer = _maybe_convert_ix(*indexer) - if isinstance(value, Series): + if isinstance(value, ABCSeries): value = self._align_series(indexer, value) - if isinstance(value, DataFrame): + elif isinstance(value, ABCDataFrame): value = self._align_frame(indexer, value) - if isinstance(value, Panel): + if isinstance(value, ABCPanel): value = self._align_panel(indexer, value) - # 2096 - values = self.obj.values - if np.prod(values.shape): - values[indexer] = value + self.obj._data = self.obj._data.setitem(indexer,value) def _align_series(self, indexer, ser): # indexer to assign Series can be tuple or scalar @@ -321,22 +319,14 @@ def _multi_take_opportunity(self, tup): return True def _multi_take(self, tup): - from pandas.core.frame import DataFrame - from pandas.core.panel import Panel - from pandas.core.panel4d import Panel4D - - if isinstance(self.obj, DataFrame): - index = self._convert_for_reindex(tup[0], axis=0) - columns = self._convert_for_reindex(tup[1], axis=1) - return self.obj.reindex(index=index, columns=columns) - 
elif isinstance(self.obj, Panel4D): - conv = [self._convert_for_reindex(x, axis=i) - for i, x in enumerate(tup)] - return self.obj.reindex(labels=tup[0], items=tup[1], major=tup[2], minor=tup[3]) - elif isinstance(self.obj, Panel): - conv = [self._convert_for_reindex(x, axis=i) - for i, x in enumerate(tup)] - return self.obj.reindex(items=tup[0], major=tup[1], minor=tup[2]) + """ create the reindex map for our objects, raise the _exception if we can't create the indexer """ + + try: + o = self.obj + d = dict([ (a,self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS) ]) + return o.reindex(**d) + except: + raise self._exception def _convert_for_reindex(self, key, axis=0): labels = self.obj._get_axis(axis) @@ -359,7 +349,6 @@ def _convert_for_reindex(self, key, axis=0): return keyarr def _getitem_lowerdim(self, tup): - from pandas.core.frame import DataFrame ax0 = self.obj._get_axis(0) # a bit kludgy @@ -404,7 +393,7 @@ def _getitem_lowerdim(self, tup): # unfortunately need an odious kludge here because of # DataFrame transposing convention - if (isinstance(section, DataFrame) and i > 0 + if (isinstance(section, ABCDataFrame) and i > 0 and len(new_key) == 2): a, b = new_key new_key = b, a @@ -515,11 +504,7 @@ def _reindex(keys, level=None): if axis+1 > ndim: raise AssertionError("invalid indexing error with non-unique index") - args = [None] * (2*ndim) - args[2*axis] = new_labels - args[2*axis+1] = new_indexer - - result = result._reindex_with_indexers(*args, copy=False, fill_value=np.nan) + result = result._reindex_with_indexers({ axis : [ new_labels, new_indexer ] }, copy=True) return result @@ -1039,20 +1024,20 @@ def _check_bool_indexer(ax, key): # this function assumes that com._is_bool_indexer(key) == True result = key - if _is_series(key) and not key.index.equals(ax): + if isinstance(key, ABCSeries) and not key.index.equals(ax): result = result.reindex(ax) - mask = com.isnull(result) + mask = com.isnull(result.values) if 
mask.any(): raise IndexingError('Unalignable boolean Series key provided') - # com._is_bool_indexer has already checked for nulls in the case of an - # object array key, so no check needed here - result = np.asarray(result, dtype=bool) - return result + result = result.astype(bool).values -def _is_series(obj): - from pandas.core.series import Series - return isinstance(obj, Series) + else: + # com._is_bool_indexer has already checked for nulls in the case of an + # object array key, so no check needed here + result = np.asarray(result, dtype=bool) + + return result def _maybe_convert_indices(indices, n): @@ -1073,9 +1058,10 @@ def _maybe_convert_ix(*args): """ We likely want to take the cross-product """ + ixify = True for arg in args: - if not isinstance(arg, (np.ndarray, list)): + if not isinstance(arg, (np.ndarray, list, ABCSeries)): ixify = False if ixify: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 56a6c8081d556..f1578303e6db0 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,27 +1,33 @@ import itertools import re from datetime import datetime +import copy +from collections import defaultdict -from numpy import nan import numpy as np from pandas.core.base import PandasObject from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE, - _TD_DTYPE) + _TD_DTYPE, ABCSeries, ABCSparseSeries, + is_list_like) from pandas.core.index import (Index, MultiIndex, _ensure_index, _handle_legacy_indexes) from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices import pandas.core.common as com +from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib import pandas.tslib as tslib import pandas.core.expressions as expressions +from pandas.util.decorators import cache_readonly from pandas.tslib import Timestamp from pandas import compat from pandas.compat import range, lrange, lmap, callable, map, zip +from pandas.util import rwproperty class 
Block(PandasObject): + """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure @@ -32,10 +38,17 @@ class Block(PandasObject): is_numeric = False is_bool = False is_object = False + is_sparse = False _can_hold_na = False _downcast_dtype = None + _can_consolidate = True + _verify_integrity = True + _ftype = 'dense' - def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=None): + def __init__(self, values, items, ref_items, ndim=None, fastpath=False, placement=None): + + if ndim is None: + ndim = values.ndim if values.ndim != ndim: raise ValueError('Wrong number of dimensions') @@ -58,28 +71,47 @@ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=N def _gi(self, arg): return self.values[arg] + @property + def _consolidate_key(self): + return (self._can_consolidate, self.dtype.name) + + @property + def _is_single_block(self): + return self.ndim == 1 + + @property + def fill_value(self): + return np.nan + @property def ref_locs(self): if self._ref_locs is None: - indexer = self.ref_items.get_indexer(self.items) - indexer = com._ensure_platform_int(indexer) - if (indexer == -1).any(): - raise AssertionError('Some block items were not in block ' - 'ref_items') + # we have a single block, maybe have duplicates + # but indexer is easy + # also if we are not really reindexing, just numbering + if self._is_single_block or self.ref_items.equals(self.items): + indexer = np.arange(len(self.items)) + else: + + indexer = self.ref_items.get_indexer(self.items) + indexer = com._ensure_platform_int(indexer) + if (indexer == -1).any(): + raise AssertionError('Some block items were not in block ' + 'ref_items') self._ref_locs = indexer return self._ref_locs def reset_ref_locs(self): """ reset the block ref_locs """ - self._ref_locs = np.empty(len(self.items),dtype='int64') + self._ref_locs = np.empty(len(self.items), dtype='int64') def set_ref_locs(self, placement): """ explicity set the ref_locs 
indexer, only necessary for duplicate indicies """ if placement is None: self._ref_locs = None else: - self._ref_locs = np.array(placement,dtype='int64', copy=True) + self._ref_locs = np.array(placement, dtype='int64', copy=True) def set_ref_items(self, ref_items, maybe_rename=True): """ @@ -94,10 +126,20 @@ def set_ref_items(self, ref_items, maybe_rename=True): self.ref_items = ref_items def __unicode__(self): - shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) - name = type(self).__name__ - result = '%s: %s, %s, dtype %s' % ( - name, com.pprint_thing(self.items), shape, self.dtype) + + # don't want to print out all of the items here + name = com.pprint_thing(self.__class__.__name__) + if self._is_single_block: + + result = '%s: %s dtype: %s' % ( + name, len(self), self.dtype) + + else: + + shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) + result = '%s: %s, %s, dtype: %s' % ( + name, com.pprint_thing(self.items), shape, self.dtype) + return result def __contains__(self, item): @@ -118,6 +160,10 @@ def __setstate__(self, state): self.values = values self.ndim = values.ndim + def _slice(self, slicer): + """ return a slice of my values """ + return self.values[slicer] + @property def shape(self): return self.values.shape @@ -130,11 +176,9 @@ def itemsize(self): def dtype(self): return self.values.dtype - def copy(self, deep=True): - values = self.values - if deep: - values = values.copy() - return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True, placement=self._ref_locs) + @property + def ftype(self): + return "%s:%s" % (self.dtype, self._ftype) def merge(self, other): if not self.ref_items.equals(other.ref_items): @@ -145,16 +189,19 @@ def merge(self, other): # union_ref = self.ref_items + other.ref_items return _merge_blocks([self, other], self.ref_items) - def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None): + def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, 
limit=None, mask_info=None): """ Reindex using pre-computed indexer information """ if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) + if fill_value is None: + fill_value = self.fill_value new_values = com.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) - return make_block(new_values, self.items, self.ref_items, fastpath=True, - placement=self._ref_locs) + return make_block( + new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True, + placement=self._ref_locs) def reindex_items_from(self, new_ref_items, copy=True): """ @@ -177,7 +224,7 @@ def reindex_items_from(self, new_ref_items, copy=True): new_values = com.take_nd(self.values, masked_idx, axis=0, allow_fill=False) new_items = self.items.take(masked_idx) - return make_block(new_values, new_items, new_ref_items, fastpath=True) + return make_block(new_values, new_items, new_ref_items, ndim=self.ndim, fastpath=True) def get(self, item): loc = self.items.get_loc(item) @@ -206,25 +253,56 @@ def delete(self, item): loc = self.items.get_loc(item) new_items = self.items.delete(loc) new_values = np.delete(self.values, loc, 0) - return make_block(new_values, new_items, self.ref_items, klass=self.__class__, fastpath=True) + return make_block(new_values, new_items, self.ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True) + + def split_block_at(self, item): + """ + Split block into zero or more blocks around columns with given label, + for "deleting" a column without having to copy data by returning views + on the original array. 
+ + Returns + ------- + generator of Block + """ + loc = self.items.get_loc(item) + + if type(loc) == slice or type(loc) == int: + mask = [True] * len(self) + mask[loc] = False + else: # already a mask, inverted + mask = -loc + + for s, e in com.split_ranges(mask): + yield make_block(self.values[s:e], + self.items[s:e].copy(), + self.ref_items, + ndim=self.ndim, + klass=self.__class__, + fastpath=True) def fillna(self, value, inplace=False, downcast=None): if not self._can_hold_na: if inplace: - return self + return [self] else: - return self.copy() + return [self.copy()] - new_values = self.values if inplace else self.values.copy() - mask = com.isnull(new_values) - np.putmask(new_values, mask, value) + mask = com.isnull(self.values) + value = self._try_fill(value) + blocks = self.putmask(mask, value, inplace=inplace) + + # possibily downcast the blocks + if not downcast: + return blocks + + result_blocks = [] + for b in blocks: + result_blocks.extend(b.downcast()) - block = make_block(new_values, self.items, self.ref_items, fastpath=True) - if downcast: - block = block.downcast() - return block + return result_blocks - def downcast(self, dtypes = None): + def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ if dtypes is None: @@ -234,47 +312,85 @@ def downcast(self, dtypes = None): blocks = [] for i, item in enumerate(self.items): - dtype = dtypes.get(item,self._downcast_dtype) + dtype = dtypes.get(item, self._downcast_dtype) if dtype is None: nv = _block_shape(values[i]) - blocks.append(make_block(nv, [ item ], self.ref_items)) + blocks.append(make_block(nv, [item], self.ref_items)) continue nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype)) nv = _block_shape(nv) - blocks.append(make_block(nv, [ item ], self.ref_items)) + blocks.append(make_block(nv, [item], self.ref_items)) return blocks - def astype(self, dtype, copy = True, raise_on_error = True, values = None): + def astype(self, dtype, copy=False, 
raise_on_error=True, values=None): + return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, + values=values) + + def _astype(self, dtype, copy=False, raise_on_error=True, values=None, + klass=None): """ Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ + dtype = np.dtype(dtype) + if self.dtype == dtype: + if copy: + return self.copy() + return self + try: + # force the copy here if values is None: - values = com._astype_nansafe(self.values, dtype, copy = copy) - newb = make_block(values, self.items, self.ref_items, fastpath=True) + values = com._astype_nansafe(self.values, dtype, copy=True) + newb = make_block( + values, self.items, self.ref_items, ndim=self.ndim, + fastpath=True, dtype=dtype, klass=klass) except: if raise_on_error is True: raise newb = self.copy() if copy else self if newb.is_numeric and self.is_numeric: - if (newb.shape != self.shape or - (not copy and newb.itemsize < self.itemsize)): + if newb.shape != self.shape: raise TypeError("cannot set astype for copy = [%s] for dtype " "(%s [%s]) with smaller itemsize that current " "(%s [%s])" % (copy, self.dtype.name, - self.itemsize, newb.dtype.name, newb.itemsize)) - return newb + self.itemsize, newb.dtype.name, newb.itemsize)) + return [ newb ] - def convert(self, copy = True, **kwargs): + def convert(self, copy=True, **kwargs): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! 
""" - return self.copy() if copy else self + return [ self.copy() ] if copy else [ self ] + + def prepare_for_merge(self, **kwargs): + """ a regular block is ok to merge as is """ + return self + + def post_merge(self, items, **kwargs): + """ we are non-sparse block, try to convert to a sparse block(s) """ + overlap = set(items.keys()) & set(self.items) + if len(overlap): + overlap = _ensure_index(overlap) + + new_blocks = [] + for item in overlap: + dtypes = set(items[item]) + + # this is a safe bet with multiple dtypes + dtype = list(dtypes)[0] if len(dtypes) == 1 else np.float64 + + b = make_block( + SparseArray(self.get(item), dtype=dtype), [item], self.ref_items) + new_blocks.append(b) + + return new_blocks + + return self def _can_hold_element(self, value): raise NotImplementedError() @@ -295,17 +411,31 @@ def _try_coerce_result(self, result): """ reverse of try_coerce_args """ return result + def _try_fill(self, value): + return value + def to_native_types(self, slicer=None, na_rep='', **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: - values = values[:,slicer] - values = np.array(values,dtype=object) + values = values[:, slicer] + values = np.array(values, dtype=object) mask = isnull(values) values[mask] = na_rep return values.tolist() + #### block actions #### + def copy(self, deep=True, ref_items=None): + values = self.values + if deep: + values = values.copy() + if ref_items is None: + ref_items = self.ref_items + return make_block( + values, self.items, ref_items, ndim=self.ndim, klass=self.__class__, + fastpath=True, placement=self._ref_locs) + def replace(self, to_replace, value, inplace=False, filter=None, regex=False): """ replace the to_replace value with value, possible to create new @@ -320,10 +450,36 @@ def replace(self, to_replace, value, inplace=False, filter=None, if not mask.any(): if inplace: - return [ self ] - return [ self.copy() ] + return [self] + return [self.copy()] 
return self.putmask(mask, value, inplace=inplace) + def setitem(self, indexer, value): + """ set the value inplace; return a new block (of a possibly different dtype) + indexer is a direct slice/positional indexer; value must be a compaitable shape """ + + values = self.values + if self.ndim == 2: + values = values.T + + # 2-d (DataFrame) are represented as a transposed array + if self._can_hold_element(value): + try: + values[indexer] = value + return [ self ] + except (IndexError): + return [ self ] + except: + pass + + # create an indexing mask, the putmask which potentially changes the dtype + indices = np.arange(np.prod(values.shape)).reshape(values.shape) + mask = indices[indexer] == indices + if self.ndim == 2: + mask = mask.T + + return self.putmask(mask, value, inplace=True) + def putmask(self, mask, new, inplace=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) """ @@ -332,13 +488,14 @@ def putmask(self, mask, new, inplace=False): # may need to align the new if hasattr(new, 'reindex_axis'): - axis = getattr(new, '_het_axis', 0) + axis = getattr(new, '_info_axis_number', 0) new = new.reindex_axis(self.items, axis=axis, copy=False).values.T # may need to align the mask if hasattr(mask, 'reindex_axis'): - axis = getattr(mask, '_het_axis', 0) - mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T + axis = getattr(mask, '_info_axis_number', 0) + mask = mask.reindex_axis( + self.items, axis=axis, copy=False).values.T if self._can_hold_element(new): new = self._try_cast(new) @@ -349,36 +506,76 @@ def putmask(self, mask, new, inplace=False): # need to go column by column new_blocks = [] - for i, item in enumerate(self.items): - - m = mask[i] - # need a new block - if m.any(): + def create_block(v, m, n, item, reshape=True): + """ return a new block, try to preserve dtype if possible """ - n = new[i] if isinstance(new, np.ndarray) else new + # n should the length of the 
mask or a scalar here + if np.isscalar(n): + n = np.array([n] * len(m)) - # type of the new block - dtype, _ = com._maybe_promote(np.array(n).dtype) - - # we need to exiplicty astype here to make a copy - nv = new_values[i].astype(dtype) + # see if we are only masking values that if putted + # will work in the current dtype + nv = None + try: + nn = n[m] + nn_at = nn.astype(self.dtype) + if (nn == nn_at).all(): + nv = v.copy() + nv[mask] = nn_at + except: + pass - # we create a new block type + # change the dtype + if nv is None: + dtype, _ = com._maybe_promote(n.dtype) + nv = v.astype(dtype) np.putmask(nv, m, n) + if reshape: + nv = _block_shape(nv) + return make_block(nv, [item], self.ref_items) else: - nv = new_values[i] if inplace else new_values[i].copy() + return make_block(nv, item, self.ref_items) + + if self.ndim > 1: + for i, item in enumerate(self.items): + m = mask[i] + v = new_values[i] + + # need a new block + if m.any(): + + n = new[i] if isinstance( + new, np.ndarray) else np.array(new) + + # type of the new block + dtype, _ = com._maybe_promote(n.dtype) + + # we need to exiplicty astype here to make a copy + n = n.astype(dtype) + + block = create_block(v, m, n, item) - nv = _block_shape(nv) - new_blocks.append(make_block(nv, Index([ item ]), self.ref_items, fastpath=True)) + else: + nv = v if inplace else v.copy() + nv = _block_shape(nv) + block = make_block( + nv, Index([item]), self.ref_items, fastpath=True) + + new_blocks.append(block) + + else: + + new_blocks.append( + create_block(new_values, mask, new, self.items, reshape=False)) return new_blocks if inplace: - return [ self ] + return [self] - return [ make_block(new_values, self.items, self.ref_items, fastpath=True) ] + return [make_block(new_values, self.items, self.ref_items, fastpath=True)] def interpolate(self, method='pad', axis=0, inplace=False, limit=None, missing=None, coerce=False): @@ -388,43 +585,31 @@ def interpolate(self, method='pad', axis=0, inplace=False, if coerce: if not 
self._can_hold_na: if inplace: - return self + return [self] else: - return self.copy() + return [self.copy()] values = self.values if inplace else self.values.copy() - - if values.ndim != 2: - raise NotImplementedError - - transf = (lambda x: x) if axis == 0 else (lambda x: x.T) - - if missing is None: - mask = None - else: # todo create faster fill func without masking - mask = com.mask_missing(transf(values), missing) - - if method == 'pad': - com.pad_2d(transf(values), limit=limit, mask=mask) - else: - com.backfill_2d(transf(values), limit=limit, mask=mask) - - return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True) + values = com.interpolate_2d(values, method, axis, limit, missing) + return [make_block(values, self.items, self.ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True)] def take(self, indexer, ref_items, axis=1): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) new_values = com.take_nd(self.values, indexer, axis=axis, allow_fill=False) - return make_block(new_values, self.items, ref_items, klass=self.__class__, fastpath=True) + return [make_block(new_values, self.items, ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True)] - def get_values(self, dtype): + def get_values(self, dtype=None): return self.values + def get_merge_length(self): + return len(self.values) + def diff(self, n): """ return block for the diff of the values """ new_values = com.diff(self.values, n, axis=1) - return make_block(new_values, self.items, self.ref_items, fastpath=True) + return [make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] def shift(self, indexer, periods): """ shift the block by periods, possibly upcast """ @@ -437,9 +622,9 @@ def shift(self, indexer, periods): new_values[:, :periods] = fill_value else: new_values[:, periods:] = fill_value - return make_block(new_values, self.items, self.ref_items, fastpath=True) + return [make_block(new_values, 
self.items, self.ref_items, ndim=self.ndim, fastpath=True)] - def eval(self, func, other, raise_on_error = True, try_cast = False): + def eval(self, func, other, raise_on_error=True, try_cast=False): """ evaluate the block; return result block from the result @@ -458,8 +643,9 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): # see if we can align other if hasattr(other, 'reindex_axis'): - axis = getattr(other, '_het_axis', 0) - other = other.reindex_axis(self.items, axis=axis, copy=True).values + axis = getattr(other, '_info_axis_number', 0) + other = other.reindex_axis( + self.items, axis=axis, copy=False).values # make sure that we can broadcast is_transposed = False @@ -469,16 +655,16 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): is_transposed = True values, other = self._try_coerce_args(values, other) - args = [ values, other ] + args = [values, other] try: result = self._try_coerce_result(func(*args)) except (Exception) as detail: if raise_on_error: raise TypeError('Could not operate [%s] with block values [%s]' - % (repr(other),str(detail))) + % (repr(other), str(detail))) else: # return the values - result = np.empty(values.shape,dtype='O') + result = np.empty(values.shape, dtype='O') result.fill(np.nan) if not isinstance(result, np.ndarray): @@ -492,9 +678,9 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): if try_cast: result = self._try_cast_result(result) - return make_block(result, self.items, self.ref_items, fastpath=True) + return [make_block(result, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] - def where(self, other, cond, raise_on_error = True, try_cast = False): + def where(self, other, cond, raise_on_error=True, try_cast=False): """ evaluate the block; return result block(s) from the result @@ -513,8 +699,8 @@ def where(self, other, cond, raise_on_error = True, try_cast = False): values = self.values # see if we can align other - if 
hasattr(other,'reindex_axis'): - axis = getattr(other,'_het_axis',0) + if hasattr(other, 'reindex_axis'): + axis = getattr(other, '_info_axis_number', 0) other = other.reindex_axis(self.items, axis=axis, copy=True).values # make sure that we can broadcast @@ -525,10 +711,11 @@ def where(self, other, cond, raise_on_error = True, try_cast = False): is_transposed = True # see if we can align cond - if not hasattr(cond,'shape'): - raise ValueError("where must have a condition that is ndarray like") - if hasattr(cond,'reindex_axis'): - axis = getattr(cond,'_het_axis',0) + if not hasattr(cond, 'shape'): + raise ValueError( + "where must have a condition that is ndarray like") + if hasattr(cond, 'reindex_axis'): + axis = getattr(cond, '_info_axis_number', 0) cond = cond.reindex_axis(self.items, axis=axis, copy=True).values else: cond = cond.values @@ -537,10 +724,10 @@ def where(self, other, cond, raise_on_error = True, try_cast = False): if hasattr(values, 'ndim'): if values.ndim != cond.ndim or values.shape == cond.shape[::-1]: values = values.T - is_transposed = not is_transposed + is_transposed = not is_transposed # our where function - def func(c,v,o): + def func(c, v, o): if c.ravel().all(): return v @@ -550,16 +737,17 @@ def func(c,v,o): except (Exception) as detail: if raise_on_error: raise TypeError('Could not operate [%s] with block values [%s]' - % (repr(o),str(detail))) + % (repr(o), str(detail))) else: # return the values - result = np.empty(v.shape,dtype='float64') + result = np.empty(v.shape, dtype='float64') result.fill(np.nan) return result # see if we can operate on the entire block, or need item-by-item - result = func(cond,values,other) - if self._can_hold_na: + # or if we are a single block (ndim == 1) + result = func(cond, values, other) + if self._can_hold_na or self.ndim == 1: if not isinstance(result, np.ndarray): raise TypeError('Could not compare [%s] with block values' @@ -572,7 +760,7 @@ def func(c,v,o): if try_cast: result = 
self._try_cast_result(result) - return make_block(result, self.items, self.ref_items) + return make_block(result, self.items, self.ref_items, ndim=self.ndim) # might need to separate out blocks axis = cond.ndim - 1 @@ -591,6 +779,7 @@ def func(c,v,o): return result_blocks + class NumericBlock(Block): is_numeric = True _can_hold_na = True @@ -598,11 +787,13 @@ class NumericBlock(Block): def _try_cast_result(self, result): return _possibly_downcast_to_dtype(result, self.dtype) + class FloatBlock(NumericBlock): _downcast_dtype = 'int64' def _can_hold_element(self, element): - if isinstance(element, np.ndarray): + if is_list_like(element): + element = np.array(element) return issubclass(element.dtype.type, (np.floating, np.integer)) return isinstance(element, (float, int)) @@ -617,13 +808,14 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs): values = self.values if slicer is not None: - values = values[:,slicer] - values = np.array(values,dtype=object) + values = values[:, slicer] + values = np.array(values, dtype=object) mask = isnull(values) values[mask] = na_rep if float_format: imask = (-mask).ravel() - values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ]) + values.flat[imask] = np.array( + [float_format % val for val in values.ravel()[imask]]) return values.tolist() def should_store(self, value): @@ -651,7 +843,8 @@ class IntBlock(NumericBlock): _can_hold_na = False def _can_hold_element(self, element): - if isinstance(element, np.ndarray): + if is_list_like(element): + element = np.array(element) return issubclass(element.dtype.type, np.integer) return com.is_integer(element) @@ -670,6 +863,9 @@ class BoolBlock(NumericBlock): _can_hold_na = False def _can_hold_element(self, element): + if is_list_like(element): + element = np.array(element) + return issubclass(element.dtype.type, np.integer) return isinstance(element, (int, bool)) def _try_cast(self, element): @@ -698,16 +894,7 @@ def is_bool(self): 
""" we can be a bool if we have only bool values but are of type object """ return lib.is_bool_array(self.values.ravel()) - def astype(self, dtype, copy=True, raise_on_error=True, values=None): - """ allow astypes to datetime64[ns],timedelta64[ns] with coercion """ - dtype = np.dtype(dtype) - if dtype == _NS_DTYPE or dtype == _TD_DTYPE: - values = com._possibly_convert_datetime(self.values,dtype) - else: - values = None - return super(ObjectBlock, self).astype(dtype=dtype,copy=copy,raise_on_error=raise_on_error,values=values) - - def convert(self, convert_dates = True, convert_numeric = True, copy = True): + def convert(self, convert_dates=True, convert_numeric=True, copy=True, by_item=True): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! @@ -718,15 +905,26 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True): # attempt to create new type blocks is_unique = self.items.is_unique blocks = [] - for i, c in enumerate(self.items): - values = self.iget(i) + if by_item: + + for i, c in enumerate(self.items): + values = self.iget(i) + + values = com._possibly_convert_objects( + values, convert_dates=convert_dates, convert_numeric=convert_numeric) + values = _block_shape(values) + items = self.items.take([i]) + placement = None if is_unique else [i] + newb = make_block( + values, items, self.ref_items, ndim=self.ndim, placement=placement) + blocks.append(newb) + + else: - values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) - values = _block_shape(values) - items = self.items.take([i]) - placement = None if is_unique else [i] - newb = make_block(values, items, self.ref_items, fastpath=True, placement=placement) - blocks.append(newb) + values = com._possibly_convert_objects( + self.values, convert_dates=convert_dates, convert_numeric=convert_numeric) + blocks.append( + make_block(values, self.items, 
self.ref_items, ndim=self.ndim)) return blocks @@ -805,9 +1003,12 @@ def _replace_single(self, to_replace, value, inplace=False, filter=None, else: # if the thing to replace is not a string or compiled regex call # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex) + result = super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex) + if not isinstance(result, list): + result = [result] + return result new_values = self.values if inplace else self.values.copy() @@ -844,17 +1045,20 @@ def re_replacer(s): class DatetimeBlock(Block): _can_hold_na = True - def __init__(self, values, items, ref_items, ndim=2, fastpath=True, placement=None): + def __init__(self, values, items, ref_items, fastpath=False, placement=None, **kwargs): if values.dtype != _NS_DTYPE: values = tslib.cast_to_nanoseconds(values) super(DatetimeBlock, self).__init__(values, items, ref_items, - ndim=ndim, fastpath=fastpath, placement=placement) + fastpath=True, placement=placement, **kwargs) def _gi(self, arg): return lib.Timestamp(self.values[arg]) def _can_hold_element(self, element): + if is_list_like(element): + element = np.array(element) + return element.dtype == _NS_DTYPE return com.is_integer(element) or isinstance(element, datetime) def _try_cast(self, element): @@ -881,33 +1085,59 @@ def _try_coerce_result(self, result): """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): if result.dtype == 'i8': - result = tslib.array_to_datetime(result.astype(object).ravel()).reshape(result.shape) + result = tslib.array_to_datetime( + result.astype(object).ravel()).reshape(result.shape) elif isinstance(result, np.integer): result = lib.Timestamp(result) return result + def _try_fill(self, value): + """ if we are a NaT, return the actual fill value """ + if isinstance(value, type(tslib.NaT)): + value = tslib.iNaT + return value + + 
def fillna(self, value, inplace=False, downcast=None): + values = self.values if inplace else self.values.copy() + mask = com.isnull(self.values) + value = self._try_fill(value) + np.putmask(values,mask,value) + return [self if inplace else make_block(values, self.items, + self.ref_items, fastpath=True)] + def to_native_types(self, slicer=None, na_rep=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: - values = values[:,slicer] + values = values[:, slicer] mask = isnull(values) - rvalues = np.empty(values.shape,dtype=object) + rvalues = np.empty(values.shape, dtype=object) if na_rep is None: na_rep = 'NaT' rvalues[mask] = na_rep imask = (-mask).ravel() if self.dtype == 'datetime64[ns]': - rvalues.flat[imask] = np.array([ Timestamp(val)._repr_base for val in values.ravel()[imask] ],dtype=object) + rvalues.flat[imask] = np.array( + [Timestamp(val)._repr_base for val in values.ravel()[imask]], dtype=object) elif self.dtype == 'timedelta64[ns]': - rvalues.flat[imask] = np.array([ lib.repr_timedelta64(val) for val in values.ravel()[imask] ],dtype=object) + rvalues.flat[imask] = np.array([lib.repr_timedelta64(val) + for val in values.ravel()[imask]], dtype=object) return rvalues.tolist() def should_store(self, value): return issubclass(value.dtype.type, np.datetime64) + def astype(self, dtype, copy=False, raise_on_error=True): + """ + handle convert to object as a special case + """ + klass = None + if np.dtype(dtype).type == np.object_: + klass = ObjectBlock + return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, klass=klass) + def set(self, item, value): """ Modify Block in-place with new item value @@ -923,53 +1153,274 @@ def set(self, item, value): self.values[loc] = value - def get_values(self, dtype): + def get_values(self, dtype=None): if dtype == object: flat_i8 = self.values.ravel().view(np.int64) res = tslib.ints_to_pydatetime(flat_i8) return res.reshape(self.values.shape) 
return self.values -def make_block(values, items, ref_items, klass=None, fastpath=False, placement=None): + +class SparseBlock(Block): + + """ implement as a list of sparse arrays of the same dtype """ + __slots__ = ['items', 'ref_items', '_ref_locs', 'ndim', 'values'] + is_sparse = True + is_numeric = True + _can_hold_na = True + _can_consolidate = False + _verify_integrity = False + _ftype = 'sparse' + + def __init__(self, values, items, ref_items, ndim=None, fastpath=False, placement=None): + + # kludgetastic + if ndim is not None: + if ndim == 1: + ndim = 1 + elif ndim > 2: + ndim = ndim + else: + if len(items) != 1: + ndim = 1 + else: + ndim = 2 + self.ndim = ndim + + self._ref_locs = None + self.values = values + if fastpath: + self.items = items + self.ref_items = ref_items + else: + self.items = _ensure_index(items) + self.ref_items = _ensure_index(ref_items) + + @property + def shape(self): + return (len(self.items), self.sp_index.length) + + @property + def itemsize(self): + return self.dtype.itemsize + + @rwproperty.getproperty + def fill_value(self): + return self.values.fill_value + + @rwproperty.setproperty + def fill_value(self, v): + # we may need to upcast our fill to match our dtype + if issubclass(self.dtype.type, np.floating): + v = float(v) + self.values.fill_value = v + + @rwproperty.getproperty + def sp_values(self): + return self.values.sp_values + + @rwproperty.setproperty + def sp_values(self, v): + # reset the sparse values + self.values = SparseArray( + v, sparse_index=self.sp_index, kind=self.kind, dtype=v.dtype, fill_value=self.fill_value, copy=False) + + @property + def sp_index(self): + return self.values.sp_index + + @property + def kind(self): + return self.values.kind + + def __len__(self): + try: + return self.sp_index.length + except: + return 0 + + def should_store(self, value): + return isinstance(value, SparseArray) + + def prepare_for_merge(self, **kwargs): + """ create a dense block """ + return 
make_block(self.get_values(), self.items, self.ref_items) + + def post_merge(self, items, **kwargs): + return self + + def set(self, item, value): + self.values = value + + def get(self, item): + if self.ndim == 1: + loc = self.items.get_loc(item) + return self.values[loc] + else: + return self.values + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def get_values(self, dtype=None): + """ need to to_dense myself (and always return a ndim sized object) """ + values = self.values.to_dense() + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def get_merge_length(self): + return 1 + + def make_block( + self, values, items=None, ref_items=None, sparse_index=None, kind=None, dtype=None, fill_value=None, + copy=False, fastpath=True): + """ return a new block """ + if dtype is None: + dtype = self.dtype + if fill_value is None: + fill_value = self.fill_value + if items is None: + items = self.items + if ref_items is None: + ref_items = self.ref_items + new_values = SparseArray(values, sparse_index=sparse_index, + kind=kind or self.kind, dtype=dtype, fill_value=fill_value, copy=copy) + return make_block(new_values, items, ref_items, ndim=self.ndim, fastpath=fastpath) + + def interpolate(self, method='pad', axis=0, inplace=False, + limit=None, missing=None, **kwargs): + + values = com.interpolate_2d( + self.values.to_dense(), method, axis, limit, missing) + return self.make_block(values, self.items, self.ref_items) + + def fillna(self, value, inplace=False, downcast=None): + # we may need to upcast our fill to match our dtype + if issubclass(self.dtype.type, np.floating): + value = float(value) + values = self.values if inplace else self.values.copy() + return [ self.make_block(values.get_values(value), fill_value=value) ] + + def shift(self, indexer, periods): + """ shift the block by periods """ + + new_values = 
self.values.to_dense().take(indexer) + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + new_values, fill_value = com._maybe_upcast(new_values) + if periods > 0: + new_values[:periods] = fill_value + else: + new_values[periods:] = fill_value + return [ self.make_block(new_values) ] + + def take(self, indexer, ref_items, axis=1): + """ going to take our items + along the long dimension""" + if axis < 1: + raise AssertionError('axis must be at least 1, got %d' % axis) + + return [ self.make_block(self.values.take(indexer)) ] + + def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): + """ + Reindex using pre-computed indexer information + """ + if axis < 1: + raise AssertionError('axis must be at least 1, got %d' % axis) + + # taking on the 0th axis always here + if fill_value is None: + fill_value = self.fill_value + return self.make_block(self.values.take(indexer), items=self.items, fill_value=fill_value) + + def reindex_items_from(self, new_ref_items, copy=True): + """ + Reindex to only those items contained in the input set of items + + E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], + then the resulting items will be ['b'] + + Returns + ------- + reindexed : Block + """ + + # 2-d + if self.ndim >= 2: + if self.items[0] not in self.ref_items: + return None + return self.make_block(self.values, ref_items=new_ref_items, copy=copy) + + # 1-d + new_ref_items, indexer = self.items.reindex(new_ref_items) + if indexer is None: + indexer = np.arange(len(self.items)) + + return self.make_block(com.take_1d(self.values.values, indexer), items=new_ref_items, ref_items=new_ref_items, copy=copy) + + def sparse_reindex(self, new_index): + """ sparse reindex and return a new block + current reindex only works for float64 dtype! 
""" + values = self.values + values = values.sp_index.to_int_index().reindex( + values.sp_values.astype('float64'), values.fill_value, new_index) + return self.make_block(values, sparse_index=new_index) + + def split_block_at(self, item): + if len(self.items) == 1 and item == self.items[0]: + return [] + return super(SparseBlock, self).split_block_at(self, item) + + +def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, fastpath=False, placement=None): if klass is None: - dtype = values.dtype + dtype = dtype or values.dtype vtype = dtype.type - if issubclass(vtype, np.floating): + if isinstance(values, SparseArray): + klass = SparseBlock + elif issubclass(vtype, np.floating): klass = FloatBlock - elif issubclass(vtype, np.complexfloating): - klass = ComplexBlock - elif issubclass(vtype, np.datetime64): - klass = DatetimeBlock - elif issubclass(vtype, np.integer): + elif issubclass(vtype, np.integer) and not issubclass(vtype, np.datetime64): klass = IntBlock elif dtype == np.bool_: klass = BoolBlock + elif issubclass(vtype, np.datetime64): + klass = DatetimeBlock + elif issubclass(vtype, np.complexfloating): + klass = ComplexBlock - # try to infer a datetimeblock - if klass is None and np.prod(values.shape): - flat = values.ravel() - inferred_type = lib.infer_dtype(flat) - if inferred_type == 'datetime': + # try to infer a DatetimeBlock, or set to an ObjectBlock + else: - # we have an object array that has been inferred as datetime, so - # convert it - try: - values = tslib.array_to_datetime(flat).reshape(values.shape) - klass = DatetimeBlock - except: # it already object, so leave it - pass + if np.prod(values.shape): + flat = values.ravel() + inferred_type = lib.infer_dtype(flat) + if inferred_type == 'datetime': - if klass is None: - klass = ObjectBlock + # we have an object array that has been inferred as datetime, so + # convert it + try: + values = tslib.array_to_datetime( + flat).reshape(values.shape) + klass = DatetimeBlock + except: # 
it already object, so leave it + pass + + if klass is None: + klass = ObjectBlock - return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath, placement=placement) + return klass(values, items, ref_items, ndim=ndim, fastpath=fastpath, placement=placement) # TODO: flexible with index=None and/or items=None class BlockManager(PandasObject): + """ Core internal data structure to implement DataFrame @@ -985,22 +1436,24 @@ class BlockManager(PandasObject): ----- This is *not* a public API class """ - __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated', '_ref_locs', '_items_map'] + __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', + '_is_consolidated', '_has_sparse', '_ref_locs', '_items_map'] - def __init__(self, blocks, axes, do_integrity_check=True): + def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): self.axes = [_ensure_index(ax) for ax in axes] self.blocks = blocks - ndim = len(axes) + ndim = self.ndim for block in blocks: - if ndim != block.values.ndim: + if not block.is_sparse and ndim != block.ndim: raise AssertionError(('Number of Block dimensions (%d) must ' 'equal number of axes (%d)') - % (block.values.ndim, ndim)) + % (block.ndim, ndim)) if do_integrity_check: self._verify_integrity() + self._has_sparse = False self._consolidate_check() # we have a duplicate items index, setup the block maps @@ -1008,8 +1461,8 @@ def __init__(self, blocks, axes, do_integrity_check=True): self._set_ref_locs(do_refs=True) @classmethod - def make_empty(self): - return BlockManager([], [[], []]) + def make_empty(cls): + return cls([], [[], []]) def __nonzero__(self): return True @@ -1017,9 +1470,17 @@ def __nonzero__(self): # Python3 compat __bool__ = __nonzero__ + @property + def shape(self): + if getattr(self, '_shape', None) is None: + self._shape = tuple(len(ax) for ax in self.axes) + return self._shape + @property def ndim(self): - return len(self.axes) + if getattr(self, '_ndim', None) is 
None: + self._ndim = len(self.axes) + return self._ndim def set_axis(self, axis, value, maybe_rename=True, check_axis=True): cur_axis = self.axes[axis] @@ -1030,6 +1491,7 @@ def set_axis(self, axis, value, maybe_rename=True, check_axis=True): % (len(value), len(cur_axis))) self.axes[axis] = value + self._shape = None if axis == 0: @@ -1044,7 +1506,6 @@ def set_axis(self, axis, value, maybe_rename=True, check_axis=True): # set/reset ref_locs based on the new index self._set_ref_locs(labels=value, do_refs=True) - def _reset_ref_locs(self): """ take the current _ref_locs and reset ref_locs on the blocks to correctly map, ignoring Nones; @@ -1059,7 +1520,7 @@ def _reset_ref_locs(self): b.reset_ref_locs() self._rebuild_ref_locs() - self._ref_locs = None + self._ref_locs = None self._items_map = None def _rebuild_ref_locs(self): @@ -1102,10 +1563,10 @@ def _set_ref_locs(self, labels=None, do_refs=False): # we are going to a non-unique index # we have ref_locs on the block at this point - if (not is_unique and do_refs) or do_refs=='force': + if (not is_unique and do_refs) or do_refs == 'force': # create the items map - im = getattr(self,'_items_map',None) + im = getattr(self, '_items_map', None) if im is None: im = dict() @@ -1118,25 +1579,25 @@ def _set_ref_locs(self, labels=None, do_refs=False): except: raise AssertionError("cannot create BlockManager._ref_locs because " "block [%s] with duplicate items [%s] " - "does not have _ref_locs set" % (block,labels)) + "does not have _ref_locs set" % (block, labels)) - m = maybe_create_block_in_items_map(im,block) + m = maybe_create_block_in_items_map(im, block) for i, item in enumerate(block.items): m[i] = rl[i] self._items_map = im # create the _ref_loc map here - rl = [ None] * len(labels) + rl = [None] * len(labels) for block, items in im.items(): for i, loc in enumerate(items): - rl[loc] = (block,i) + rl[loc] = (block, i) self._ref_locs = rl return rl # return our cached _ref_locs (or will compute again # when we recreate 
the block manager if needed - return getattr(self,'_ref_locs',None) + return getattr(self, '_ref_locs', None) def get_items_map(self, use_cached=True): """ @@ -1148,7 +1609,7 @@ def get_items_map(self, use_cached=True): # cache check if use_cached: - im = getattr(self,'_items_map',None) + im = getattr(self, '_items_map', None) if im is not None: return im @@ -1161,17 +1622,16 @@ def get_items_map(self, use_cached=True): axis = self.axes[0] for block in self.blocks: - m = maybe_create_block_in_items_map(im,block) + m = maybe_create_block_in_items_map(im, block) for i, item in enumerate(block.items): m[i] = axis.get_loc(item) - # use the ref_locs to construct the map else: for i, (block, idx) in enumerate(rl): - m = maybe_create_block_in_items_map(im,block) + m = maybe_create_block_in_items_map(im, block) m[idx] = i self._items_map = im @@ -1187,7 +1647,15 @@ def get_dtype_counts(self): self._consolidate_inplace() counts = dict() for b in self.blocks: - counts[b.dtype.name] = counts.get(b.dtype,0) + b.shape[0] + counts[b.dtype.name] = counts.get(b.dtype.name, 0) + b.shape[0] + return counts + + def get_ftype_counts(self): + """ return a dict of the counts of dtypes in BlockManager """ + self._consolidate_inplace() + counts = dict() + for b in self.blocks: + counts[b.ftype] = counts.get(b.ftype, 0) + b.shape[0] return counts def __getstate__(self): @@ -1204,20 +1672,24 @@ def __setstate__(self, state): self.axes = [_ensure_index(ax) for ax in ax_arrays] self.axes = _handle_legacy_indexes(self.axes) - self._is_consolidated = False - self._known_consolidated = False - blocks = [] for values, items in zip(bvalues, bitems): blk = make_block(values, items, self.axes[0]) blocks.append(blk) self.blocks = blocks + self._post_setstate() + + def _post_setstate(self): + self._is_consolidated = False + self._known_consolidated = False + self._set_has_sparse() + def __len__(self): return len(self.items) def __unicode__(self): - output = 'BlockManager' + output = 
com.pprint_thing(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: output += '\nItems: %s' % ax @@ -1228,10 +1700,6 @@ def __unicode__(self): output += '\n%s' % com.pprint_thing(block) return output - @property - def shape(self): - return tuple(len(ax) for ax in self.axes) - def _verify_integrity(self): mgr_shape = self.shape tot_items = sum(len(x.items) for x in self.blocks) @@ -1239,9 +1707,9 @@ def _verify_integrity(self): if block.ref_items is not self.items: raise AssertionError("Block ref_items must be BlockManager " "items") - if block.values.shape[1:] != mgr_shape[1:]: - construction_error(tot_items,block.values.shape[1:],self.axes) - + if not block.is_sparse and block.values.shape[1:] != mgr_shape[1:]: + construction_error( + tot_items, block.values.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items\n# manager items: {0}, # ' @@ -1258,8 +1726,9 @@ def apply(self, f, *args, **kwargs): filter : list, if supplied, only call the block if the filter is in the block """ - axes = kwargs.pop('axes',None) + axes = kwargs.pop('axes', None) filter = kwargs.get('filter') + do_integrity_check = kwargs.pop('do_integrity_check', False) result_blocks = [] for blk in self.blocks: if filter is not None: @@ -1270,13 +1739,14 @@ def apply(self, f, *args, **kwargs): if callable(f): applied = f(blk, *args, **kwargs) else: - applied = getattr(blk,f)(*args, **kwargs) + applied = getattr(blk, f)(*args, **kwargs) - if isinstance(applied,list): + if isinstance(applied, list): result_blocks.extend(applied) else: result_blocks.append(applied) - bm = self.__class__(result_blocks, axes or self.axes) + bm = self.__class__( + result_blocks, axes or self.axes, do_integrity_check=do_integrity_check) bm._consolidate_inplace() return bm @@ -1286,6 +1756,9 @@ def where(self, *args, **kwargs): def eval(self, *args, **kwargs): return self.apply('eval', *args, **kwargs) + def setitem(self, 
*args, **kwargs): + return self.apply('setitem', *args, **kwargs) + def putmask(self, *args, **kwargs): return self.apply('putmask', *args, **kwargs) @@ -1318,24 +1791,29 @@ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False): # figure out our mask a-priori to avoid repeated replacements values = self.as_matrix() + def comp(s): if isnull(s): return isnull(values) return values == s - masks = [ comp(s) for i, s in enumerate(src_lst) ] + masks = [comp(s) for i, s in enumerate(src_lst)] result_blocks = [] for blk in self.blocks: # its possible to get multiple result blocks here # replace ALWAYS will return a list - rb = [ blk if inplace else blk.copy() ] + rb = [blk if inplace else blk.copy()] for i, (s, d) in enumerate(zip(src_lst, dest_lst)): new_rb = [] for b in rb: if b.dtype == np.object_: - new_rb.extend(b.replace(s, d, inplace=inplace, - regex=regex)) + result = b.replace(s, d, inplace=inplace, + regex=regex) + if isinstance(result, list): + new_rb.extend(result) + else: + new_rb.append(result) else: # get our mask for this element, sized to this # particular block @@ -1351,6 +1829,29 @@ def comp(s): bm._consolidate_inplace() return bm + def prepare_for_merge(self, *args, **kwargs): + """ prepare for merging, return a new block manager with Sparse -> Dense """ + self._consolidate_inplace() + if self._has_sparse: + return self.apply('prepare_for_merge', *args, **kwargs) + return self + + def post_merge(self, objs, **kwargs): + """ try to sparsify items that were previously sparse """ + is_sparse = defaultdict(list) + for o in objs: + for blk in o._data.blocks: + if blk.is_sparse: + + # record the dtype of each item + for i in blk.items: + is_sparse[i].append(blk.dtype) + + if len(is_sparse): + return self.apply('post_merge', items=is_sparse) + + return self + def is_consolidated(self): """ Return True if more than one block with the same dtype @@ -1360,9 +1861,13 @@ def is_consolidated(self): return self._is_consolidated def 
_consolidate_check(self): - dtypes = [blk.dtype.type for blk in self.blocks] - self._is_consolidated = len(dtypes) == len(set(dtypes)) + ftypes = [blk.ftype for blk in self.blocks] + self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True + self._set_has_sparse() + + def _set_has_sparse(self): + self._has_sparse = any((blk.is_sparse for blk in self.blocks)) @property def is_mixed_type(self): @@ -1374,63 +1879,104 @@ def is_mixed_type(self): def is_numeric_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() - return all([ block.is_numeric for block in self.blocks ]) + return all([block.is_numeric for block in self.blocks]) + + def get_block_map(self, copy=False, typ=None, columns=None, is_numeric=False, is_bool=False): + """ return a dictionary mapping the ftype -> block list + + Parameters + ---------- + typ : return a list/dict + copy : copy if indicated + columns : a column filter list + filter if the type is indicated """ + + # short circuit - mainly for merging + if typ == 'dict' and columns is None and not is_numeric and not is_bool and not copy: + bm = defaultdict(list) + for b in self.blocks: + bm[str(b.ftype)].append(b) + return bm + + self._consolidate_inplace() + + if is_numeric: + filter_blocks = lambda block: block.is_numeric + elif is_bool: + filter_blocks = lambda block: block.is_bool + else: + filter_blocks = lambda block: True + + def filter_columns(b): + if columns: + if not columns in b.items: + return None + b = b.reindex_items_from(columns) + return b - def get_numeric_data(self, copy=False, type_list=None, as_blocks = False): + maybe_copy = lambda b: b.copy() if copy else b + + def maybe_copy(b): + if copy: + b = b.copy() + return b + + if typ == 'list': + bm = [] + for b in self.blocks: + if filter_blocks(b): + b = filter_columns(b) + if b is not None: + bm.append(maybe_copy(b)) + + else: + if typ == 'dtype': + key = lambda b: b.dtype + else: + key = lambda b: 
b.ftype + bm = defaultdict(list) + for b in self.blocks: + if filter_blocks(b): + b = filter_columns(b) + if b is not None: + bm[str(key(b))].append(maybe_copy(b)) + return bm + + def get_bool_data(self, **kwargs): + kwargs['is_bool'] = True + return self.get_data(**kwargs) + + def get_numeric_data(self, **kwargs): + kwargs['is_numeric'] = True + return self.get_data(**kwargs) + + def get_data(self, copy=False, columns=None, **kwargs): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks - type_list : tuple of type, default None - Numeric types by default (Float/Complex/Int but not Datetime) """ - if type_list is None: - filter_blocks = lambda block: block.is_numeric - else: - type_list = self._get_clean_block_types(type_list) - filter_blocks = lambda block: isinstance(block, type_list) - - maybe_copy = lambda b: b.copy() if copy else b - num_blocks = [maybe_copy(b) for b in self.blocks if filter_blocks(b)] - if as_blocks: - return num_blocks + blocks = self.get_block_map( + typ='list', copy=copy, columns=columns, **kwargs) + if len(blocks) == 0: + return self.__class__.make_empty() - if len(num_blocks) == 0: - return BlockManager.make_empty() + return self.combine(blocks) - indexer = np.sort(np.concatenate([b.ref_locs for b in num_blocks])) + def combine(self, blocks): + """ reutrn a new manager with the blocks """ + indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) new_items = self.items.take(indexer) new_blocks = [] - for b in num_blocks: + for b in blocks: b = b.copy(deep=False) b.ref_items = new_items new_blocks.append(b) new_axes = list(self.axes) new_axes[0] = new_items - return BlockManager(new_blocks, new_axes, do_integrity_check=False) - - def _get_clean_block_types(self, type_list): - if not isinstance(type_list, tuple): - try: - type_list = tuple(type_list) - except TypeError: - type_list = (type_list,) - - type_map = {int: IntBlock, float: FloatBlock, - complex: ComplexBlock, - np.datetime64: DatetimeBlock, - 
datetime: DatetimeBlock, - bool: BoolBlock, - object: ObjectBlock} - - type_list = tuple([type_map.get(t, t) for t in type_list]) - return type_list - - def get_bool_data(self, copy=False, as_blocks=False): - return self.get_numeric_data(copy=copy, type_list=(BoolBlock,), - as_blocks=as_blocks) + return self.__class__(new_blocks, new_axes, do_integrity_check=False) def get_slice(self, slobj, axis=0, raise_on_error=False): new_axes = list(self.axes) @@ -1444,7 +1990,7 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): new_items = new_axes[0] if len(self.blocks) == 1: blk = self.blocks[0] - newb = make_block(blk.values[slobj], + newb = make_block(blk._slice(slobj), new_items, new_items, klass=blk.__class__, @@ -1456,7 +2002,7 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): else: new_blocks = self._slice_blocks(slobj, axis) - bm = BlockManager(new_blocks, new_axes, do_integrity_check=False) + bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) bm._consolidate_inplace() return bm @@ -1468,12 +2014,13 @@ def _slice_blocks(self, slobj, axis): slicer = tuple(slicer) for block in self.blocks: - newb = make_block(block.values[slicer], + newb = make_block(block._slice(slicer), block.items, block.ref_items, klass=block.__class__, fastpath=True, placement=block._ref_locs) + newb.set_ref_locs(block._ref_locs) new_blocks.append(newb) return new_blocks @@ -1501,10 +2048,8 @@ def copy(self, deep=True): ------- copy : BlockManager """ - copy_blocks = [block.copy(deep=deep) for block in self.blocks] - # copy_axes = [ax.copy() for ax in self.axes] - copy_axes = list(self.axes) - return BlockManager(copy_blocks, copy_axes, do_integrity_check=False) + new_axes = list(self.axes) + return self.apply('copy', axes=new_axes, deep=deep, do_integrity_check=False) def as_matrix(self, items=None): if len(self.blocks) == 0: @@ -1513,7 +2058,7 @@ def as_matrix(self, items=None): blk = self.blocks[0] if items is None or blk.items.equals(items): # if not, then 
just call interleave per below - mat = blk.values + mat = blk.get_values() else: mat = self.reindex_items(items).as_matrix() else: @@ -1600,7 +2145,7 @@ def xs(self, key, axis=1, copy=True): klass=block.__class__, fastpath=True)] - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def fast_2d_xs(self, loc, copy=False): """ @@ -1639,8 +2184,9 @@ def consolidate(self): if self.is_consolidated(): return self - new_blocks = _consolidate(self.blocks, self.items) - return BlockManager(new_blocks, self.axes) + bm = self.__class__(self.blocks, self.axes) + bm._consolidate_inplace() + return bm def _consolidate_inplace(self): if not self.is_consolidated(): @@ -1654,6 +2200,7 @@ def _consolidate_inplace(self): self._is_consolidated = True self._known_consolidated = True + self._set_has_sparse() def get(self, item): if self.items.is_unique: @@ -1667,17 +2214,17 @@ def get(self, item): if com.is_integer(indexer): b, loc = ref_locs[indexer] - values = [ b.iget(loc) ] - index = Index([ self.items[indexer] ]) + values = [b.iget(loc)] + index = Index([self.items[indexer]]) # we have a multiple result, potentially across blocks else: - values = [ block.iget(i) for block, i in ref_locs[indexer] ] + values = [block.iget(i) for block, i in ref_locs[indexer]] index = self.items[indexer] # create and return a new block manager - axes = [ index ] + self.axes[1:] + axes = [index] + self.axes[1:] blocks = form_blocks(values, index, axes) mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() @@ -1729,10 +2276,12 @@ def set(self, item, value): Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items """ - value = _block_shape(value,self.ndim-1) - if value.shape[1:] != self.shape[1:]: - raise AssertionError('Shape of new values must be compatible ' - 'with manager shape') + if not isinstance(value, SparseArray): + if value.ndim == self.ndim - 1: + value = value.reshape((1,) + value.shape) + if value.shape[1:] != self.shape[1:]: + raise AssertionError('Shape of new values must be compatible ' + 'with manager shape') def _set_item(item, arr): i, block = self._find_block(item) @@ -1767,7 +2316,8 @@ def _set_item(item, arr): for i, (l, arr) in enumerate(zip(loc, value)): # insert the item - self.insert(l, item, arr[None, :], allow_duplicates=True) + self.insert( + l, item, arr[None, :], allow_duplicates=True) # reset the _ref_locs on indiviual blocks # rebuild ref_locs @@ -1777,7 +2327,6 @@ def _set_item(item, arr): self._rebuild_ref_locs() - else: for i, (item, arr) in enumerate(zip(subset, value)): _set_item(item, arr[None, :]) @@ -1815,11 +2364,12 @@ def insert(self, loc, item, value, allow_duplicates=False): self._known_consolidated = False # clear the internal ref_loc mappings if necessary - if loc != len(self.items)-1 and new_items.is_unique: + if loc != len(self.items) - 1 and new_items.is_unique: self.set_items_clear(new_items) def set_items_norename(self, value): self.set_axis(0, value, maybe_rename=False, check_axis=False) + self._shape = None def set_items_clear(self, value): """ clear the ref_locs on all blocks """ @@ -1833,7 +2383,7 @@ def _delete_from_all_blocks(self, loc, item): # possibily convert to an indexer loc = _possibly_convert_to_indexer(loc) - if isinstance(loc, (list,tuple,np.ndarray)): + if isinstance(loc, (list, tuple, np.ndarray)): for l in loc: for i, b in enumerate(self.blocks): if item in b.items: @@ -1851,51 +2401,57 @@ def _delete_from_block(self, i, item): so after this function, _ref_locs and _items_map (if used) are correct for the items, None fills holes in _ref_locs 
""" - block = self.blocks.pop(i) - ref_locs = self._set_ref_locs() - prev_items_map = self._items_map.pop(block) if ref_locs is not None else None - - # compute the split mask - loc = block.items.get_loc(item) - if type(loc) == slice or com.is_integer(loc): - mask = np.array([True] * len(block)) - mask[loc] = False - else: # already a mask, inverted - mask = -loc + block = self.blocks.pop(i) + ref_locs = self._set_ref_locs() + prev_items_map = self._items_map.pop( + block) if ref_locs is not None else None - # split the block - counter = 0 - for s, e in com.split_ranges(mask): + # if we can't consolidate, then we are removing this block in its + # entirey + if block._can_consolidate: - sblock = make_block(block.values[s:e], - block.items[s:e].copy(), - block.ref_items, - klass=block.__class__, - fastpath=True) + # compute the split mask + loc = block.items.get_loc(item) + if type(loc) == slice or com.is_integer(loc): + mask = np.array([True] * len(block)) + mask[loc] = False + else: # already a mask, inverted + mask = -loc - self.blocks.append(sblock) + # split the block + counter = 0 + for s, e in com.split_ranges(mask): - # update the _ref_locs/_items_map - if ref_locs is not None: + sblock = make_block(block.values[s:e], + block.items[s:e].copy(), + block.ref_items, + klass=block.__class__, + fastpath=True) - # fill the item_map out for this sub-block - m = maybe_create_block_in_items_map(self._items_map,sblock) - for j, itm in enumerate(sblock.items): + self.blocks.append(sblock) - # is this item masked (e.g. was deleted)? - while (True): + # update the _ref_locs/_items_map + if ref_locs is not None: - if counter > len(mask) or mask[counter]: - break - else: - counter += 1 + # fill the item_map out for this sub-block + m = maybe_create_block_in_items_map( + self._items_map, sblock) + for j, itm in enumerate(sblock.items): - # find my mapping location - m[j] = prev_items_map[counter] - counter += 1 + # is this item masked (e.g. was deleted)? 
+ while (True): - # set the ref_locs in this block - sblock.set_ref_locs(m) + if counter > len(mask) or mask[counter]: + break + else: + counter += 1 + + # find my mapping location + m[j] = prev_items_map[counter] + counter += 1 + + # set the ref_locs in this block + sblock.set_ref_locs(m) # reset the ref_locs to the new structure if ref_locs is not None: @@ -1946,7 +2502,7 @@ def _check_have(self, item): if item not in self.items: raise KeyError('no item named %s' % com.pprint_thing(item)) - def reindex_axis(self, new_axis, method=None, axis=0, copy=True): + def reindex_axis(self, new_axis, method=None, axis=0, fill_value=None, limit=None, copy=True): new_axis = _ensure_index(new_axis) cur_axis = self.axes[axis] @@ -1954,6 +2510,7 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True): if copy: result = self.copy(deep=True) result.axes[axis] = new_axis + result._shape = None if axis == 0: # patch ref_items, #1823 @@ -1968,12 +2525,13 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True): if method is not None: raise AssertionError('method argument not supported for ' 'axis == 0') - return self.reindex_items(new_axis) + return self.reindex_items(new_axis, copy=copy, fill_value=fill_value) - new_axis, indexer = cur_axis.reindex(new_axis, method, copy_if_needed=True) - return self.reindex_indexer(new_axis, indexer, axis=axis) + new_axis, indexer = cur_axis.reindex( + new_axis, method, copy_if_needed=True) + return self.reindex_indexer(new_axis, indexer, axis=axis, fill_value=fill_value) - def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): + def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=None): """ pandas-indexer with -1's only. 
""" @@ -1982,12 +2540,13 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): new_blocks = [] for block in self.blocks: - newb = block.reindex_axis(indexer, axis=axis, fill_value=fill_value) + newb = block.reindex_axis( + indexer, axis=axis, fill_value=fill_value) new_blocks.append(newb) new_axes = list(self.axes) new_axes[axis] = new_axis - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def _reindex_indexer_items(self, new_items, indexer, fill_value): # TODO: less efficient than I'd like @@ -2022,9 +2581,9 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) - def reindex_items(self, new_items, copy=True, fill_value=np.nan): + def reindex_items(self, new_items, copy=True, fill_value=None): """ """ @@ -2032,7 +2591,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): data = self if not data.is_consolidated(): data = data.consolidate() - return data.reindex_items(new_items) + return data.reindex_items(new_items, copy=copy, fill_value=fill_value) # TODO: this part could be faster (!) 
new_items, indexer = self.items.reindex(new_items, copy_if_needed=True) @@ -2053,6 +2612,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): if len(newb.items) > 0: new_blocks.append(newb) + # add a na block if we are missing items mask = indexer == -1 if mask.any(): extra_items = new_items[mask] @@ -2061,11 +2621,13 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) - def _make_na_block(self, items, ref_items, fill_value=np.nan): + def _make_na_block(self, items, ref_items, fill_value=None): # TODO: infer dtypes other than float64 from fill_value + if fill_value is None: + fill_value = np.nan block_shape = list(self.shape) block_shape[0] = len(items) @@ -2079,11 +2641,14 @@ def take(self, indexer, new_index=None, axis=1, verify=True): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) + if isinstance(indexer, list): + indexer = np.array(indexer) + indexer = com._ensure_platform_int(indexer) n = len(self.axes[axis]) if verify: - indexer = _maybe_convert_indices(indexer, n) + indexer = _maybe_convert_indices(indexer, n) if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' @@ -2094,7 +2659,7 @@ def take(self, indexer, new_index=None, axis=1, verify=True): new_index = self.axes[axis].take(indexer) new_axes[axis] = new_index - return self.apply('take',axes=new_axes,indexer=indexer,ref_items=new_axes[0],axis=axis) + return self.apply('take', axes=new_axes, indexer=indexer, ref_items=new_axes[0], axis=axis) def merge(self, other, lsuffix=None, rsuffix=None): if not self._is_indexed_like(other): @@ -2107,8 +2672,7 @@ def merge(self, other, lsuffix=None, rsuffix=None): new_axes[0] = cons_items consolidated = _consolidate(this.blocks + other.blocks, cons_items) - - return BlockManager(consolidated, 
new_axes) + return self.__class__(consolidated, new_axes) def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True): to_rename = self.items.intersection(other.items) @@ -2149,7 +2713,8 @@ def rename_axis(self, mapper, axis=1): index = self.axes[axis] if isinstance(index, MultiIndex): - new_axis = MultiIndex.from_tuples([tuple(mapper(y) for y in x) for x in index], names=index.names) + new_axis = MultiIndex.from_tuples( + [tuple(mapper(y) for y in x) for x in index], names=index.names) else: new_axis = Index([mapper(x) for x in index], name=index.name) @@ -2158,7 +2723,7 @@ def rename_axis(self, mapper, axis=1): new_axes = list(self.axes) new_axes[axis] = new_axis - return BlockManager(self.blocks, new_axes) + return self.__class__(self.blocks, new_axes) def rename_items(self, mapper, copydata=True): if isinstance(self.items, MultiIndex): @@ -2166,7 +2731,7 @@ def rename_items(self, mapper, copydata=True): new_items = MultiIndex.from_tuples(items, names=self.items.names) else: items = [mapper(x) for x in self.items] - new_items = Index(items, names=self.items.names) + new_items = Index(items, name=self.items.name) new_blocks = [] for block in self.blocks: @@ -2175,7 +2740,7 @@ def rename_items(self, mapper, copydata=True): new_blocks.append(newb) new_axes = list(self.axes) new_axes[0] = new_items - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def add_prefix(self, prefix): f = (('%s' % prefix) + '%s').__mod__ @@ -2207,18 +2772,156 @@ def item_dtypes(self): mask = np.zeros(len(self.items), dtype=bool) for i, blk in enumerate(self.blocks): indexer = self.items.get_indexer(blk.items) - result.put(indexer, blk.values.dtype.name) + result.put(indexer, blk.dtype.name) mask.put(indexer, 1) if not (mask.all()): raise AssertionError('Some items were not in any block') return result +class SingleBlockManager(BlockManager): + + """ manage a single block with """ + ndim = 1 + _is_consolidated = True + _known_consolidated = 
True + __slots__ = ['axes', 'blocks', '_block', + '_values', '_shape', '_has_sparse'] + + def __init__(self, block, axis, do_integrity_check=False, fastpath=True): + + if isinstance(axis, list): + if len(axis) != 1: + raise ValueError( + "cannot create SingleBlockManager with more than 1 axis") + axis = axis[0] + + # passed from constructor, single block, single axis + if fastpath: + self.axes = [axis] + if isinstance(block, list): + if len(block) != 1: + raise ValueError( + "cannot create SingleBlockManager with more than 1 block") + block = block[0] + if not isinstance(block, Block): + block = make_block(block, axis, axis, ndim=1, fastpath=True) + + else: + + self.axes = [_ensure_index(axis)] + + # create the block here + if isinstance(block, list): + + # provide consolidation to the interleaved_dtype + if len(block) > 1: + dtype = _interleaved_dtype(block) + block = [b.astype(dtype) for b in block] + block = _consolidate(block, axis) + + if len(block) != 1: + raise ValueError( + "cannot create SingleBlockManager with more than 1 block") + block = block[0] + + if not isinstance(block, Block): + block = make_block(block, axis, axis, ndim=1, fastpath=True) + + self.blocks = [block] + self._block = self.blocks[0] + self._values = self._block.values + self._has_sparse = self._block.is_sparse + + def _post_setstate(self): + self._block = self.blocks[0] + self._values = self._block.values + + @property + def shape(self): + if getattr(self, '_shape', None) is None: + self._shape = tuple([len(self.axes[0])]) + return self._shape + + def reindex(self, new_axis, method=None, limit=None, copy=True): + + # if we are the same and don't copy, just return + if not copy and self.index.equals(new_axis): + return self + block = self._block.reindex_items_from(new_axis, copy=copy) + + if method is not None or limit is not None: + block = block.interpolate(method=method, limit=limit) + mgr = SingleBlockManager(block, new_axis) + mgr._consolidate_inplace() + return mgr + + def 
get_slice(self, slobj, raise_on_error=False): + if raise_on_error: + _check_slice_bounds(slobj, self.index) + return self.__class__(self._block._slice(slobj), self.index._getitem_slice(slobj), fastpath=True) + + def set_axis(self, axis, value): + cur_axis = self.axes[axis] + value = _ensure_index(value) + + if len(value) != len(cur_axis): + raise Exception('Length mismatch (%d vs %d)' + % (len(value), len(cur_axis))) + self.axes[axis] = value + self._shape = None + self._block.set_ref_items(self.items, maybe_rename=True) + + def set_ref_items(self, ref_items, maybe_rename=True): + """ we can optimize and our ref_locs are always equal to ref_items """ + if maybe_rename: + self.items = ref_items + self.ref_items = ref_items + + @property + def index(self): + return self.axes[0] + + def convert(self, *args, **kwargs): + """ convert the whole block as one """ + kwargs['by_item'] = False + return self.apply('convert', *args, **kwargs) + + @property + def dtype(self): + return self._block.dtype + + @property + def ftype(self): + return self._block.ftype + + @property + def values(self): + return self._values.view() + + @property + def itemsize(self): + return self._block.itemsize + + @property + def _can_hold_na(self): + return self._block._can_hold_na + + def is_consolidated(self): + return True + + def _consolidate_check(self): + pass + + def _consolidate_inplace(self): + pass + + def construction_error(tot_items, block_shape, axes): """ raise a helpful message about our construction """ raise ValueError("Shape of passed values is %s, indices imply %s" % ( - tuple(map(int, [tot_items] + list(block_shape))), - tuple(map(int, [len(ax) for ax in axes])))) + tuple(map(int, [tot_items] + list(block_shape))), + tuple(map(int, [len(ax) for ax in axes])))) def create_block_manager_from_blocks(blocks, axes): @@ -2227,16 +2930,18 @@ def create_block_manager_from_blocks(blocks, axes): # if we are passed values, make the blocks if len(blocks) == 1 and not isinstance(blocks[0], 
Block): placement = None if axes[0].is_unique else np.arange(len(axes[0])) - blocks = [ make_block(blocks[0], axes[0], axes[0], placement=placement) ] + blocks = [ + make_block(blocks[0], axes[0], axes[0], placement=placement)] mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except (ValueError): - blocks = [ getattr(b,'values',b) for b in blocks ] + blocks = [getattr(b, 'values', b) for b in blocks] tot_items = sum(b.shape[0] for b in blocks) - construction_error(tot_items,blocks[0].shape[1:],axes) + construction_error(tot_items, blocks[0].shape[1:], axes) + def create_block_manager_from_arrays(arrays, names, axes): try: @@ -2245,14 +2950,15 @@ def create_block_manager_from_arrays(arrays, names, axes): mgr._consolidate_inplace() return mgr except (ValueError): - construction_error(len(arrays),arrays[0].shape[1:],axes) + construction_error(len(arrays), arrays[0].shape[1:], axes) -def maybe_create_block_in_items_map(im,block): + +def maybe_create_block_in_items_map(im, block): """ create/return the block in an items_map """ try: return im[block] except: - im[block] = l = [ None ] * len(block.items) + im[block] = l = [None] * len(block.items) return l @@ -2263,7 +2969,7 @@ def form_blocks(arrays, names, axes): if len(arrays) < len(items): nn = set(names) - extra_items = Index([ i for i in items if i not in nn ]) + extra_items = Index([i for i in items if i not in nn]) else: extra_items = [] @@ -2274,9 +2980,13 @@ def form_blocks(arrays, names, axes): int_items = [] bool_items = [] object_items = [] + sparse_items = [] datetime_items = [] + for i, (k, v) in enumerate(zip(names, arrays)): - if issubclass(v.dtype.type, np.floating): + if isinstance(v, (SparseArray, ABCSparseSeries)): + sparse_items.append((i, k, v)) + elif issubclass(v.dtype.type, np.floating): float_items.append((i, k, v)) elif issubclass(v.dtype.type, np.complexfloating): complex_items.append((i, k, v)) @@ -2307,7 +3017,8 @@ def form_blocks(arrays, names, axes): 
blocks.extend(float_blocks) if len(complex_items): - complex_blocks = _simple_blockify(complex_items, items, np.complex128, is_unique=is_unique) + complex_blocks = _simple_blockify( + complex_items, items, np.complex128, is_unique=is_unique) blocks.extend(complex_blocks) if len(int_items): @@ -2315,26 +3026,34 @@ def form_blocks(arrays, names, axes): blocks.extend(int_blocks) if len(datetime_items): - datetime_blocks = _simple_blockify(datetime_items, items, _NS_DTYPE, is_unique=is_unique) + datetime_blocks = _simple_blockify( + datetime_items, items, _NS_DTYPE, is_unique=is_unique) blocks.extend(datetime_blocks) if len(bool_items): - bool_blocks = _simple_blockify(bool_items, items, np.bool_, is_unique=is_unique) + bool_blocks = _simple_blockify( + bool_items, items, np.bool_, is_unique=is_unique) blocks.extend(bool_blocks) if len(object_items) > 0: - object_blocks = _simple_blockify(object_items, items, np.object_, is_unique=is_unique) + object_blocks = _simple_blockify( + object_items, items, np.object_, is_unique=is_unique) blocks.extend(object_blocks) + if len(sparse_items) > 0: + sparse_blocks = _sparse_blockify(sparse_items, items) + blocks.extend(sparse_blocks) + if len(extra_items): shape = (len(extra_items),) + tuple(len(x) for x in axes[1:]) # empty items -> dtype object block_values = np.empty(shape, dtype=object) - block_values.fill(nan) + block_values.fill(np.nan) placement = None if is_unique else np.arange(len(extra_items)) - na_block = make_block(block_values, extra_items, items, placement=placement) + na_block = make_block( + block_values, extra_items, items, placement=placement) blocks.append(na_block) return blocks @@ -2349,11 +3068,12 @@ def _simple_blockify(tuples, ref_items, dtype, is_unique=True): values = values.astype(dtype) if is_unique: - placement=None + placement = None block = make_block(values, block_items, ref_items, placement=placement) - return [ block ] + return [block] -def _multi_blockify(tuples, ref_items, dtype = None, 
is_unique=True): + +def _multi_blockify(tuples, ref_items, dtype=None, is_unique=True): """ return an array of blocks that potentially have different dtypes """ # group by dtype @@ -2362,28 +3082,45 @@ def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True): new_blocks = [] for dtype, tup_block in grouper: - block_items, values, placement = _stack_arrays(list(tup_block), ref_items, dtype) + block_items, values, placement = _stack_arrays( + list(tup_block), ref_items, dtype) if is_unique: - placement=None + placement = None block = make_block(values, block_items, ref_items, placement=placement) new_blocks.append(block) return new_blocks + +def _sparse_blockify(tuples, ref_items, dtype=None): + """ return an array of blocks that potentially have different dtypes (and are sparse) """ + + new_blocks = [] + for i, names, array in tuples: + + if not isinstance(names, (list, tuple)): + names = [names] + items = ref_items[ref_items.isin(names)] + + array = _maybe_to_sparse(array) + block = make_block( + array, items, ref_items, klass=SparseBlock, fastpath=True) + new_blocks.append(block) + + return new_blocks + + def _stack_arrays(tuples, ref_items, dtype): - from pandas.core.series import Series # fml def _asarray_compat(x): - # asarray shouldn't be called on SparseSeries - if isinstance(x, Series): + if isinstance(x, ABCSeries): return x.values else: return np.asarray(x) def _shape_compat(x): - # sparseseries - if isinstance(x, Series): + if isinstance(x, ABCSeries): return len(x), else: return x.shape @@ -2401,7 +3138,7 @@ def _shape_compat(x): if ref_items.is_unique: items = ref_items[ref_items.isin(names)] else: - items = _ensure_index([ n for n in names if n in ref_items ]) + items = _ensure_index([n for n in names if n in ref_items]) if len(items) != len(stacked): raise Exception("invalid names passed _stack_arrays") @@ -2420,9 +3157,9 @@ def _blocks_to_series_dict(blocks, index=None): def _interleaved_dtype(blocks): - if not len(blocks): return None + 
if not len(blocks): + return None - from collections import defaultdict counts = defaultdict(lambda: []) for x in blocks: counts[type(x)].append(x) @@ -2441,6 +3178,7 @@ def _lcd_dtype(l): have_float = len(counts[FloatBlock]) > 0 have_complex = len(counts[ComplexBlock]) > 0 have_dt64 = len(counts[DatetimeBlock]) > 0 + have_sparse = len(counts[SparseBlock]) > 0 have_numeric = have_float or have_complex or have_int if (have_object or @@ -2454,7 +3192,7 @@ def _lcd_dtype(l): # if we are mixing unsigned and signed, then return # the next biggest int type (if we can) lcd = _lcd_dtype(counts[IntBlock]) - kinds = set([ i.dtype.kind for i in counts[IntBlock] ]) + kinds = set([i.dtype.kind for i in counts[IntBlock]]) if len(kinds) == 1: return lcd @@ -2463,7 +3201,7 @@ def _lcd_dtype(l): # return 1 bigger on the itemsize if unsinged if lcd.kind == 'u': - return np.dtype('int%s' % (lcd.itemsize*8*2)) + return np.dtype('int%s' % (lcd.itemsize * 8 * 2)) return lcd elif have_dt64 and not have_float and not have_complex: @@ -2471,48 +3209,57 @@ def _lcd_dtype(l): elif have_complex: return np.dtype('c16') else: - return _lcd_dtype(counts[FloatBlock]) + return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock]) + def _consolidate(blocks, items): """ - Merge blocks having same dtype + Merge blocks having same dtype, exclude non-consolidating blocks """ - get_dtype = lambda x: x.dtype.name - # sort by dtype - grouper = itertools.groupby(sorted(blocks, key=get_dtype), - lambda x: x.dtype) + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] - for dtype, group_blocks in grouper: - new_block = _merge_blocks(list(group_blocks), items, dtype) - new_blocks.append(new_block) + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks = _merge_blocks( + list(group_blocks), items, dtype=dtype, _can_consolidate=_can_consolidate) + if isinstance(merged_blocks, list): + 
new_blocks.extend(merged_blocks) + else: + new_blocks.append(merged_blocks) return new_blocks -def _merge_blocks(blocks, items, dtype=None): +def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True): if len(blocks) == 1: return blocks[0] - if dtype is None: - if len(set([ b.dtype for b in blocks ])) != 1: - raise AssertionError("_merge_blocks are invalid!") - dtype = blocks[0].dtype + if _can_consolidate: - new_values = _vstack([ b.values for b in blocks ], dtype) - new_items = blocks[0].items.append([b.items for b in blocks[1:]]) - new_block = make_block(new_values, new_items, items) + if dtype is None: + if len(set([b.dtype for b in blocks])) != 1: + raise AssertionError("_merge_blocks are invalid!") + dtype = blocks[0].dtype - # unique, can reindex - if items.is_unique: - return new_block.reindex_items_from(items) + new_values = _vstack([b.values for b in blocks], dtype) + new_items = blocks[0].items.append([b.items for b in blocks[1:]]) + new_block = make_block(new_values, new_items, items) - # merge the ref_locs - new_ref_locs = [ b._ref_locs for b in blocks ] - if all([ x is not None for x in new_ref_locs ]): - new_block.set_ref_locs(np.concatenate(new_ref_locs)) - return new_block + # unique, can reindex + if items.is_unique: + return new_block.reindex_items_from(items) + + # merge the ref_locs + new_ref_locs = [b._ref_locs for b in blocks] + if all([x is not None for x in new_ref_locs]): + new_block.set_ref_locs(np.concatenate(new_ref_locs)) + return new_block + + # no merge + return blocks def _block_shape(values, ndim=1, shape=None): @@ -2523,6 +3270,7 @@ def _block_shape(values, ndim=1, shape=None): values = values.reshape(tuple((1,) + shape)) return values + def _vstack(to_stack, dtype): # work around NumPy 1.6 bug @@ -2533,6 +3281,7 @@ def _vstack(to_stack, dtype): else: return np.vstack(to_stack) + def _possibly_convert_to_indexer(loc): if com._is_bool_indexer(loc): loc = [i for i, v in enumerate(loc) if v] diff --git 
a/pandas/core/nanops.py b/pandas/core/nanops.py index 23cc4fe31eba1..3e247caae9c42 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -5,7 +5,7 @@ import numpy as np -from pandas.core.common import isnull, notnull +from pandas.core.common import isnull, notnull, _values_from_object import pandas.core.common as com import pandas.lib as lib import pandas.algos as algos @@ -131,6 +131,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=F """ utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy """ + values = _values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -232,7 +233,7 @@ def get_median(x): mask = notnull(x) if not skipna and not mask.all(): return np.nan - return algos.median(x[mask]) + return algos.median(_values_from_object(x[mask])) if values.dtype != np.float64: values = values.astype('f8') diff --git a/pandas/core/panel.py b/pandas/core/panel.py index d89121b1309b4..bca6f985ac689 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -8,9 +8,8 @@ import operator import sys import numpy as np -from pandas.core.common import (PandasError, _mut_exclusive, - _try_sort, _default_index, - _infer_dtype_from_scalar, +from pandas.core.common import (PandasError, + _try_sort, _default_index, _infer_dtype_from_scalar, notnull) from pandas.core.categorical import Categorical from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -146,6 +145,7 @@ def f(self, other): class Panel(NDFrame): + """ Represents wide format panel data, stored as 3-dimensional array @@ -164,70 +164,11 @@ class Panel(NDFrame): Copy data from inputs. 
Only affects DataFrame / 2d ndarray input """ - _AXIS_ORDERS = ['items', 'major_axis', 'minor_axis'] - _AXIS_NUMBERS = dict((a, i) for i, a in enumerate(_AXIS_ORDERS)) - _AXIS_ALIASES = { - 'major': 'major_axis', - 'minor': 'minor_axis' - } - _AXIS_NAMES = dict(enumerate(_AXIS_ORDERS)) - _AXIS_SLICEMAP = { - 'major_axis': 'index', - 'minor_axis': 'columns' - } - _AXIS_LEN = len(_AXIS_ORDERS) - - # major - _default_stat_axis = 1 - - # info axis - _het_axis = 0 - _info_axis = _AXIS_ORDERS[_het_axis] - - items = lib.AxisProperty(0) - major_axis = lib.AxisProperty(1) - minor_axis = lib.AxisProperty(2) - - # return the type of the slice constructor - _constructor_sliced = DataFrame - - def _construct_axes_dict(self, axes=None, **kwargs): - """ Return an axes dictionary for myself """ - d = dict([(a, getattr(self, a)) for a in (axes or self._AXIS_ORDERS)]) - d.update(kwargs) - return d - - @staticmethod - def _construct_axes_dict_from(self, axes, **kwargs): - """ Return an axes dictionary for the passed axes """ - d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) - d.update(kwargs) - return d - - def _construct_axes_dict_for_slice(self, axes=None, **kwargs): - """ Return an axes dictionary for myself """ - d = dict([(self._AXIS_SLICEMAP[a], getattr(self, a)) - for a in (axes or self._AXIS_ORDERS)]) - d.update(kwargs) - return d - - __add__ = _arith_method(operator.add, '__add__') - __sub__ = _arith_method(operator.sub, '__sub__') - __truediv__ = _arith_method(operator.truediv, '__truediv__') - __floordiv__ = _arith_method(operator.floordiv, '__floordiv__') - __mul__ = _arith_method(operator.mul, '__mul__') - __pow__ = _arith_method(operator.pow, '__pow__') - - __radd__ = _arith_method(operator.add, '__radd__') - __rmul__ = _arith_method(operator.mul, '__rmul__') - __rsub__ = _arith_method(lambda x, y: y - x, '__rsub__') - __rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__') - __rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__') - 
__rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__') + @property + def _constructor(self): + return type(self) - if not compat.PY3: - __div__ = _arith_method(operator.div, '__div__') - __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__') + _constructor_sliced = DataFrame def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, copy=False, dtype=None): @@ -263,17 +204,8 @@ def _init_data(self, data, copy, dtype, **kwargs): NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype) - @classmethod - def _from_axes(cls, data, axes): - # for construction from BlockManager - if isinstance(data, BlockManager): - return cls(data) - else: - d = cls._construct_axes_dict_from(cls, axes, copy=False) - return cls(data, **d) - def _init_dict(self, data, axes, dtype=None): - haxis = axes.pop(self._het_axis) + haxis = axes.pop(self._info_axis_number) # prefilter if haxis passed if haxis is not None: @@ -282,7 +214,7 @@ def _init_dict(self, data, axes, dtype=None): in compat.iteritems(data) if k in haxis) else: ks = list(data.keys()) - if not isinstance(data,OrderedDict): + if not isinstance(data, OrderedDict): ks = _try_sort(ks) haxis = Index(ks) @@ -317,10 +249,6 @@ def _init_dict(self, data, axes, dtype=None): def _init_arrays(self, arrays, arr_names, axes): return create_block_manager_from_arrays(arrays, arr_names, axes) - @property - def shape(self): - return tuple([len(getattr(self, a)) for a in self._AXIS_ORDERS]) - @classmethod def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ @@ -344,7 +272,6 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): ------- Panel """ - orient = orient.lower() if orient == 'minor': new_data = OrderedDefaultdict(dict) @@ -357,18 +284,37 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = list(d['data'].keys()) - if not isinstance(d['data'],OrderedDict): + if not 
isinstance(d['data'], OrderedDict): ks = list(sorted(ks)) - d[cls._info_axis] = Index(ks) + d[cls._info_axis_name] = Index(ks) return cls(**d) + # Comparison methods + __add__ = _arith_method(operator.add, '__add__') + __sub__ = _arith_method(operator.sub, '__sub__') + __truediv__ = _arith_method(operator.truediv, '__truediv__') + __floordiv__ = _arith_method(operator.floordiv, '__floordiv__') + __mul__ = _arith_method(operator.mul, '__mul__') + __pow__ = _arith_method(operator.pow, '__pow__') + + __radd__ = _arith_method(operator.add, '__radd__') + __rmul__ = _arith_method(operator.mul, '__rmul__') + __rsub__ = _arith_method(lambda x, y: y - x, '__rsub__') + __rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__') + __rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__') + __rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__') + + if not compat.PY3: + __div__ = _arith_method(operator.div, '__div__') + __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__') + def __getitem__(self, key): - if isinstance(getattr(self, self._info_axis), MultiIndex): + if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) return super(Panel, self).__getitem__(key) def _getitem_multilevel(self, key): - info = getattr(self, self._info_axis) + info = self._info_axis loc = info.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_index = info[loc] @@ -378,7 +324,7 @@ def _getitem_multilevel(self, key): new_values = self.values[slices] d = self._construct_axes_dict(self._AXIS_ORDERS[1:]) - d[self._info_axis] = result_index + d[self._info_axis_name] = result_index result = self._constructor(new_values, **d) return result else: @@ -402,32 +348,18 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): ax = _ensure_index(ax) fixed_axes.append(ax) - return create_block_manager_from_blocks([ values ], fixed_axes) - - #---------------------------------------------------------------------- - # Array interface - - def __array__(self, 
dtype=None): - return self.values - - def __array_wrap__(self, result): - d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d) + return create_block_manager_from_blocks([values], fixed_axes) #---------------------------------------------------------------------- # Comparison methods - def _indexed_same(self, other): - return all([getattr(self, a).equals(getattr(other, a)) - for a in self._AXIS_ORDERS]) - def _compare_constructor(self, other, func): if not self._indexed_same(other): raise Exception('Can only compare identically-labeled ' 'same type objects') new_data = {} - for col in getattr(self, self._info_axis): + for col in self._info_axis: new_data[col] = func(self[col], other[col]) d = self._construct_axes_dict(copy=False) @@ -438,12 +370,6 @@ def _compare_constructor(self, other, func): __or__ = _arith_method(operator.or_, '__or__') __xor__ = _arith_method(operator.xor, '__xor__') - def __neg__(self): - return -1 * self - - def __invert__(self): - return -1 * self - # Comparison methods __eq__ = _comp_method(operator.eq, '__eq__') __ne__ = _comp_method(operator.ne, '__ne__') @@ -489,13 +415,6 @@ def axis_pretty(a): [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS]) return output - def __iter__(self): - return iter(getattr(self, self._info_axis)) - - def iteritems(self): - for h in getattr(self, self._info_axis): - yield h, self[h] - def _get_plane_axes(self, axis): """ Get my plane axes: these are already @@ -516,10 +435,6 @@ def _get_plane_axes(self, axis): return index, columns - def _wrap_array(self, arr, axes, copy=False): - d = self._construct_axes_dict_from(self, axes, copy=copy) - return self._constructor(arr, **d) - fromDict = from_dict def to_sparse(self, fill_value=None, kind='block'): @@ -561,16 +476,10 @@ def to_excel(self, path, na_rep=''): df.to_excel(writer, name, na_rep=na_rep) writer.save() - # TODO: needed? 
- def keys(self): - return list(getattr(self, self._info_axis)) - - def _get_values(self): + def as_matrix(self): self._consolidate_inplace() return self._data.as_matrix() - values = property(fget=_get_values) - #---------------------------------------------------------------------- # Getting and setting elements @@ -625,10 +534,10 @@ def set_value(self, *args): axes = self._expand_axes(args) d = self._construct_axes_dict_from(self, axes, copy=False) result = self.reindex(**d) - args = list(args) + args = list(args) likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1]) made_bigger = not np.array_equal( - axes[0], getattr(self, self._info_axis)) + axes[0], self._info_axis) # how to make this logic simpler? if made_bigger: com._possibly_cast_item(result, args[0], likely_dtype) @@ -639,14 +548,6 @@ def _box_item_values(self, key, values): d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]) return self._constructor_sliced(values, **d) - def __getattr__(self, name): - """After regular attribute access, try looking up the name of an item. 
- This allows simpler access to items for interactive use.""" - if name in getattr(self, self._info_axis): - return self[name] - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) - def _slice(self, slobj, axis=0, raise_on_error=False): new_data = self._data.get_slice(slobj, axis=axis, @@ -673,35 +574,6 @@ def __setitem__(self, key, value): mat = mat.reshape(tuple([1]) + shape[1:]) NDFrame._set_item(self, key, mat) - def pop(self, item): - """ - Return item slice from panel and delete from panel - - Parameters - ---------- - key : object - Must be contained in panel's items - - Returns - ------- - y : DataFrame - """ - return NDFrame.pop(self, item) - - def __getstate__(self): - "Returned pickled representation of the panel" - return self._data - - def __setstate__(self, state): - # old Panel pickle - if isinstance(state, BlockManager): - self._data = state - elif len(state) == 4: # pragma: no cover - self._unpickle_panel_compat(state) - else: # pragma: no cover - raise ValueError('unrecognized pickle') - self._item_cache = {} - def _unpickle_panel_compat(self, state): # pragma: no cover "Unpickle the panel" _unpickle = com._unpickle_array @@ -734,62 +606,15 @@ def conform(self, frame, axis='items'): axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes)) - def reindex(self, major=None, minor=None, method=None, - major_axis=None, minor_axis=None, copy=True, **kwargs): - """ - Conform panel to new axis or axes - - Parameters - ---------- - major : Index or sequence, default None - Can also use 'major_axis' keyword - items : Index or sequence, default None - minor : Index or sequence, default None - Can also use 'minor_axis' keyword - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - 
copy : boolean, default True - Return a new object, even if the passed indexes are the same - - Returns - ------- - Panel (new object) - """ - result = self - - major = _mut_exclusive(major, major_axis) - minor = _mut_exclusive(minor, minor_axis) - al = self._AXIS_LEN - + def _needs_reindex_multi(self, axes, method, level): # only allowing multi-index on Panel (and not > dims) - if (method is None and not self._is_mixed_type and al <= 3): - items = kwargs.get('items') - if com._count_not_none(items, major, minor) == 3: - try: - return self._reindex_multi(items, major, minor) - except: - pass - - if major is not None: - result = result._reindex_axis(major, method, al - 2, copy) - - if minor is not None: - result = result._reindex_axis(minor, method, al - 1, copy) - - for i, a in enumerate(self._AXIS_ORDERS[0:al - 2]): - a = kwargs.get(a) - if a is not None: - result = result._reindex_axis(a, method, i, copy) - - if result is self and copy: - raise ValueError('Must specify at least one axis') + return method is None and not self._is_mixed_type and self._AXIS_LEN <= 3 and com._count_not_none(*axes.values()) == 3 - return result - - def _reindex_multi(self, items, major, minor): + def _reindex_multi(self, axes, copy, fill_value): + """ we are guaranteed non-Nones in the axes! """ + items = axes['items'] + major = axes['major_axis'] + minor = axes['minor_axis'] a0, a1, a2 = len(items), len(major), len(minor) values = self.values @@ -815,52 +640,6 @@ def _reindex_multi(self, items, major, minor): return Panel(new_values, items=new_items, major_axis=new_major, minor_axis=new_minor) - def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True): - """Conform Panel to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. 
A new object - is produced unless the new index is equivalent to the current one and - copy=False - - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. Preferably an Index object to - avoid duplicating data - axis : {0, 1} - 0 -> index (rows) - 1 -> columns - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - - Returns - ------- - reindexed : Panel - """ - self._consolidate_inplace() - return self._reindex_axis(labels, method, axis, copy) - - def reindex_like(self, other, method=None): - """ return an object with matching indicies to myself - - Parameters - ---------- - other : Panel - method : string or None - - Returns - ------- - reindexed : Panel - """ - d = other._construct_axes_dict(method=method) - return self.reindex(**d) - def dropna(self, axis=0, how='any'): """ Drop 2D from panel, holding passed axis constant @@ -883,7 +662,7 @@ def dropna(self, axis=0, how='any'): values = self.values mask = com.notnull(values) - for ax in reversed(sorted(set(range(3)) - set([axis]))): + for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) @@ -1059,7 +838,7 @@ def _ixs(self, i, axis=0): # xs cannot handle a non-scalar key, so just reindex here if _is_list_like(key): - return self.reindex(**{ self._get_axis_name(axis) : key }) + return self.reindex(**{self._get_axis_name(axis): key}) return self.xs(key, axis=axis) @@ -1081,95 +860,6 @@ def groupby(self, function, axis='major'): axis = self._get_axis_number(axis) return 
PanelGroupBy(self, function, axis=axis) - def swapaxes(self, axis1='major', axis2='minor', copy=True): - """ - Interchange axes and swap values axes appropriately - - Returns - ------- - y : Panel (new object) - """ - i = self._get_axis_number(axis1) - j = self._get_axis_number(axis2) - - if i == j: - raise ValueError('Cannot specify the same axis') - - mapping = {i: j, j: i} - - new_axes = (self._get_axis(mapping.get(k, k)) - for k in range(self._AXIS_LEN)) - new_values = self.values.swapaxes(i, j) - if copy: - new_values = new_values.copy() - - return self._constructor(new_values, *new_axes) - - def transpose(self, *args, **kwargs): - """ - Permute the dimensions of the Panel - - Parameters - ---------- - items : int or one of {'items', 'major', 'minor'} - major : int or one of {'items', 'major', 'minor'} - minor : int or one of {'items', 'major', 'minor'} - copy : boolean, default False - Make a copy of the underlying data. Mixed-dtype data will - always result in a copy - - Examples - -------- - >>> p.transpose(2, 0, 1) - >>> p.transpose(2, 0, 1, copy=True) - - Returns - ------- - y : Panel (new object) - """ - # construct the args - args = list(args) - aliases = tuple(compat.iterkeys(kwargs)) - - for a in self._AXIS_ORDERS: - if not a in kwargs: - where = lmap(a.startswith, aliases) - - if any(where): - if sum(where) != 1: - raise AssertionError( - 'Ambiguous parameter aliases "{0}" passed, valid ' - 'parameter aliases are ' - '{1}'.format([n for n, m in zip(aliases, where) - if m], self._AXIS_ALIASES)) - - k = aliases[where.index(True)] - - try: - kwargs[self._AXIS_ALIASES[k]] = kwargs.pop(k) - except KeyError: - raise KeyError('Invalid parameter alias ' - '"{0}"'.format(k)) - else: - try: - kwargs[a] = args.pop(0) - except IndexError: - raise ValueError( - "not enough arguments specified to transpose!") - - axes = [self._get_axis_number(kwargs[a]) for a in self._AXIS_ORDERS] - - # we must have unique axes - if len(axes) != len(set(axes)): - raise 
ValueError('Must specify %s unique axes' % self._AXIS_LEN) - - new_axes = self._construct_axes_dict_from( - self, [self._get_axis(x) for x in axes]) - new_values = self.values.transpose(tuple(axes)) - if kwargs.get('copy') or (len(args) and args[-1]): - new_values = new_values.copy() - return self._constructor(new_values, **new_axes) - def to_frame(self, filter_observations=True): """ Transform wide format into long (stacked) format as DataFrame @@ -1217,21 +907,6 @@ def to_frame(self, filter_observations=True): to_long = deprecate('to_long', to_frame) toLong = deprecate('toLong', to_frame) - def filter(self, items): - """ - Restrict items in panel to input list - - Parameters - ---------- - items : sequence - - Returns - ------- - y : Panel - """ - intersection = self.items.intersection(items) - return self.reindex(items=intersection) - def apply(self, func, axis='major'): """ Apply @@ -1260,7 +935,7 @@ def _reduce(self, op, axis=0, skipna=True): result = f(self.values) axes = self._get_plane_axes(axis_name) - if result.ndim == 2 and axis_name != self._info_axis: + if result.ndim == 2 and axis_name != self._info_axis_name: result = result.T return self._construct_return_type(result, axes) @@ -1286,7 +961,7 @@ def _construct_return_type(self, result, axes=None, **kwargs): def _wrap_result(self, result, axis): axis = self._get_axis_name(axis) axes = self._get_plane_axes(axis) - if result.ndim == 2 and axis != self._info_axis: + if result.ndim == 2 and axis != self._info_axis_name: result = result.T return self._construct_return_type(result, axes) @@ -1449,9 +1124,9 @@ def update(self, other, join='left', overwrite=True, filter_func=None, if not isinstance(other, self._constructor): other = self._constructor(other) - axis = self._info_axis - axis_values = getattr(self, axis) - other = other.reindex(**{axis: axis_values}) + axis_name = self._info_axis_name + axis_values = self._info_axis + other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: 
self[frame].update(other[frame], join, overwrite, filter_func, @@ -1514,7 +1189,8 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): """ result = dict() - if isinstance(frames,OrderedDict): # caller differs dict/ODict, presered type + # caller differs dict/ODict, presered type + if isinstance(frames, OrderedDict): result = OrderedDict() adj_frames = OrderedDict() @@ -1623,7 +1299,7 @@ def f(self, other, axis=0): Parameters ---------- axis : {""" + ', '.join(cls._AXIS_ORDERS) + "} or {" \ -+ ', '.join([str(i) for i in range(cls._AXIS_LEN)]) + """} + + ', '.join([str(i) for i in range(cls._AXIS_LEN)]) + """} skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA @@ -1697,16 +1373,19 @@ def min(self, axis='major', skipna=True): return self._reduce(nanops.nanmin, axis=axis, skipna=skipna) cls.min = min +Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], + info_axis=0, + stat_axis=1, + aliases={'major': 'major_axis', + 'minor': 'minor_axis'}, + slicers={'major_axis': 'index', + 'minor_axis': 'columns'}) Panel._add_aggregate_operations() WidePanel = Panel LongPanel = DataFrame -def _monotonic(arr): - return not (arr[1:] < arr[:-1]).any() - - def install_ipython_completers(): # pragma: no cover """Register the Panel type with IPython's tab completion machinery, so that it knows about accessing column names as attributes.""" diff --git a/pandas/core/panel4d.py b/pandas/core/panel4d.py index 4113832f086fb..5679506cc6bb8 100644 --- a/pandas/core/panel4d.py +++ b/pandas/core/panel4d.py @@ -5,12 +5,12 @@ Panel4D = create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - axis_slices={'labels': 'labels', 'items': 'items', + orders =['labels', 'items', 'major_axis', 'minor_axis'], + slices ={'labels': 'labels', 'items': 'items', 'major_axis': 'major_axis', 'minor_axis': 'minor_axis'}, slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 
'minor_axis'}, + aliases ={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2, ns=dict(__doc__= """ Represents a 4 dimensonal structured @@ -33,7 +33,6 @@ ) ) - def panel4d_init(self, data=None, labels=None, items=None, major_axis=None, minor_axis=None, copy=False, dtype=None): diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py index f43ec2c31ba96..8f427568a4102 100644 --- a/pandas/core/panelnd.py +++ b/pandas/core/panelnd.py @@ -5,20 +5,21 @@ import pandas.compat as compat -def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_aliases=None, stat_axis=2,ns=None): + +def create_nd_panel_factory(klass_name, orders, slices, slicer, aliases=None, stat_axis=2, info_axis=0, ns=None): """ manufacture a n-d class: parameters ---------- - klass_name : the klass name - axis_orders : the names of the axes in order (highest to lowest) - axis_slices : a dictionary that defines how the axes map to the sliced axis - slicer : the class representing a slice of this panel - axis_aliases: a dictionary defining aliases for various axes + klass_name : the klass name + orders : the names of the axes in order (highest to lowest) + slices : a dictionary that defines how the axes map to the sliced axis + slicer : the class representing a slice of this panel + aliases : a dictionary defining aliases for various axes default = { major : major_axis, minor : minor_axis } - stat_axis : the default statistic axis + stat_axis : the default statistic axis default = 2 - het_axis : the info axis + info_axis : the info axis returns @@ -40,23 +41,15 @@ def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_a ns = {} if not ns else ns klass = type(klass_name, (slicer,), ns) - # add the class variables - klass._AXIS_ORDERS = axis_orders - klass._AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(axis_orders)]) - klass._AXIS_ALIASES = axis_aliases or dict() - klass._AXIS_NAMES = dict([(i, a) for i, a in enumerate(axis_orders)]) - 
klass._AXIS_SLICEMAP = axis_slices - klass._AXIS_LEN = len(axis_orders) - klass._default_stat_axis = stat_axis - klass._het_axis = 0 - klass._info_axis = axis_orders[klass._het_axis] + # setup the axes + klass._setup_axes(axes = orders, + info_axis = info_axis, + stat_axis = stat_axis, + aliases = aliases, + slicers = slices) klass._constructor_sliced = slicer - # add the axes - for i, a in enumerate(axis_orders): - setattr(klass, a, lib.AxisProperty(i)) - #### define the methods #### def __init__(self, *args, **kwargs): if not (kwargs.get('data') or len(args)): diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 4596b93d79778..0ac45e52d64fc 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -343,7 +343,7 @@ def pivot(self, index=None, columns=None, values=None): return indexed.unstack(columns) else: indexed = Series(self[values].values, - index=[self[index], self[columns]]) + index=MultiIndex.from_arrays([self[index], self[columns]])) return indexed.unstack(columns) diff --git a/pandas/core/series.py b/pandas/core/series.py index d35e251a2bde2..4f9c1e430d154 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -17,15 +17,20 @@ from pandas.core.common import (isnull, notnull, _is_bool_indexer, _default_index, _maybe_promote, _maybe_upcast, _asarray_tuplesafe, is_integer_dtype, - _infer_dtype_from_scalar, is_list_like, - _NS_DTYPE, _TD_DTYPE) + _NS_DTYPE, _TD_DTYPE, + _infer_dtype_from_scalar, is_list_like, _values_from_object, + ABCSparseArray) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, _ensure_index, _handle_legacy_indexes) -from pandas.core.indexing import (_SeriesIndexer, _check_bool_indexer, - _check_slice_bounds, _maybe_convert_indices) -from pandas.tseries.offsets import DateOffset +from pandas.core.indexing import ( + _SeriesIndexer, _check_bool_indexer, _check_slice_bounds, + _is_index_slice, _maybe_convert_indices) +from pandas.core import generic +from pandas.core.internals import 
SingleBlockManager +import pandas.core.expressions as expressions from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex, Period +from pandas.tseries.offsets import DateOffset from pandas import compat from pandas.util.terminal import get_terminal_size from pandas.compat import zip, lzip, u, OrderedDict @@ -35,7 +40,6 @@ import pandas.core.common as com import pandas.core.datetools as datetools import pandas.core.format as fmt -import pandas.core.generic as generic import pandas.core.nanops as nanops from pandas.util.decorators import Appender, Substitution, cache_readonly @@ -46,7 +50,7 @@ from pandas.compat.scipy import scoreatpercentile as _quantile from pandas.core.config import get_option -__all__ = ['Series', 'TimeSeries'] +__all__ = ['Series'] _np_version = np.version.short_version _np_version_under1p6 = LooseVersion(_np_version) < '1.6' @@ -67,18 +71,18 @@ def na_op(x, y): try: result = op(x, y) - result = com._fill_zeros(result,y,fill_zeros) + result = com._fill_zeros(result, y, fill_zeros) except TypeError: result = pa.empty(len(x), dtype=x.dtype) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array, Series)): mask = notnull(x) & notnull(y) result[mask] = op(x[mask], y[mask]) else: mask = notnull(x) result[mask] = op(x[mask], y) - result, changed = com._maybe_upcast_putmask(result,-mask,pa.NA) + result, changed = com._maybe_upcast_putmask(result, -mask, pa.NA) return result @@ -105,7 +109,7 @@ def convert_to_array(values): inferred_type = lib.infer_dtype(values) if inferred_type in set(['datetime64','datetime','date','time']): # a datetlike - if not (isinstance(values, pa.Array) and com.is_datetime64_dtype(values)): + if not (isinstance(values, (pa.Array, Series)) and com.is_datetime64_dtype(values)): values = tslib.array_to_datetime(values) elif inferred_type in set(['timedelta']): # have a timedelta, convert to to ns here @@ -226,8 +230,8 @@ def f(x): if self.index.equals(other.index): name = 
_maybe_match_name(self, other) - return Series(wrap_results(na_op(lvalues, rvalues)), - index=self.index, name=name, dtype=dtype) + return self._constructor(wrap_results(na_op(lvalues, rvalues)), + index=self.index, dtype=dtype, name=name) join_idx, lidx, ridx = self.index.join(other.index, how='outer', return_indexers=True) @@ -241,19 +245,19 @@ def f(x): arr = na_op(lvalues, rvalues) name = _maybe_match_name(self, other) - return Series(wrap_results(arr), index=join_idx, name=name,dtype=dtype) + return self._constructor(wrap_results(arr), index=join_idx, name=name, dtype=dtype) elif isinstance(other, DataFrame): return NotImplemented else: # scalars - if hasattr(lvalues,'values'): + if hasattr(lvalues, 'values'): lvalues = lvalues.values - return Series(wrap_results(na_op(lvalues, rvalues)), - index=self.index, name=self.name, dtype=dtype) + return self._constructor(wrap_results(na_op(lvalues, rvalues)), + index=self.index, name=self.name, dtype=dtype) return wrapper -def _comp_method(op, name): +def _comp_method(op, name, masker=False): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
@@ -263,7 +267,7 @@ def na_op(x, y): if isinstance(y, list): y = lib.list_to_object_array(y) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array, Series)): if y.dtype != np.object_: result = lib.vec_compare(x, y.astype(np.object_), op) else: @@ -282,16 +286,19 @@ def wrapper(self, other): name = _maybe_match_name(self, other) if len(self) != len(other): raise ValueError('Series lengths must match to compare') - return Series(na_op(self.values, other.values), - index=self.index, name=name) + return self._constructor(na_op(self.values, other.values), + index=self.index, name=name) elif isinstance(other, DataFrame): # pragma: no cover return NotImplemented - elif isinstance(other, pa.Array): + elif isinstance(other, (pa.Array, Series)): if len(self) != len(other): raise ValueError('Lengths must match to compare') - return Series(na_op(self.values, np.asarray(other)), - index=self.index, name=self.name) + return self._constructor(na_op(self.values, np.asarray(other)), + index=self.index, name=self.name) else: + + mask = isnull(self) + values = self.values other = _index.convert_scalar(values, other) @@ -303,8 +310,17 @@ def wrapper(self, other): if np.isscalar(res): raise TypeError('Could not compare %s type with Series' % type(other)) - return Series(na_op(values, other), - index=self.index, name=self.name) + + # always return a full value series here + res = _values_from_object(res) + + res = Series(res, index=self.index, name=self.name, dtype='bool') + + # mask out the invalids + if mask.any(): + res[mask.values] = masker + + return res return wrapper @@ -320,7 +336,7 @@ def na_op(x, y): if isinstance(y, list): y = lib.list_to_object_array(y) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array, Series)): if (x.dtype == np.bool_ and y.dtype == np.bool_): # pragma: no cover result = op(x, y) # when would this be hit? 
@@ -338,14 +354,14 @@ def wrapper(self, other): if isinstance(other, Series): name = _maybe_match_name(self, other) - return Series(na_op(self.values, other.values), - index=self.index, name=name) + return self._constructor(na_op(self.values, other.values), + index=self.index, name=name) elif isinstance(other, DataFrame): return NotImplemented else: # scalars - return Series(na_op(self.values, other), - index=self.index, name=self.name) + return self._constructor(na_op(self.values, other), + index=self.index, name=self.name) return wrapper @@ -366,6 +382,17 @@ def _radd_compat(left, right): return output +def _coerce_method(converter): + """ install the scalar coercion methods """ + + def wrapper(self): + if len(self) == 1: + return converter(self.iloc[0]) + raise TypeError( + "cannot convert the series to {0}".format(str(converter))) + return wrapper + + def _maybe_match_name(a, b): name = None if a.name == b.name: @@ -397,14 +424,14 @@ def _flex_method(op, name): def f(self, other, level=None, fill_value=None): if isinstance(other, Series): return self._binop(other, op, level=level, fill_value=fill_value) - elif isinstance(other, (pa.Array, list, tuple)): + elif isinstance(other, (pa.Array, Series, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') - return self._binop(Series(other, self.index), op, + return self._binop(self._constructor(other, self.index), op, level=level, fill_value=fill_value) else: - return Series(op(self.values, other), self.index, - name=self.name) + return self._constructor(op(self.values, other), self.index, + name=self.name) f.__name__ = name return f @@ -413,8 +440,8 @@ def f(self, other, level=None, fill_value=None): def _unbox(func): @Appender(func.__doc__) def f(self, *args, **kwargs): - result = func(self, *args, **kwargs) - if isinstance(result, pa.Array) and result.ndim == 0: + result = func(self.values, *args, **kwargs) + if isinstance(result, (pa.Array, Series)) and result.ndim == 0: # return 
NumPy type return result.dtype.type(result.item()) else: # pragma: no cover @@ -452,14 +479,16 @@ def _make_stat_func(nanop, name, shortname, na_action=_doc_exclude_na, def f(self, axis=0, dtype=None, out=None, skipna=True, level=None): if level is not None: return self._agg_by_level(shortname, level=level, skipna=skipna) - return nanop(self.values, skipna=skipna) + return nanop(_values_from_object(self), skipna=skipna) f.__name__ = shortname return f #---------------------------------------------------------------------- # Series class -class Series(generic.PandasContainer, pa.Array): + +class Series(generic.NDFrame): + """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be any hashable type. The object @@ -486,160 +515,307 @@ class Series(generic.PandasContainer, pa.Array): If None, dtype will be inferred copy : boolean, default False, copy input data """ - _AXIS_NUMBERS = { - 'index': 0 - } + _prop_attributes = ['name'] - _AXIS_NAMES = dict((v, k) for k, v in compat.iteritems(_AXIS_NUMBERS)) + def __init__(self, data=None, index=None, dtype=None, name=None, + copy=False, fastpath=False): - def __new__(cls, data=None, index=None, dtype=None, name=None, - copy=False): - if data is None: - data = {} + # we are called internally, so short-circuit + if fastpath: - if isinstance(data, MultiIndex): - raise NotImplementedError + # data is an ndarray, index is defined + if not isinstance(data, SingleBlockManager): + data = SingleBlockManager(data, index, fastpath=True) + if copy: + data = data.copy() + if index is None: + index = data.index - if index is not None: - index = _ensure_index(index) + else: - if isinstance(data, Series): - if name is None: - name = data.name + if index is not None: + index = _ensure_index(index) - if index is None: - index = data.index - else: - data = data.reindex(index).values - elif isinstance(data, dict): - if index is None: - if isinstance(data, OrderedDict): - index = Index(data) + if 
data is None: + data = {} + + if isinstance(data, MultiIndex): + raise NotImplementedError + elif isinstance(data, pa.Array): + pass + elif isinstance(data, Series): + if name is None: + name = data.name + if index is None: + index = data.index else: - index = Index(sorted(data)) - try: - if isinstance(index, DatetimeIndex): - # coerce back to datetime objects for lookup - data = lib.fast_multiget(data, index.astype('O'), - default=pa.NA) - elif isinstance(index, PeriodIndex): + data = data.reindex(index, copy=copy) + data = data._data + elif isinstance(data, dict): + if index is None: + if isinstance(data, OrderedDict): + index = Index(data) + else: + index = Index(sorted(data)) + try: + if isinstance(index, DatetimeIndex): + # coerce back to datetime objects for lookup + data = lib.fast_multiget(data, index.astype('O'), + default=pa.NA) + elif isinstance(index, PeriodIndex): + data = [data.get(i, nan) for i in index] + else: + data = lib.fast_multiget(data, index.values, + default=pa.NA) + except TypeError: data = [data.get(i, nan) for i in index] + + elif isinstance(data, SingleBlockManager): + if index is None: + index = data.index else: - data = lib.fast_multiget(data, index.values, - default=pa.NA) - except TypeError: - data = [data.get(i, nan) for i in index] - elif isinstance(data, types.GeneratorType): - data = list(data) - elif isinstance(data, (set, frozenset)): - raise TypeError("{0!r} type is unordered" - "".format(data.__class__.__name__)) + data = data.reindex(index, copy=copy) + elif isinstance(data, types.GeneratorType): + data = list(data) + elif isinstance(data, (set, frozenset)): + raise TypeError("{0!r} type is unordered" + "".format(data.__class__.__name__)) + else: - if dtype is not None: - dtype = np.dtype(dtype) + # handle sparse passed here (and force conversion) + if isinstance(data, ABCSparseArray): + data = data.to_dense() + + if index is None: + if not is_list_like(data): + data = [data] + index = _default_index(len(data)) + + # 
create/copy the manager + if isinstance(data, SingleBlockManager): + if dtype is not None: + data = data.astype(dtype, raise_on_error=False) + elif copy: + data = data.copy() + else: + data = _sanitize_array(data, index, dtype, copy, + raise_cast_failure=True) - subarr = _sanitize_array(data, index, dtype, copy, - raise_cast_failure=True) + data = SingleBlockManager(data, index, fastpath=True) - if not isinstance(subarr, pa.Array): - return subarr + generic.NDFrame.__init__(self, data, fastpath=True) - if index is None: - index = _default_index(len(subarr)) + object.__setattr__(self, 'name', name) + self._set_axis(0, index, fastpath=True) + + @classmethod + def from_array(cls, arr, index=None, name=None, copy=False, fastpath=False): + + # return a sparse series here + if isinstance(arr, ABCSparseArray): + from pandas.sparse.series import SparseSeries + cls = SparseSeries + + return cls(arr, index=index, name=name, copy=copy, fastpath=fastpath) + + @property + def _constructor(self): + return Series + + # types + @property + def _can_hold_na(self): + return self._data._can_hold_na + + @property + def is_time_series(self): + return self._subtyp in ['time_series', 'sparse_time_series'] + + _index = None + + def _set_axis(self, axis, labels, fastpath=False): + """ override generic, we want to set the _typ here """ + + if not fastpath: + labels = _ensure_index(labels) + + is_all_dates = labels.is_all_dates + if is_all_dates: + from pandas.tseries.index import DatetimeIndex + from pandas.tseries.period import PeriodIndex + if not isinstance(labels, (DatetimeIndex, PeriodIndex)): + labels = DatetimeIndex(labels) + + # need to set here becuase we changed the index + if fastpath: + self._data.set_axis(axis, labels) + self._set_subtyp(is_all_dates) - # Change the class of the array to be the subclass type. 
- if index.is_all_dates: - if not isinstance(index, (DatetimeIndex, PeriodIndex)): - index = DatetimeIndex(index) - subarr = subarr.view(TimeSeries) + object.__setattr__(self, '_index', labels) + if not fastpath: + self._data.set_axis(axis, labels) + + def _set_subtyp(self, is_all_dates): + if is_all_dates: + object.__setattr__(self, '_subtyp', 'time_series') else: - subarr = subarr.view(Series) - subarr.index = index - subarr.name = name + object.__setattr__(self, '_subtyp', 'series') - return subarr + # ndarray compatibility + def item(self): + return self.values.item() - def _make_time_series(self): - # oh boy #2139 - self.__class__ = TimeSeries + @property + def data(self): + return self.values.data - @classmethod - def from_array(cls, arr, index=None, name=None, copy=False): - """ - Simplified alternate constructor - """ - if copy: - arr = arr.copy() + @property + def strides(self): + return self.values.strides - klass = Series - if index.is_all_dates: - if not isinstance(index, (DatetimeIndex, PeriodIndex)): - index = DatetimeIndex(index) - klass = TimeSeries + @property + def size(self): + return self.values.size - result = arr.view(klass) - result.index = index - result.name = name + @property + def flags(self): + return self.values.flags - return result + @property + def dtype(self): + return self._data.dtype - def __init__(self, data=None, index=None, dtype=None, name=None, - copy=False): - pass + @property + def ftype(self): + return self._data.ftype @property - def _can_hold_na(self): - return not is_integer_dtype(self.dtype) + def shape(self): + return self._data.shape - _index = None - index = lib.SeriesIndex() + @property + def ndim(self): + return 1 - def __array_finalize__(self, obj): + @property + def base(self): + return self.values.base + + def ravel(self): + return self.values.ravel() + + def transpose(self): + """ support for compatiblity """ + return self + + T = property(transpose) + + def nonzero(self): + """ numpy like, returns same as 
nonzero """ + return self.values.nonzero() + + def put(self, *args, **kwargs): + self.values.put(*args, **kwargs) + + def __len__(self): + return len(self._data) + + @property + def size(self): + return self.__len__() + + def view(self, dtype=None): + return self._constructor(self.values.view(dtype), index=self.index, name=self.name) + + def __array__(self, result=None): + """ the array interface, return my values """ + return self.values + + def __array_wrap__(self, result): """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. + Gets called prior to a ufunc (and after) """ - self._index = getattr(obj, '_index', None) - self.name = getattr(obj, 'name', None) + return self._constructor(result, index=self.index, name=self.name, copy=False) def __contains__(self, key): return key in self.index - def __reduce__(self): - """Necessary for making this object picklable""" - object_state = list(ndarray.__reduce__(self)) - subclass_state = (self.index, self.name) - object_state[2] = (object_state[2], subclass_state) - return tuple(object_state) + # coercion + __float__ = _coerce_method(float) + __long__ = _coerce_method(int) + __int__ = _coerce_method(int) + + def __nonzero__(self): + # special case of a single element bool series degenerating to a scalar + if self.dtype == np.bool_ and len(self) == 1: + return bool(self.iloc[0]) + return not self.empty + __bool__ = __nonzero__ - def __setstate__(self, state): - """Necessary for making this object picklable""" - nd_state, own_state = state - ndarray.__setstate__(self, nd_state) + # we are preserving name here + def __getstate__(self): + return dict(_data=self._data, name=self.name) - # backwards compat - index, name = own_state[0], None - if len(own_state) > 1: - name = own_state[1] + def _unpickle_series_compat(self, state): + if isinstance(state, dict): + self._data = state['_data'] + self.name = state['name'] + self.index = self._data.index - self.index = 
_handle_legacy_indexes([index])[0] - self.name = name + elif isinstance(state, tuple): + + # < 0.12 series pickle + + nd_state, own_state = state + + # recreate the ndarray + data = np.empty(nd_state[1], dtype=nd_state[2]) + np.ndarray.__setstate__(data, nd_state) + + # backwards compat + index, name = own_state[0], None + if len(own_state) > 1: + name = own_state[1] + index = _handle_legacy_indexes([index])[0] + + # recreate + self._data = SingleBlockManager(data, index, fastpath=True) + self.index = index + self.name = name + + else: + raise Exception("cannot unpickle legacy formats -> [%s]" % state) # indexers @property def axes(self): - return [ self.index ] + return [self.index] - @property - def ix(self): - if self._ix is None: # defined in indexing.py; pylint: disable=E0203 - self._ix = _SeriesIndexer(self, 'ix') + def _maybe_box(self, values): + """ genericically box the values """ + + if isinstance(values, self.__class__): + return values + elif not hasattr(values, '__iter__'): + v = lib.infer_dtype([values]) + if v == 'datetime': + return lib.Timestamp(v) + return values + + v = lib.infer_dtype(values) + if v == 'datetime': + return lib.map_infer(values, lib.Timestamp) - return self._ix + if isinstance(values, np.ndarray): + return self.__class__(values) + + return values def _xs(self, key, axis=0, level=None, copy=True): return self.__getitem__(key) + xs = _xs + def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location @@ -653,7 +829,7 @@ def _ixs(self, i, axis=0): value : scalar (int) or Series (slice, sequence) """ try: - return _index.get_value_at(self, i) + return _index.get_value_at(self.values, i) except IndexError: raise except: @@ -675,19 +851,22 @@ def _slice(self, slobj, axis=0, raise_on_error=False): if raise_on_error: _check_slice_bounds(slobj, self.values) - return self._constructor(self.values[slobj], index=self.index[slobj]) + return self._constructor(self.values[slobj], index=self.index[slobj], + 
name=self.name) def __getitem__(self, key): try: return self.index.get_value(self, key) except InvalidIndexError: pass - except KeyError: + except (KeyError, ValueError): if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # kludge pass elif key is Ellipsis: return self + elif _is_bool_indexer(key): + pass else: raise except Exception: @@ -696,9 +875,6 @@ def __getitem__(self, key): if com.is_iterator(key): key = list(key) - # boolean - # special handling of boolean data with NAs stored in object - # arrays. Since we can't represent NA with dtype=bool if _is_bool_indexer(key): key = _check_bool_indexer(self.index, key) @@ -707,7 +883,6 @@ def __getitem__(self, key): def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): - from pandas.core.indexing import _is_index_slice idx_type = self.index.inferred_type if idx_type == 'floating': @@ -728,7 +903,8 @@ def _get_with(self, key): return self._get_values(key) raise - if not isinstance(key, (list, pa.Array)): # pragma: no cover + # pragma: no cover + if not isinstance(key, (list, pa.Array, Series)): key = list(key) if isinstance(key, Index): @@ -746,7 +922,7 @@ def _get_with(self, key): else: try: # handle the dup indexing case (GH 4246) - if isinstance(key, (list,tuple)): + if isinstance(key, (list, tuple)): return self.ix[key] return self.reindex(key) @@ -768,121 +944,21 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) - return Series(self.values[indexer], index=new_index, name=self.name) + return self._constructor(self.values[indexer], index=new_index, name=self.name) def _get_values(self, indexer): try: - return Series(self.values[indexer], index=self.index[indexer], - name=self.name) + return self._constructor(self._data.get_slice(indexer), + name=self.name, fastpath=True) except Exception: return self.values[indexer] - def get_dtype_counts(self): - return Series({ self.dtype.name 
: 1 }) - - def where(self, cond, other=nan, inplace=False): - """ - Return a Series where cond is True; otherwise values are from other - - Parameters - ---------- - cond: boolean Series or array - other: scalar or Series - - Returns - ------- - wh: Series - """ - if isinstance(cond, Series): - cond = cond.reindex(self.index, fill_value=True) - if not hasattr(cond, 'shape'): - raise ValueError('where requires an ndarray like object for its ' - 'condition') - if len(cond) != len(self): - raise ValueError('condition must have same length as series') - - if cond.dtype != np.bool_: - cond = cond.astype(np.bool_) - - ser = self if inplace else self.copy() - if not isinstance(other, (list, tuple, pa.Array)): - ser._set_with(~cond, other) - return None if inplace else ser - - if isinstance(other, Series): - other = other.reindex(ser.index) - elif isinstance(other, (tuple,list)): - - # try to set the same dtype as ourselves - new_other = np.array(other,dtype=self.dtype) - if not (new_other == np.array(other)).all(): - other = np.array(other) - else: - other = new_other - - if len(other) != len(ser): - icond = ~cond - - # GH 2745 - # treat like a scalar - if len(other) == 1: - other = np.array(other[0]) - - # GH 3235 - # match True cond to other - elif len(icond[icond]) == len(other): - dtype, fill_value = _maybe_promote(other.dtype) - new_other = np.empty(len(cond),dtype=dtype) - new_other.fill(fill_value) - new_other[icond] = other - other = new_other - - else: - raise ValueError('Length of replacements must equal series length') - - change = ser if inplace else None - com._maybe_upcast_putmask(ser,~cond,other,change=change) - - return None if inplace else ser - - def mask(self, cond): - """ - Returns copy of self whose values are replaced with nan if the - inverted condition is True - - Parameters - ---------- - cond: boolean Series or array - - Returns - ------- - wh: Series - """ - return self.where(~cond, nan) - - def abs(self): - """ - Return an object with absolute 
value taken. Only applicable to objects - that are all numeric - - Returns - ------- - abs: type of caller - """ - obj = np.abs(self) - obj = com._possibly_cast_to_timedelta(obj, coerce=False) - return obj - def __setitem__(self, key, value): try: - try: - self.index._engine.set_value(self, key, value) - return - except KeyError: - values = self.values - values[self.index.get_loc(key)] = value - return - except KeyError: + self._set_with_engine(key, value) + return + except (KeyError, ValueError): + values = self.values if (com.is_integer(key) and not self.index.inferred_type == 'integer'): @@ -891,36 +967,46 @@ def __setitem__(self, key, value): elif key is Ellipsis: self[:] = value return - - raise KeyError('%s not in this series!' % str(key)) - except TypeError as e: - # python 3 type errors should be raised - if 'unorderable' in str(e): # pragma: no cover - raise IndexError(key) - # Could not hash item - except ValueError: - - # reassign a null value to iNaT - if com.is_timedelta64_dtype(self.dtype): + elif _is_bool_indexer(key): + pass + elif com.is_timedelta64_dtype(self.dtype): + # reassign a null value to iNaT if isnull(value): value = tslib.iNaT try: - self.index._engine.set_value(self, key, value) + self.index._engine.set_value(self.values, key, value) return except (TypeError): pass + self.loc[key] = value + return + + except TypeError as e: + # python 3 type errors should be raised + if 'unorderable' in str(e): # pragma: no cover + raise IndexError(key) + # Could not hash item + if _is_bool_indexer(key): key = _check_bool_indexer(self.index, key) - self.where(~key,value,inplace=True) + self.where(~key, value, inplace=True) else: self._set_with(key, value) + def _set_with_engine(self, key, value): + values = self.values + try: + self.index._engine.set_value(values, key, value) + return + except KeyError: + values[self.index.get_loc(key)] = value + return + def _set_with(self, key, value): # other: fancy integer or otherwise if isinstance(key, slice): - 
from pandas.core.indexing import _is_index_slice if self.index.inferred_type == 'integer' or _is_index_slice(key): indexer = key else: @@ -933,7 +1019,7 @@ def _set_with(self, key, value): except Exception: pass - if not isinstance(key, (list, pa.Array)): + if not isinstance(key, (list, Series, pa.Array, Series)): key = list(key) if isinstance(key, Index): @@ -947,7 +1033,7 @@ def _set_with(self, key, value): else: return self._set_values(key, value) elif key_type == 'boolean': - self._set_values(key, value) + self._set_values(key.astype(np.bool_), value) else: self._set_labels(key, value) @@ -965,6 +1051,8 @@ def _set_labels(self, key, value): def _set_values(self, key, value): values = self.values + if isinstance(key, Series): + key = key.values values[key] = _index.convert_scalar(values, value) # help out SparseSeries @@ -976,7 +1064,7 @@ def __getslice__(self, i, j): if j < 0: j = 0 slobj = slice(i, j) - return self.__getitem__(slobj) + return self._slice(slobj) def __setslice__(self, i, j, value): """Set slice equal to given value(s)""" @@ -987,57 +1075,21 @@ def __setslice__(self, i, j, value): slobj = slice(i, j) return self.__setitem__(slobj, value) - def astype(self, dtype): - """ - See numpy.ndarray.astype - """ - dtype = np.dtype(dtype) - if dtype == _NS_DTYPE or dtype == _TD_DTYPE: - values = com._possibly_cast_to_datetime(self.values,dtype) - else: - values = com._astype_nansafe(self.values, dtype) - return self._constructor(values, index=self.index, name=self.name, - dtype=values.dtype) - - def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): - """ - Attempt to infer better dtype - - Parameters - ---------- - convert_dates : boolean, default True - if True, attempt to soft convert_dates, if 'coerce', force - conversion (and non-convertibles get NaT) - convert_numeric : boolean, default True - if True attempt to coerce to numbers (including strings), - non-convertibles get NaN - copy : boolean, default True - if True return 
a copy even if not object dtype - - Returns - ------- - converted : Series - """ - if self.dtype == np.object_: - return Series(com._possibly_convert_objects(self.values, - convert_dates=convert_dates, convert_numeric=convert_numeric), - index=self.index, name=self.name) - return self.copy() if copy else self - def repeat(self, reps): """ See ndarray.repeat """ new_index = self.index.repeat(reps) new_values = self.values.repeat(reps) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def reshape(self, newshape, order='C'): """ See numpy.ndarray.reshape """ - if order not in ['C','F']: - raise TypeError("must specify a tuple / singular length to reshape") + if order not in ['C', 'F']: + raise TypeError( + "must specify a tuple / singular length to reshape") if isinstance(newshape, tuple) and len(newshape) > 1: return self.values.reshape(newshape, order=order) @@ -1081,7 +1133,7 @@ def get_value(self, label): ------- value : scalar value """ - return self.index.get_value(self, label) + return self.index.get_value(self.values, label) def set_value(self, label, value): """ @@ -1103,7 +1155,7 @@ def set_value(self, label, value): otherwise a new object """ try: - self.index._engine.set_value(self, label, value) + self.index._engine.set_value(self.values, label, value) return self except KeyError: if len(self.index) == 0: @@ -1112,7 +1164,7 @@ def set_value(self, label, value): new_index = self.index.insert(len(self), label) new_values = np.concatenate([self.values, [value]]) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def reset_index(self, level=None, drop=False, name=None, inplace=False): """ @@ -1148,8 +1200,8 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): # set name if it was passed, otherwise, keep the previous name self.name = name or self.name else: - return 
Series(self.values.copy(), index=new_index, - name=self.name) + return self._constructor(self.values.copy(), index=new_index, + name=self.name) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' 'to create a DataFrame') @@ -1204,10 +1256,24 @@ def _tidy_repr(self, max_vals=20): return compat.text_type(result) def _repr_footer(self): + + # time series + if self.is_time_series: + if self.index.freq is not None: + freqstr = u('Freq: %s, ') % self.index.freqstr + else: + freqstr = u('') + + namestr = u("Name: %s, ") % com.pprint_thing( + self.name) if self.name is not None else "" + return u('%s%sLength: %d') % (freqstr, namestr, len(self)) + + # reg series namestr = u("Name: %s, ") % com.pprint_thing( self.name) if self.name is not None else "" - return u('%sLength: %d, dtype: %s') % (namestr, len(self), - str(self.dtype.name)) + return u('%sLength: %d, dtype: %s') % (namestr, + len(self), + str(self.dtype.name)) def to_string(self, buf=None, na_rep='NaN', float_format=None, nanRep=None, length=False, dtype=False, name=False): @@ -1256,8 +1322,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, with open(buf, 'w') as f: f.write(the_repr) - def _get_repr(self, name=False, print_header=False, length=True, dtype=True, - na_rep='NaN', float_format=None): + def _get_repr( + self, name=False, print_header=False, length=True, dtype=True, + na_rep='NaN', float_format=None): """ Internal function, should always return unicode string @@ -1283,11 +1350,6 @@ def iteritems(self): """ return lzip(iter(self.index), iter(self)) - def iterkv(self): - warnings.warn("iterkv is deprecated and will be removed in a future " - "release. 
Use ``iteritems`` instead", DeprecationWarning) - return self.iteritems() - if compat.PY3: # pragma: no cover items = iteritems @@ -1297,16 +1359,20 @@ def iterkv(self): __add__ = _arith_method(operator.add, '__add__') __sub__ = _arith_method(operator.sub, '__sub__') __mul__ = _arith_method(operator.mul, '__mul__') - __truediv__ = _arith_method(operator.truediv, '__truediv__', fill_zeros=np.inf) - __floordiv__ = _arith_method(operator.floordiv, '__floordiv__', fill_zeros=np.inf) + __truediv__ = _arith_method( + operator.truediv, '__truediv__', fill_zeros=np.inf) + __floordiv__ = _arith_method( + operator.floordiv, '__floordiv__', fill_zeros=np.inf) __pow__ = _arith_method(operator.pow, '__pow__') __mod__ = _arith_method(operator.mod, '__mod__', fill_zeros=np.nan) __radd__ = _arith_method(_radd_compat, '__add__') __rmul__ = _arith_method(operator.mul, '__mul__') __rsub__ = _arith_method(lambda x, y: y - x, '__sub__') - __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__', fill_zeros=np.inf) - __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__', fill_zeros=np.inf) + __rtruediv__ = _arith_method( + lambda x, y: y / x, '__truediv__', fill_zeros=np.inf) + __rfloordiv__ = _arith_method( + lambda x, y: y // x, '__floordiv__', fill_zeros=np.inf) __rpow__ = _arith_method(lambda x, y: y ** x, '__pow__') __rmod__ = _arith_method(lambda x, y: y % x, '__mod__', fill_zeros=np.nan) @@ -1316,16 +1382,16 @@ def iterkv(self): __lt__ = _comp_method(operator.lt, '__lt__') __le__ = _comp_method(operator.le, '__le__') __eq__ = _comp_method(operator.eq, '__eq__') - __ne__ = _comp_method(operator.ne, '__ne__') + __ne__ = _comp_method(operator.ne, '__ne__', True) # inversion def __neg__(self): arr = operator.neg(self.values) - return Series(arr, self.index, name=self.name) + return self._constructor(arr, self.index, name=self.name) def __invert__(self): arr = operator.inv(self.values) - return Series(arr, self.index, name=self.name) + return self._constructor(arr, 
self.index, name=self.name) # binary logic __or__ = _bool_method(operator.or_, '__or__') @@ -1343,7 +1409,8 @@ def __invert__(self): # Python 2 division operators if not compat.PY3: __div__ = _arith_method(operator.div, '__div__', fill_zeros=np.inf) - __rdiv__ = _arith_method(lambda x, y: y / x, '__div__', fill_zeros=np.inf) + __rdiv__ = _arith_method( + lambda x, y: y / x, '__div__', fill_zeros=np.inf) __idiv__ = __div__ #---------------------------------------------------------------------- @@ -1359,9 +1426,6 @@ def keys(self): "Alias for index" return self.index - # alas, I wish this worked - # values = lib.ValuesProperty() - @property def values(self): """ @@ -1371,7 +1435,7 @@ def values(self): ------- arr : numpy.ndarray """ - return self.view(ndarray) + return self._data.values def copy(self, order='C', deep=False): """ @@ -1399,14 +1463,13 @@ def copy(self, order='C', deep=False): return Series(self.values.copy(order), index=index, name=name) + def get_values(self): + """ same as values (but handles sparseness conversions); is a view """ + return self._data.values + def tolist(self): - """ - Convert Series to a nested list - Overrides numpy.ndarray.tolist - """ - if com.is_datetime64_dtype(self): - return list(self) - return self.values.tolist() + """ Convert Series to a nested list """ + return list(self) def to_dict(self): """ @@ -1473,16 +1536,16 @@ def count(self, level=None): level_index = self.index.levels[level] if len(self) == 0: - return Series(0, index=level_index) + return self._constructor(0, index=level_index) # call cython function max_bin = len(level_index) labels = com._ensure_int64(self.index.labels[level]) counts = lib.count_level_1d(mask.view(pa.uint8), labels, max_bin) - return Series(counts, index=level_index) + return self._constructor(counts, index=level_index) - return notnull(self.values).sum() + return notnull(_values_from_object(self)).sum() def value_counts(self, normalize=False): """ @@ -1556,7 +1619,7 @@ def duplicated(self, 
take_last=False): """ keys = com._ensure_object(self.values) duplicated = lib.duplicated(keys, take_last=take_last) - return Series(duplicated, index=self.index, name=self.name) + return self._constructor(duplicated, index=self.index, name=self.name) sum = _make_stat_func(nanops.nansum, 'sum', 'sum') mean = _make_stat_func(nanops.nanmean, 'mean', 'mean') @@ -1591,7 +1654,7 @@ def min(self, axis=None, out=None, skipna=True, level=None): """ if level is not None: return self._agg_by_level('min', level=level, skipna=skipna) - return nanops.nanmin(self.values, skipna=skipna) + return nanops.nanmin(_values_from_object(self), skipna=skipna) @Substitution(name='maximum', shortname='max', na_action=_doc_exclude_na, extras='') @@ -1611,7 +1674,7 @@ def max(self, axis=None, out=None, skipna=True, level=None): """ if level is not None: return self._agg_by_level('max', level=level, skipna=skipna) - return nanops.nanmax(self.values, skipna=skipna) + return nanops.nanmax(_values_from_object(self), skipna=skipna) @Substitution(name='standard deviation', shortname='stdev', na_action=_doc_exclude_na, extras='') @@ -1624,7 +1687,7 @@ def std(self, axis=None, dtype=None, out=None, ddof=1, skipna=True, if level is not None: return self._agg_by_level('std', level=level, skipna=skipna, ddof=ddof) - return np.sqrt(nanops.nanvar(self.values, skipna=skipna, ddof=ddof)) + return np.sqrt(nanops.nanvar(_values_from_object(self), skipna=skipna, ddof=ddof)) @Substitution(name='variance', shortname='var', na_action=_doc_exclude_na, extras='') @@ -1637,7 +1700,7 @@ def var(self, axis=None, dtype=None, out=None, ddof=1, skipna=True, if level is not None: return self._agg_by_level('var', level=level, skipna=skipna, ddof=ddof) - return nanops.nanvar(self.values, skipna=skipna, ddof=ddof) + return nanops.nanvar(_values_from_object(self), skipna=skipna, ddof=ddof) @Substitution(name='unbiased skewness', shortname='skew', na_action=_doc_exclude_na, extras='') @@ -1646,7 +1709,7 @@ def skew(self, 
skipna=True, level=None): if level is not None: return self._agg_by_level('skew', level=level, skipna=skipna) - return nanops.nanskew(self.values, skipna=skipna) + return nanops.nanskew(_values_from_object(self), skipna=skipna) @Substitution(name='unbiased kurtosis', shortname='kurt', na_action=_doc_exclude_na, extras='') @@ -1655,7 +1718,7 @@ def kurt(self, skipna=True, level=None): if level is not None: return self._agg_by_level('kurt', level=level, skipna=skipna) - return nanops.nankurt(self.values, skipna=skipna) + return nanops.nankurt(_values_from_object(self), skipna=skipna) def _agg_by_level(self, name, level=0, skipna=True, **kwds): grouped = self.groupby(level=level) @@ -1686,7 +1749,7 @@ def idxmin(self, axis=None, out=None, skipna=True): -------- DataFrame.idxmin """ - i = nanops.nanargmin(self.values, skipna=skipna) + i = nanops.nanargmin(_values_from_object(self), skipna=skipna) if i == -1: return pa.NA return self.index[i] @@ -1712,11 +1775,15 @@ def idxmax(self, axis=None, out=None, skipna=True): -------- DataFrame.idxmax """ - i = nanops.nanargmax(self.values, skipna=skipna) + i = nanops.nanargmax(_values_from_object(self), skipna=skipna) if i == -1: return pa.NA return self.index[i] + # ndarray compat + argmin = idxmin + argmax = idxmax + def cumsum(self, axis=0, dtype=None, out=None, skipna=True): """ Cumulative sum of values. 
Preserves locations of NaN values @@ -1732,7 +1799,7 @@ def cumsum(self, axis=0, dtype=None, out=None, skipna=True): ------- cumsum : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, (np.integer, np.bool_)) @@ -1745,7 +1812,7 @@ def cumsum(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cumprod(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1762,7 +1829,7 @@ def cumprod(self, axis=0, dtype=None, out=None, skipna=True): ------- cumprod : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, (np.integer, np.bool_)) @@ -1775,7 +1842,7 @@ def cumprod(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cummax(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1792,7 +1859,7 @@ def cummax(self, axis=0, dtype=None, out=None, skipna=True): ------- cummax : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1804,7 +1871,7 @@ def cummax(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cummin(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1821,7 +1888,7 @@ def cummin(self, axis=0, dtype=None, out=None, skipna=True): ------- cummin : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1833,16 +1900,17 @@ def 
cummin(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) @Appender(pa.Array.round.__doc__) def round(self, decimals=0, out=None): """ """ - result = self.values.round(decimals, out=out) + result = _values_from_object(self).round(decimals, out=out) if out is None: - result = Series(result, index=self.index, name=self.name) + result = self._constructor( + result, index=self.index, name=self.name) return result @@ -1866,7 +1934,7 @@ def quantile(self, q=0.5): return _quantile(valid_values, q * 100) def ptp(self, axis=None, out=None): - return self.values.ptp(axis, out) + return _values_from_object(self).ptp(axis, out) def describe(self, percentile_width=50): """ @@ -1901,7 +1969,7 @@ def describe(self, percentile_width=50): elif issubclass(self.dtype.type, np.datetime64): names = ['count', 'unique'] - asint = self.dropna().view('i8') + asint = self.dropna().values.view('i8') objcounts = Counter(asint) data = [self.count(), len(objcounts)] if data[1] > 0: @@ -1928,10 +1996,10 @@ def pretty_name(x): pretty_name(ub), 'max'] data += [self.mean(), self.std(), self.min(), self.quantile( - lb), self.median(), self.quantile(ub), + lb), self.median(), self.quantile(ub), self.max()] - return Series(data, index=names) + return self._constructor(data, index=names) def corr(self, other, method='pearson', min_periods=None): @@ -1994,8 +2062,8 @@ def diff(self, periods=1): ------- diffed : Series """ - result = com.diff(self.values, periods) - return Series(result, self.index, name=self.name) + result = com.diff(_values_from_object(self), periods) + return self._constructor(result, self.index, name=self.name) def autocorr(self): """ @@ -2124,6 +2192,7 @@ def append(self, to_append, verify_integrity=False): appended : Series """ from pandas.tools.merge import concat + if isinstance(to_append, (list, tuple)): to_concat = [self] + 
to_append else: @@ -2176,7 +2245,7 @@ def _binop(self, other, func, level=None, fill_value=None): result = func(this_vals, other_vals) name = _maybe_match_name(self, other) - return Series(result, index=new_index, name=name) + return self._constructor(result, index=new_index, name=name) add = _flex_method(operator.add, 'add') sub = _flex_method(operator.sub, 'subtract') @@ -2216,7 +2285,7 @@ def combine(self, other, func, fill_value=nan): new_index = self.index new_values = func(self.values, other) new_name = self.name - return Series(new_values, index=new_index, name=new_name) + return self._constructor(new_values, index=new_index, name=new_name) def combine_first(self, other): """ @@ -2235,8 +2304,8 @@ def combine_first(self, other): this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) name = _maybe_match_name(self, other) - rs_vals = com._where_compat(isnull(this), other, this) - return Series(rs_vals, index=new_index, name=name) + rs_vals = com._where_compat(isnull(this), other.values, this.values) + return self._constructor(rs_vals, index=new_index, name=name) def update(self, other): """ @@ -2249,7 +2318,9 @@ def update(self, other): """ other = other.reindex_like(self) mask = notnull(other) - com._maybe_upcast_putmask(self.values,mask,other,change=self.values) + + self._data = self._data.putmask(mask, other, inplace=True) + self._maybe_update_cacher() #---------------------------------------------------------------------- # Reindexing, sorting @@ -2276,7 +2347,7 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True): sortedSeries = self.order(na_last=True, kind=kind, ascending=ascending) - true_base = self + true_base = self.values while true_base.base is not None: true_base = true_base.base @@ -2318,7 +2389,7 @@ def sort_index(self, ascending=True): ascending=ascending) new_values = self.values.take(indexer) - return Series(new_values, new_labels, name=self.name) + return self._constructor(new_values, 
new_labels, name=self.name) def argsort(self, axis=0, kind='quicksort', order=None): """ @@ -2342,13 +2413,15 @@ def argsort(self, axis=0, kind='quicksort', order=None): mask = isnull(values) if mask.any(): - result = Series(-1,index=self.index,name=self.name,dtype='int64') + result = Series( + -1, index=self.index, name=self.name, dtype='int64') notmask = -mask - result.values[notmask] = np.argsort(self.values[notmask], kind=kind) - return result + result[notmask] = np.argsort(values[notmask], kind=kind) + return self._constructor(result, index=self.index, name=self.name) else: - return Series(np.argsort(values, kind=kind), index=self.index, - name=self.name,dtype='int64') + return self._constructor( + np.argsort(values, kind=kind), index=self.index, + name=self.name, dtype='int64') def rank(self, method='average', na_option='keep', ascending=True): """ @@ -2374,7 +2447,7 @@ def rank(self, method='average', na_option='keep', ascending=True): from pandas.core.algorithms import rank ranks = rank(self.values, method=method, na_option=na_option, ascending=ascending) - return Series(ranks, index=self.index, name=self.name) + return self._constructor(ranks, index=self.index, name=self.name) def order(self, na_last=True, ascending=True, kind='mergesort'): """ @@ -2426,8 +2499,8 @@ def _try_kind_sort(arr): sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] - return Series(arr[sortedIdx], index=self.index[sortedIdx], - name=self.name) + return self._constructor(arr[sortedIdx], index=self.index[sortedIdx], + name=self.name) def sortlevel(self, level=0, ascending=True): """ @@ -2449,7 +2522,7 @@ def sortlevel(self, level=0, ascending=True): new_index, indexer = self.index.sortlevel(level, ascending=ascending) new_values = self.values.take(indexer) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def swaplevel(self, i, j, copy=True): """ @@ -2465,7 +2538,7 @@ def swaplevel(self, i, 
j, copy=True): swapped : Series """ new_index = self.index.swaplevel(i, j) - return Series(self.values, index=new_index, copy=copy, name=self.name) + return self._constructor(self.values, index=new_index, copy=copy, name=self.name) def reorder_levels(self, order): """ @@ -2573,14 +2646,14 @@ def map_f(values, f): if isinstance(arg, (dict, Series)): if isinstance(arg, dict): - arg = Series(arg) + arg = self._constructor(arg) indexer = arg.index.get_indexer(values) new_values = com.take_1d(arg.values, indexer) - return Series(new_values, index=self.index, name=self.name) + return self._constructor(new_values, index=self.index, name=self.name) else: mapped = map_f(values, arg) - return Series(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index, name=self.name) def apply(self, func, convert_dtype=True, args=(), **kwds): """ @@ -2614,16 +2687,16 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): if isinstance(f, np.ufunc): return f(self) - values = self.values + values = _values_from_object(self) if com.is_datetime64_dtype(values.dtype): values = lib.map_infer(values, lib.Timestamp) mapped = lib.map_infer(values, f, convert=convert_dtype) - if isinstance(mapped[0], Series): + if len(mapped) and isinstance(mapped[0], Series): from pandas.core.frame import DataFrame return DataFrame(mapped.tolist(), index=self.index) else: - return Series(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index, name=self.name) def align(self, other, join='outer', level=None, copy=True, fill_value=None, method=None, limit=None): @@ -2721,7 +2794,7 @@ def reindex(self, index=None, method=None, level=None, fill_value=pa.NA, return self if len(self.index) == 0: - return Series(nan, index=index, name=self.name) + return self._constructor(nan, index=index, name=self.name) new_index, indexer = self.index.reindex(index, method=method, level=level, limit=limit, @@ -2729,42 +2802,18 @@ def reindex(self, 
index=None, method=None, level=None, fill_value=pa.NA, # GH4246 (dispatch to a common method with frame to handle possibly # duplicate index) - return self._reindex_with_indexers(new_index, indexer, copy=copy, - fill_value=fill_value) + return self._reindex_with_indexers({ 0 : [new_index, indexer] }, copy=copy, fill_value=fill_value) - def _reindex_with_indexers(self, index, indexer, copy, fill_value): + def _reindex_with_indexers(self, reindexers, copy, fill_value=None): + index, indexer = reindexers[0] new_values = com.take_1d(self.values, indexer, fill_value=fill_value) - return Series(new_values, index=index, name=self.name) + return self._constructor(new_values, index=index, name=self.name) def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") - return self.reindex(index=labels,**kwargs) - - def reindex_like(self, other, method=None, limit=None, fill_value=pa.NA): - """ - Reindex Series to match index of another Series, optionally with - filling logic - - Parameters - ---------- - other : Series - method : string or None - See Series.reindex docstring - limit : int, default None - Maximum size gap to forward or backward fill - - Notes - ----- - Like calling s.reindex(other.index, method=...) 
- - Returns - ------- - reindexed : Series - """ - return self.reindex(other.index, method=method, limit=limit, - fill_value=fill_value) + return self.reindex(index=labels, **kwargs) def take(self, indices, axis=0, convert=True): """ @@ -2780,180 +2829,15 @@ def take(self, indices, axis=0, convert=True): ------- taken : Series """ + # check/convert indicies here + if convert: + indices = _maybe_convert_indices( + indices, len(self._get_axis(axis))) + indices = com._ensure_platform_int(indices) new_index = self.index.take(indices) new_values = self.values.take(indices) - return Series(new_values, index=new_index, name=self.name) - - truncate = generic.truncate - - def fillna(self, value=None, method=None, inplace=False, - limit=None): - """ - Fill NA/NaN values using the specified method - - Parameters - ---------- - value : any kind (should be same type as array) - Value to use to fill holes (e.g. 0) - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - inplace : boolean, default False - If True, fill the Series in place. Note: this will modify any other - views on this Series, for example a column in a DataFrame. 
Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - - See also - -------- - reindex, asfreq - - Returns - ------- - filled : Series - """ - if isinstance(value, (list, tuple)): - raise TypeError('"value" parameter must be a scalar or dict, but ' - 'you passed a "{0}"'.format(type(value).__name__)) - if not self._can_hold_na: - return self.copy() if not inplace else None - - if value is not None: - if method is not None: - raise ValueError('Cannot specify both a fill value and method') - result = self.copy() if not inplace else self - mask = isnull(self.values) - np.putmask(result, mask, value) - else: - if method is None: # pragma: no cover - raise ValueError('must specify a fill method or value') - - fill_f = _get_fill_func(method) - - if inplace: - values = self.values - else: - values = self.values.copy() - - fill_f(values, limit=limit) - - if inplace: - result = self - else: - result = Series(values, index=self.index, name=self.name) - - if not inplace: - return result - - def ffill(self, inplace=False, limit=None): - return self.fillna(method='ffill', inplace=inplace, limit=limit) - - def bfill(self, inplace=False, limit=None): - return self.fillna(method='bfill', inplace=inplace, limit=limit) - - def replace(self, to_replace, value=None, method='pad', inplace=False, - limit=None): - """ - Replace arbitrary values in a Series - - Parameters - ---------- - to_replace : list or dict - list of values to be replaced or dict of replacement values - value : anything - if to_replace is a list then value is the replacement value - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - inplace : boolean, default False - If True, fill the Series in place. 
Note: this will modify any other - views on this Series, for example a column in a DataFrame. Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - - Notes - ----- - replace does not distinguish between NaN and None - - See also - -------- - fillna, reindex, asfreq - - Returns - ------- - replaced : Series - """ - - if inplace: - result = self - change = self - else: - result = self.copy() - change = None - - def _rep_one(s, to_rep, v): # replace single value - mask = com.mask_missing(s.values, to_rep) - com._maybe_upcast_putmask(s.values,mask,v,change=change) - - def _rep_dict(rs, to_rep): # replace {[src] -> dest} - - all_src = set() - dd = {} # group by unique destination value - for s, d in compat.iteritems(to_rep): - dd.setdefault(d, []).append(s) - all_src.add(s) - - if any(d in all_src for d in dd.keys()): - # don't clobber each other at the cost of temporaries - masks = {} - for d, sset in compat.iteritems(dd): # now replace by each dest - masks[d] = com.mask_missing(rs.values, sset) - - for d, m in compat.iteritems(masks): - com._maybe_upcast_putmask(rs.values,m,d,change=change) - else: # if no risk of clobbering then simple - for d, sset in compat.iteritems(dd): - _rep_one(rs, sset, d) - - if np.isscalar(to_replace): - to_replace = [to_replace] - - if isinstance(to_replace, dict): - _rep_dict(result, to_replace) - elif isinstance(to_replace, (list, pa.Array)): - - if isinstance(value, (list, pa.Array)): # check same length - vl, rl = len(value), len(to_replace) - if vl == rl: - _rep_dict(result, dict(zip(to_replace, value))) - else: - raise ValueError('Got %d to replace but %d values' - % (rl, vl)) - - elif value is not None: # otherwise all replaced with same value - _rep_one(result, to_replace, value) - else: # method - if method is None: # pragma: no cover - raise ValueError('must specify a fill method') - fill_f = _get_fill_func(method) - - mask = 
com.mask_missing(result, to_replace) - fill_f(result.values, limit=limit, mask=mask) - - if not inplace: - result = Series(result.values, index=self.index, - name=self.name) - else: - raise ValueError('Unrecognized to_replace type %s' % - type(to_replace)) - - if not inplace: - return result + return self._constructor(new_values, index=new_index, name=self.name) def isin(self, values): """ @@ -2969,8 +2853,8 @@ def isin(self, values): isin : Series (boolean dtype) """ value_set = set(values) - result = lib.ismember(self.values, value_set) - return Series(result, self.index, name=self.name) + result = lib.ismember(_values_from_object(self), value_set) + return self._constructor(result, self.index, name=self.name) def between(self, left, right, inclusive=True): """ @@ -3153,19 +3037,20 @@ def _get_values(): new_values[:periods] = self.values[-periods:] new_values[periods:] = fill_value - return Series(new_values, index=self.index, name=self.name) + return self._constructor(new_values, index=self.index, name=self.name) elif isinstance(self.index, PeriodIndex): orig_offset = datetools.to_offset(self.index.freq) if orig_offset == offset: - return Series(_get_values(), self.index.shift(periods), - name=self.name) + return self._constructor( + _get_values(), self.index.shift(periods), + name=self.name) msg = ('Given freq %s does not match PeriodIndex freq %s' % (offset.rule_code, orig_offset.rule_code)) raise ValueError(msg) else: - return Series(_get_values(), - index=self.index.shift(periods, offset), - name=self.name) + return self._constructor(_get_values(), + index=self.index.shift(periods, offset), + name=self.name) def asof(self, where): """ @@ -3211,7 +3096,7 @@ def asof(self, where): locs = self.index.asof_locs(where, notnull(values)) new_values = com.take_1d(values, locs) - return Series(new_values, index=where, name=self.name) + return self._constructor(new_values, index=where, name=self.name) def interpolate(self, method='linear'): """ @@ -3230,7 +3115,7 @@ 
def interpolate(self, method='linear'): interpolated : Series """ if method == 'time': - if not isinstance(self, TimeSeries): + if not self.is_time_series: raise Exception('time-weighted interpolation only works' 'on TimeSeries') method = 'values' @@ -3262,7 +3147,7 @@ def interpolate(self, method='linear'): result[firstIndex:][invalid] = np.interp( inds[invalid], inds[valid], values[firstIndex:][valid]) - return Series(result, index=self.index, name=self.name) + return self._constructor(result, index=self.index, name=self.name) def rename(self, mapper, inplace=False): """ @@ -3300,14 +3185,15 @@ def rename(self, mapper, inplace=False): """ mapper_f = _get_rename_function(mapper) result = self if inplace else self.copy() - result.index = Index([mapper_f(x) for x in self.index], name=self.index.name) + result.index = Index([mapper_f(x) + for x in self.index], name=self.index.name) if not inplace: return result @property def weekday(self): - return Series([d.weekday() for d in self.index], index=self.index) + return self._constructor([d.weekday() for d in self.index], index=self.index) def tz_convert(self, tz, copy=True): """ @@ -3329,7 +3215,7 @@ def tz_convert(self, tz, copy=True): if copy: new_values = new_values.copy() - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def tz_localize(self, tz, copy=True): """ @@ -3364,27 +3250,80 @@ def tz_localize(self, tz, copy=True): if copy: new_values = new_values.copy() - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) @cache_readonly def str(self): from pandas.core.strings import StringMethods return StringMethods(self) + def to_timestamp(self, freq=None, how='start', copy=True): + """ + Cast to datetimeindex of timestamps, at *beginning* of period + + Parameters + ---------- + freq : string, default frequency of PeriodIndex + Desired frequency + how : {'s', 
'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. end + + Returns + ------- + ts : TimeSeries with DatetimeIndex + """ + new_values = self.values + if copy: + new_values = new_values.copy() + + new_index = self.index.to_timestamp(freq=freq, how=how) + return self._constructor(new_values, index=new_index, name=self.name) + + def to_period(self, freq=None, copy=True): + """ + Convert TimeSeries from DatetimeIndex to PeriodIndex with desired + frequency (inferred from index if not passed) + + Parameters + ---------- + freq : string, default + + Returns + ------- + ts : TimeSeries with PeriodIndex + """ + new_values = self.values + if copy: + new_values = new_values.copy() + + if freq is None: + freq = self.index.freqstr or self.index.inferred_freq + new_index = self.index.to_period(freq=freq) + return self._constructor(new_values, index=new_index, name=self.name) + +Series._setup_axes(['index'], info_axis=0) _INDEX_TYPES = ndarray, Index, list, tuple +# reinstall the SeriesIndexer +# defined in indexing.py; pylint: disable=E0203 +Series._create_indexer('ix', _SeriesIndexer) + #------------------------------------------------------------------------------ # Supplementary functions -def remove_na(arr): +def remove_na(series): """ - Return array containing only true/non-NaN values, possibly empty. + Return series containing only true/non-NaN values, possibly empty. 
""" - return arr[notnull(arr)] + return series[notnull(_values_from_object(series))] + def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): + if dtype is not None: + dtype = np.dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) @@ -3412,8 +3351,8 @@ def _try_cast(arr, take_fast_path): return subarr # GH #846 - if isinstance(data, pa.Array): - subarr = data + if isinstance(data, (pa.Array, Series)): + subarr = np.array(data, copy=False) if dtype is not None: # possibility of nan -> garbage @@ -3541,6 +3480,9 @@ def _get_fill_func(method): fill_f = com.backfill_1d return fill_f +# backwards compatiblity +TimeSeries = Series + #---------------------------------------------------------------------- # Add plotting methods to Series @@ -3548,91 +3490,3 @@ def _get_fill_func(method): Series.plot = _gfx.plot_series Series.hist = _gfx.hist_series - -# Put here, otherwise monkey-patching in methods fails - - -class TimeSeries(Series): - """ - The time series varians of Series, a One-dimensional ndarray with `TimeStamp` - axis labels. - Labels need not be unique but must be any hashable type. The object - supports both integer- and label-based indexing and provides a host of - methods for performing operations involving the index. Statistical - methods from ndarray have been overridden to automatically exclude - missing data (currently represented as NaN) - - Operations between Series (+, -, /, *, **) align values based on their - associated index values-- they need not be the same length. The result - index will be the sorted union of the two indexes. - - Parameters - ---------- - data : array-like, dict, or scalar value - Contains data stored in Series - index : array-like or Index (1d) - Values must be unique and hashable, same length as data. Index - object (or other iterable of same length as data) Will default to - np.arange(len(data)) if not provided. 
If both a dict and index - sequence are used, the index will override the keys found in the - dict. - dtype : numpy.dtype or None - If None, dtype will be inferred copy : boolean, default False Copy - input data - copy : boolean, default False - """ - def _repr_footer(self): - if self.index.freq is not None: - freqstr = 'Freq: %s, ' % self.index.freqstr - else: - freqstr = '' - - namestr = "Name: %s, " % str( - self.name) if self.name is not None else "" - return '%s%sLength: %d, dtype: %s' % (freqstr, namestr, len(self), - com.pprint_thing(self.dtype.name)) - - def to_timestamp(self, freq=None, how='start', copy=True): - """ - Cast to datetimeindex of timestamps, at *beginning* of period - - Parameters - ---------- - freq : string, default frequency of PeriodIndex - Desired frequency - how : {'s', 'e', 'start', 'end'} - Convention for converting period to timestamp; start of period - vs. end - - Returns - ------- - ts : TimeSeries with DatetimeIndex - """ - new_values = self.values - if copy: - new_values = new_values.copy() - - new_index = self.index.to_timestamp(freq=freq, how=how) - return Series(new_values, index=new_index, name=self.name) - - def to_period(self, freq=None, copy=True): - """ - Convert TimeSeries from DatetimeIndex to PeriodIndex with desired - frequency (inferred from index if not passed) - - Parameters - ---------- - freq : string, default - - Returns - ------- - ts : TimeSeries with PeriodIndex - """ - new_values = self.values - if copy: - new_values = new_values.copy() - - if freq is None: - freq = self.index.freqstr or self.index.inferred_freq - new_index = self.index.to_period(freq=freq) - return Series(new_values, index=new_index, name=self.name) diff --git a/pandas/core/sparse.py b/pandas/core/sparse.py index 1405e88a1343a..7b9caaa3a0139 100644 --- a/pandas/core/sparse.py +++ b/pandas/core/sparse.py @@ -5,6 +5,6 @@ # pylint: disable=W0611 -from pandas.sparse.series import SparseSeries, SparseTimeSeries +from pandas.sparse.series import 
SparseSeries from pandas.sparse.frame import SparseDataFrame from pandas.sparse.panel import SparsePanel diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 462ed81aaf875..4ba77d118d272 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1,7 +1,7 @@ import numpy as np from pandas.compat import zip -from pandas.core.common import isnull +from pandas.core.common import isnull, _values_from_object from pandas.core.series import Series import pandas.compat as compat import re @@ -91,6 +91,8 @@ def _na_map(f, arr, na_result=np.nan): def _map(f, arr, na_mask=False, na_value=np.nan): + if isinstance(arr, Series): + arr = arr.values if not isinstance(arr, np.ndarray): arr = np.asarray(arr, dtype=object) if na_mask: @@ -296,7 +298,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = lib.vec_binop(arr, repeats, rep) + result = lib.vec_binop(_values_from_object(arr), repeats, rep) return result diff --git a/pandas/index.pyx b/pandas/index.pyx index 2311ac25293f1..53c96b1c55605 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -50,8 +50,9 @@ cdef inline is_definitely_invalid_key(object val): except TypeError: return True + # we have a _data, means we are a NDFrame return (PySlice_Check(val) or cnp.PyArray_Check(val) - or PyList_Check(val)) + or PyList_Check(val) or hasattr(val,'_data')) def get_value_at(ndarray arr, object loc): if arr.descr.type_num == NPY_DATETIME: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 9990da148f8a3..aee839c354cd3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -206,7 +206,29 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, app f(path_or_buf) def read_hdf(path_or_buf, key, **kwargs): - """ read from the store, closeit if we opened it """ + """ read from the store, closeit if we opened it + + Retrieve pandas object stored in file, optionally based on where + criteria + + Parameters + 
---------- + path_or_buf : path (string), or buffer to read from + key : group identifier in the store + where : list of Term (or convertable) objects, optional + start : optional, integer (defaults to None), row number to start selection + stop : optional, integer (defaults to None), row number to stop selection + columns : optional, a list of columns that if not None, will limit the return columns + iterator : optional, boolean, return an iterator, default False + chunksize : optional, nrows to include in iteration, return an iterator + auto_close : optional, boolean, should automatically close the store when finished, default is False + + Returns + ------- + The selected object + + """ + f = lambda store, auto_close: store.select(key, auto_close=auto_close, **kwargs) if isinstance(path_or_buf, compat.string_types): @@ -468,6 +490,10 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator= chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False + Returns + ------- + The selected object + """ group = self.get_node(key) if group is None: @@ -1430,7 +1456,7 @@ def get_atom_string(self, block, itemsize): def set_atom_string(self, block, existing_col, min_itemsize, nan_rep, encoding): # fill nan items with myself - block = block.fillna(nan_rep) + block = block.fillna(nan_rep)[0] data = block.values # see if we have a valid string type @@ -2673,7 +2699,9 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, # reindex by our non_index_axes & compute data_columns for a in self.non_index_axes: - obj = obj.reindex_axis(a[1], axis=a[0]) + labels = _ensure_index(a[1]) + if not labels.equals(obj._get_axis(a[0])): + obj = obj.reindex_axis(labels, axis=a[0]) # figure out data_columns and get out blocks block_obj = self.get_object(obj).consolidate() @@ -2759,7 +2787,9 @@ def process_axes(self, obj, columns=None): for axis, 
labels in self.non_index_axes: if columns is not None: labels = Index(labels) & Index(columns) - obj = obj.reindex_axis(labels, axis=axis) + labels = _ensure_index(labels) + if not labels.equals(obj._get_axis(axis)): + obj = obj.reindex_axis(labels, axis=axis) # apply the selection filters (but keep in the same order) if self.selection.filter: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 21cf6d40ddec9..9d21e10d69982 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -551,7 +551,7 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None): labeled_data = np.copy(data[col]) labeled_data = labeled_data.astype(object) for k, v in compat.iteritems(self.value_label_dict[self.lbllist[i]]): - labeled_data[data[col] == k] = v + labeled_data[(data[col] == k).values] = v data[col] = Categorical.from_array(labeled_data) return data diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index 3c805e9fa260d..f2ddce7fa7b7e 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -16,6 +16,7 @@ from pandas.sparse.tests import test_sparse from pandas import compat from pandas.util.misc import is_little_endian +import pandas class TestPickle(unittest.TestCase): _multiprocess_can_split_ = True @@ -27,14 +28,20 @@ def setUp(self): def compare(self, vf): # py3 compat when reading py2 pickle - try: with open(vf,'rb') as fh: data = pickle.load(fh) - except (ValueError): + except ValueError as detail: # we are trying to read a py3 pickle in py2..... 
return + + # we have a deprecated klass + except TypeError as detail: + + from pandas.compat.pickle_compat import load + data = load(vf) + except: if not compat.PY3: raise diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 34576f8521d1b..6b9bdf3385732 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1329,7 +1329,8 @@ def test_append_raise(self): # datetime with embedded nans as object df = tm.makeDataFrame() - s = Series(datetime.datetime(2001,1,2),index=df.index,dtype=object) + s = Series(datetime.datetime(2001,1,2),index=df.index) + s = s.astype(object) s[0:5] = np.nan df['invalid'] = s self.assert_(df.dtypes['invalid'] == np.object_) diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index d75de149d6f4b..31472dc667847 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -48,11 +48,10 @@ def test_read_dta1(self): columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) - for i, col in enumerate(parsed.columns): - np.testing.assert_almost_equal( - parsed[col], - expected[expected.columns[i]] - ) + # this is an oddity as really the nan should be float64, but + # the casting doesn't fail so need to match stata here + expected['float_miss'] = expected['float_miss'].astype(np.float32) + tm.assert_frame_equal(parsed, expected) def test_read_dta2(self): expected = DataFrame.from_records( @@ -101,14 +100,16 @@ def test_read_dta2(self): tm.assert_frame_equal(parsed, expected) def test_read_dta3(self): + parsed = self.read_dta(self.dta3) + + # match stata here expected = self.read_csv(self.csv3) - for i, col in enumerate(parsed.columns): - np.testing.assert_almost_equal( - parsed[col], - expected[expected.columns[i]], - decimal=3 - ) + expected = expected.astype(np.float32) + expected['year'] = expected['year'].astype(np.int32) + expected['quarter']= expected['quarter'].astype(np.int16) + + 
tm.assert_frame_equal(parsed,expected) def test_read_dta4(self): parsed = self.read_dta(self.dta4) @@ -164,37 +165,19 @@ def test_write_dta6(self): def test_read_dta7(self): expected = read_csv(self.csv7, parse_dates=True, sep='\t') parsed = self.read_dta(self.dta7) - - for i, col in enumerate(parsed.columns): - np.testing.assert_almost_equal( - parsed[col], - expected[expected.columns[i]], - decimal=3 - ) + tm.assert_frame_equal(parsed, expected) @nose.tools.nottest def test_read_dta8(self): expected = read_csv(self.csv8, parse_dates=True, sep='\t') parsed = self.read_dta(self.dta8) - - for i, col in enumerate(parsed.columns): - np.testing.assert_almost_equal( - parsed[col], - expected[expected.columns[i]], - decimal=3 - ) + tm.assert_frame_equal(parsed, expected) @nose.tools.nottest def test_read_dta9(self): expected = read_csv(self.csv9, parse_dates=True, sep='\t') parsed = self.read_dta(self.dta9) - - for i, col in enumerate(parsed.columns): - np.testing.assert_equal( - parsed[col], - expected[expected.columns[i]], - decimal=3 - ) + assert_frame_equal(parsed, expected) def test_read_write_dta10(self): if not is_little_endian(): diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 7c4ba1cda35eb..f5205ae0c3133 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -714,11 +714,20 @@ def vec_binop(ndarray[object] left, ndarray[object] right, object op): def astype_intsafe(ndarray[object] arr, new_dtype): cdef: Py_ssize_t i, n = len(arr) + object v + bint is_datelike ndarray result + # on 32-bit, 1.6.2 numpy M8[ns] is a subdtype of integer, which is weird + is_datelike = new_dtype in ['M8[ns]','m8[ns]'] + result = np.empty(n, dtype=new_dtype) for i in range(n): - util.set_value_at(result, i, arr[i]) + v = arr[i] + if is_datelike and checknull(v): + result[i] = NPY_NAT + else: + util.set_value_at(result, i, v) return result diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 7710749a869f0..34823c052a518 100644 --- a/pandas/sparse/array.py +++ 
b/pandas/sparse/array.py @@ -12,6 +12,7 @@ import pandas.core.common as com from pandas import compat +from pandas.compat import range from pandas._sparse import BlockIndex, IntIndex import pandas._sparse as splib @@ -24,6 +25,7 @@ def _sparse_op_wrap(op, name): Wrapper function for Series arithmetic operations, to avoid code duplication. """ + def wrapper(self, other): if isinstance(other, np.ndarray): if not ((len(self) == len(other))): @@ -87,7 +89,9 @@ def _sparse_fillop(this, other, name): return result, result_index + class SparseArray(PandasObject, np.ndarray): + """Data structure for labeled, sparse floating point data Parameters @@ -101,18 +105,32 @@ class SparseArray(PandasObject, np.ndarray): Notes ----- -SparseSeries objects are immutable via the typical Python means. If you +SparseArray objects are immutable via the typical Python means. If you must change values, convert to dense, make your changes, then convert back to sparse """ __array_priority__ = 15 + _typ = 'array' + _subtyp = 'sparse_array' sp_index = None fill_value = None - def __new__(cls, data, sparse_index=None, kind='integer', fill_value=None, - copy=False): - + def __new__( + cls, data, sparse_index=None, index=None, kind='integer', fill_value=None, + dtype=np.float64, copy=False): + + if index is not None: + if data is None: + data = np.nan + if not np.isscalar(data): + raise Exception("must only pass scalars with an index ") + values = np.empty(len(index), dtype='float64') + values.fill(data) + data = values + + if dtype is not None: + dtype = np.dtype(dtype) is_sparse_array = isinstance(data, SparseArray) if fill_value is None: if is_sparse_array: @@ -135,14 +153,21 @@ def __new__(cls, data, sparse_index=None, kind='integer', fill_value=None, # Create array, do *not* copy data by default if copy: - subarr = np.array(values, dtype=np.float64, copy=True) + subarr = np.array(values, dtype=dtype, copy=True) else: - subarr = np.asarray(values, dtype=np.float64) + subarr = 
np.asarray(values, dtype=dtype) + + # if we have a bool type, make sure that we have a bool fill_value + if (dtype is not None and issubclass(dtype.type, np.bool_)) or (data is not None and lib.is_bool_array(subarr)): + if np.isnan(fill_value) or not fill_value: + fill_value = False + else: + fill_value = bool(fill_value) # Change the class of the array to be the subclass type. output = subarr.view(cls) output.sp_index = sparse_index - output.fill_value = np.float64(fill_value) + output.fill_value = fill_value return output @property @@ -182,11 +207,15 @@ def __setstate__(self, state): self.fill_value = fill_value def __len__(self): - return self.sp_index.length + try: + return self.sp_index.length + except: + return 0 def __unicode__(self): - return '%s\n%s' % (com.pprint_thing(self), - com.pprint_thing(self.sp_index)) + return '%s\nFill: %s\n%s' % (com.pprint_thing(self), + com.pprint_thing(self.fill_value), + com.pprint_thing(self.sp_index)) # Arithmetic operators @@ -237,6 +266,29 @@ def sp_values(self): # caching not an option, leaks memory return self.view(np.ndarray) + def get_values(self, fill=None): + """ return a dense representation """ + return self.to_dense(fill=fill) + + def to_dense(self, fill=None): + """ + Convert SparseSeries to (dense) Series + """ + values = self.values + + # fill the nans + if fill is None: + fill = self.fill_value + if not np.isnan(fill): + values[np.isnan(values)] = fill + + return values + + def __iter__(self): + for i in range(len(self)): + yield self._get_val_at(i) + raise StopIteration + def __getitem__(self, key): """ @@ -260,8 +312,8 @@ def _get_val_at(self, loc): if loc < 0: loc += n - if loc >= len(self) or loc < 0: - raise IndexError('out of bounds access') + if loc >= n or loc < 0: + raise IndexError('Out of bounds access') sp_loc = self.sp_index.lookup(loc) if sp_loc == -1: @@ -279,16 +331,26 @@ def take(self, indices, axis=0): """ if not ((axis == 0)): raise AssertionError() - indices = np.asarray(indices, 
dtype=int) + indices = np.atleast_1d(np.asarray(indices, dtype=int)) + # allow -1 to indicate missing values n = len(self) - if (indices < 0).any() or (indices >= n).any(): + if ((indices >= n) | (indices < -1)).any(): raise IndexError('out of bounds access') if self.sp_index.npoints > 0: - locs = np.array([self.sp_index.lookup(loc) for loc in indices]) + locs = np.array( + [self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices]) result = self.sp_values.take(locs) - result[locs == -1] = self.fill_value + mask = locs == -1 + if mask.any(): + try: + result[mask] = self.fill_value + except (ValueError): + # wrong dtype + result = result.astype('float64') + result[mask] = self.fill_value + else: result = np.empty(len(indices)) result.fill(self.fill_value) @@ -296,16 +358,28 @@ def take(self, indices, axis=0): return result def __setitem__(self, key, value): - raise TypeError('%r object does not support item assignment' % self.__class__.__name__) + # if com.is_integer(key): + # self.values[key] = value + # else: + # raise Exception("SparseArray does not support seting non-scalars via setitem") + raise TypeError( + "SparseArray does not support item assignment via setitem") def __setslice__(self, i, j, value): - raise TypeError('%r object does not support item assignment' % self.__class__.__name__) + if i < 0: + i = 0 + if j < 0: + j = 0 + slobj = slice(i, j) - def to_dense(self): - """ - Convert SparseSeries to (dense) Series - """ - return self.values + # if not np.isscalar(value): + # raise Exception("SparseArray does not support seting non-scalars via slices") + + #x = self.values + #x[slobj] = value + #self.values = x + raise TypeError( + "SparseArray does not support item assignment via slices") def astype(self, dtype=None): """ @@ -326,6 +400,7 @@ def copy(self, deep=True): else: values = self.sp_values return SparseArray(values, sparse_index=self.sp_index, + dtype=self.dtype, fill_value=self.fill_value) def count(self): @@ -407,6 +482,22 @@ def 
mean(self, axis=None, dtype=None, out=None): return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) +def _maybe_to_dense(obj): + """ try to convert to dense """ + if hasattr(obj, 'to_dense'): + return obj.to_dense() + return obj + + +def _maybe_to_sparse(array): + if isinstance(array, com.ABCSparseSeries): + array = SparseArray( + array.values, sparse_index=array.sp_index, fill_value=array.fill_value, copy=True) + if not isinstance(array, SparseArray): + array = com._values_from_object(array) + return array + + def make_sparse(arr, kind='block', fill_value=nan): """ Convert ndarray to sparse format @@ -421,7 +512,13 @@ def make_sparse(arr, kind='block', fill_value=nan): ------- (sparse_values, index) : (ndarray, SparseIndex) """ - arr = np.asarray(arr) + if hasattr(arr, 'values'): + arr = arr.values + else: + if np.isscalar(arr): + arr = [arr] + arr = np.asarray(arr) + length = len(arr) if np.isnan(fill_value): diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index d108094036f64..00a9d41112154 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -10,7 +10,8 @@ from pandas import compat import numpy as np -from pandas.core.common import _pickle_array, _unpickle_array, _try_sort +from pandas.core.common import (isnull, notnull, _pickle_array, + _unpickle_array, _try_sort) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices from pandas.core.series import Series @@ -19,49 +20,16 @@ from pandas.util.decorators import cache_readonly import pandas.core.common as com import pandas.core.datetools as datetools +from pandas.core.internals import BlockManager, create_block_manager_from_arrays -from pandas.sparse.series import SparseSeries +from pandas.core.generic import NDFrame +from pandas.sparse.series import SparseSeries, SparseArray from pandas.util.decorators import Appender import pandas.lib as lib -class _SparseMockBlockManager(object): - - def 
__init__(self, sp_frame): - self.sp_frame = sp_frame - - def get(self, item): - return self.sp_frame[item].values - - def iget(self, i): - return self.get(self.sp_frame.columns[i]) - - @property - def shape(self): - x, y = self.sp_frame.shape - return y, x - - @property - def axes(self): - return [self.sp_frame.columns, self.sp_frame.index] - - @property - def items(self): - return self.sp_frame.columns - - @property - def blocks(self): - """ return our series in the column order """ - return [ self.iget(i) for i, c in enumerate(self.sp_frame.columns) ] - - def get_numeric_data(self): - # does not check, but assuming all numeric for now - return self.sp_frame - - def get_bool_data(self): - raise NotImplementedError - class SparseDataFrame(DataFrame): + """ DataFrame containing sparse floating point data in the form of SparseSeries objects @@ -78,29 +46,63 @@ class SparseDataFrame(DataFrame): Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in """ - _columns = None - _series = None - _is_mixed_type = False - _col_klass = SparseSeries - ndim = 2 + _verbose_info = False + _constructor_sliced = SparseSeries + _subtyp = 'sparse_frame' def __init__(self, data=None, index=None, columns=None, - default_kind='block', default_fill_value=None): + default_kind=None, default_fill_value=None, + dtype=None, copy=False): + + # pick up the defaults from the Sparse structures + if isinstance(data, SparseDataFrame): + if index is None: + index = data.index + if columns is None: + columns = data.columns + if default_fill_value is None: + default_fill_value = data.default_fill_value + if default_kind is None: + default_kind = data.default_kind + elif isinstance(data, (SparseSeries, SparseArray)): + if index is None: + index = data.index + if default_fill_value is None: + default_fill_value = data.fill_value + if columns is None and hasattr(data, 'name'): + columns = [data.name] + if columns is None: + raise Exception("cannot pass a series 
w/o a name or columns") + data = {columns[0]: data} + if default_fill_value is None: default_fill_value = np.nan + if default_kind is None: + default_kind = 'block' - self.default_kind = default_kind - self.default_fill_value = default_fill_value + self._default_kind = default_kind + self._default_fill_value = default_fill_value if isinstance(data, dict): - sdict, columns, index = self._init_dict(data, index, columns) + mgr = self._init_dict(data, index, columns) + if dtype is not None: + mgr = mgr.astype(dtype) elif isinstance(data, (np.ndarray, list)): - sdict, columns, index = self._init_matrix(data, index, columns) + mgr = self._init_matrix(data, index, columns) + if dtype is not None: + mgr = mgr.astype(dtype) + elif isinstance(data, SparseDataFrame): + mgr = self._init_mgr( + data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, DataFrame): - sdict, columns, index = self._init_dict(data, data.index, - data.columns) + mgr = self._init_dict(data, data.index, data.columns) + if dtype is not None: + mgr = mgr.astype(dtype) + elif isinstance(data, BlockManager): + mgr = self._init_mgr( + data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif data is None: - sdict = {} + data = {} if index is None: index = Index([]) @@ -111,39 +113,33 @@ def __init__(self, data=None, index=None, columns=None, columns = Index([]) else: for c in columns: - sdict[c] = SparseSeries(np.nan, index=index, - kind=self.default_kind, - fill_value=self.default_fill_value) + data[c] = SparseArray(np.nan, + index=index, + kind=self._default_kind, + fill_value=self._default_fill_value) + mgr = dict_to_manager(data, columns, index) + if dtype is not None: + mgr = mgr.astype(dtype) - self._series = sdict - self.columns = columns - self.index = index + NDFrame.__init__(self, mgr) - def _from_axes(self, data, axes): - columns, index = axes - return self._constructor(data, index=index, columns=columns) - - @cache_readonly - def _data(self): 
- return _SparseMockBlockManager(self) + @property + def _constructor(self): + def wrapper(data, index=None, columns=None, default_fill_value=None, kind=None, fill_value=None, copy=False): + result = SparseDataFrame(data, index=index, columns=columns, + default_fill_value=fill_value, + default_kind=kind, + copy=copy) - def _consolidate_inplace(self): - # do nothing when DataFrame calls this method - pass + # fill if requested + if fill_value is not None and not isnull(fill_value): + result.fillna(fill_value, inplace=True) - def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): - # XXX - return self + # set the default_fill_value + # if default_fill_value is not None: + # result._default_fill_value = default_fill_value + return result - @property - def _constructor(self): - def wrapper(data, index=None, columns=None, copy=False): - sf = SparseDataFrame(data, index=index, columns=columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) - if copy: - sf = sf.copy() - return sf return wrapper def _init_dict(self, data, index, columns, dtype=None): @@ -157,11 +153,10 @@ def _init_dict(self, data, index, columns, dtype=None): if index is None: index = extract_index(list(data.values())) - sp_maker = lambda x: SparseSeries(x, index=index, - kind=self.default_kind, - fill_value=self.default_fill_value, - copy=True) - + sp_maker = lambda x: SparseArray(x, + kind=self._default_kind, + fill_value=self._default_fill_value, + copy=True) sdict = {} for k, v in compat.iteritems(data): if isinstance(v, Series): @@ -170,7 +165,9 @@ def _init_dict(self, data, index, columns, dtype=None): v = v.reindex(index) if not isinstance(v, SparseSeries): - v = sp_maker(v) + v = sp_maker(v.values) + elif isinstance(v, SparseArray): + v = sp_maker(v.values) else: if isinstance(v, dict): v = [v.get(i, nan) for i in index] @@ -186,7 +183,7 @@ def _init_dict(self, data, index, columns, dtype=None): if c not in sdict: sdict[c] = 
sp_maker(nan_vec) - return sdict, columns, index + return dict_to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): data = _prep_ndarray(data, copy=False) @@ -208,19 +205,19 @@ def _init_matrix(self, data, index, columns, dtype=None): def __array_wrap__(self, result): return SparseDataFrame(result, index=self.index, columns=self.columns, - default_kind=self.default_kind, - default_fill_value=self.default_fill_value) + default_kind=self._default_kind, + default_fill_value=self._default_fill_value) def __getstate__(self): - series = dict((k, (v.sp_index, v.sp_values)) - for k, v in compat.iteritems(self)) - columns = self.columns - index = self.index - - return (series, columns, index, self.default_fill_value, - self.default_kind) - - def __setstate__(self, state): + # pickling + return dict(_typ=self._typ, + _subtyp=self._subtyp, + _data=self._data, + _default_fill_value=self._default_fill_value, + _default_kind=self._default_kind) + + def _unpickle_sparse_frame_compat(self, state): + """ original pickle format """ series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover @@ -238,11 +235,9 @@ def __setstate__(self, state): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) - self._series = series_dict - self.index = index - self.columns = columns - self.default_fill_value = fv - self.default_kind = kind + self._data = dict_to_manager(series_dict, columns, index) + self._default_fill_value = fv + self._default_kind = kind def to_dense(self): """ @@ -255,13 +250,6 @@ def to_dense(self): data = dict((k, v.to_dense()) for k, v in compat.iteritems(self)) return DataFrame(data, index=self.index) - def get_dtype_counts(self): - from collections import defaultdict - d = defaultdict(int) - for k, v in compat.iteritems(self): - d[v.dtype.name] += 1 - return Series(d) - def astype(self, dtype): raise NotImplementedError @@ -269,10 +257,18 @@ def copy(self, deep=True): """ Make a 
copy of this SparseDataFrame """ - series = dict((k, v.copy()) for k, v in compat.iteritems(self)) - return SparseDataFrame(series, index=self.index, columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + result = super(SparseDataFrame, self).copy(deep=deep) + result._default_fill_value = self._default_fill_value + result._default_kind = self._default_kind + return result + + @property + def default_fill_value(self): + return self._default_fill_value + + @property + def default_kind(self): + return self._default_kind @property def density(self): @@ -285,143 +281,74 @@ def density(self): tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) + def fillna(self, value=None, method=None, axis=0, inplace=False, + limit=None, downcast=None): + new_self = super( + SparseDataFrame, self).fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, downcast=downcast) + if not inplace: + self = new_self + + # set the fill value if we are filling as a scalar with nothing special + # going on + if value is not None and value == value and method is None and limit is None: + self._default_fill_value = value + + if not inplace: + return self + #---------------------------------------------------------------------- # Support different internal representation of SparseDataFrame - def _set_item(self, key, value): - sp_maker = lambda x: SparseSeries(x, index=self.index, - fill_value=self.default_fill_value, - kind=self.default_kind) - if hasattr(value, '__iter__'): + def _sanitize_column(self, key, value): + sp_maker = lambda x, index=None: SparseArray(x, + index=index, + fill_value=self._default_fill_value, + kind=self._default_kind) + if isinstance(value, SparseSeries): + clean = value.reindex( + self.index).as_sparse_array(fill_value=self._default_fill_value, + kind=self._default_kind) + + elif isinstance(value, SparseArray): + if len(value) != len(self.index): + raise AssertionError('Length of 
values does not match ' + 'length of index') + clean = value + + elif hasattr(value, '__iter__'): if isinstance(value, Series): - clean_series = value.reindex(self.index) + clean = value.reindex(self.index) if not isinstance(value, SparseSeries): - clean_series = sp_maker(clean_series) + clean = sp_maker(clean) else: - clean_series = sp_maker(value) + if len(value) != len(self.index): + raise AssertionError('Length of values does not match ' + 'length of index') + clean = sp_maker(value) - self._series[key] = clean_series # Scalar else: - self._series[key] = sp_maker(value) - - if key not in self.columns: - self._insert_column(key) - - def _insert_column(self, key): - self.columns = self.columns.insert(len(self.columns), key) - - def __delitem__(self, key): - """ - Delete column from DataFrame - """ - loc = self.columns.get_loc(key) - del self._series[key] - self._delete_column_index(loc) - - def _delete_column_index(self, loc): - if loc == len(self.columns) - 1: - new_columns = self.columns[:loc] - else: - new_columns = Index(np.concatenate((self.columns[:loc], - self.columns[loc + 1:]))) - self.columns = new_columns + clean = sp_maker(value, self.index) - _index = None - - def _set_index(self, index): - self._index = _ensure_index(index) - for v in self._series.values(): - v.index = self._index - - def _get_index(self): - return self._index - - def _get_columns(self): - return self._columns - - def _set_columns(self, cols): - if len(cols) != len(self._series): - raise Exception('Columns length %d did not match data %d!' 
% - (len(cols), len(self._series))) - - cols = _ensure_index(cols) - - # rename the _series if needed - existing = getattr(self,'_columns',None) - if existing is not None and len(existing) == len(cols): - - new_series = {} - for i, col in enumerate(existing): - new_col = cols[i] - if new_col in new_series: # pragma: no cover - raise Exception('Non-unique mapping!') - new_series[new_col] = self._series.get(col) - - self._series = new_series - - self._columns = cols - - index = property(fget=_get_index, fset=_set_index) - columns = property(fget=_get_columns, fset=_set_columns) + # always return a SparseArray! + return clean def __getitem__(self, key): """ Retrieve column or slice from DataFrame """ - try: - # unsure about how kludgy this is - s = self._series[key] - s.name = key - return s - except (TypeError, KeyError): - if isinstance(key, slice): - date_rng = self.index[key] - return self.reindex(date_rng) - elif isinstance(key, (np.ndarray, list)): - return self._getitem_array(key) - else: # pragma: no cover - raise - - def icol(self, i): - """ - Retrieve the i-th column or columns of the DataFrame by location - - Parameters - ---------- - i : int, slice, or sequence of integers - - Notes - ----- - If slice passed, the resulting data will be a view - - Returns - ------- - column : Series (int) or DataFrame (slice, sequence) - """ - if isinstance(i, slice): - # need to return view - lab_slice = slice(label[0], label[-1]) - return self.ix[:, lab_slice] + if isinstance(key, slice): + date_rng = self.index[key] + return self.reindex(date_rng) + elif isinstance(key, (np.ndarray, list, Series)): + return self._getitem_array(key) else: - label = self.columns[i] - if isinstance(label, Index): - if self.columns.inferred_type == 'integer': - # XXX re: #2228 - return self.reindex(columns=label) - else: - return self.ix[:, i] - - return self[label] - # values = self._data.iget(i) - # return self._col_klass.from_array( - # values, index=self.index, name=label, - # 
fill_value= self.default_fill_value) + return self._get_item_cache(key) @Appender(DataFrame.get_value.__doc__, indents=0) def get_value(self, index, col): - s = self._series[col] - return s.get_value(index) + return self._get_item_cache(col).get_value(index) def set_value(self, index, col, value): """ @@ -444,8 +371,8 @@ def set_value(self, index, col, value): frame : DataFrame """ dense = self.to_dense().set_value(index, col, value) - return dense.to_sparse(kind=self.default_kind, - fill_value=self.default_fill_value) + return dense.to_sparse(kind=self._default_kind, + fill_value=self._default_fill_value) def _slice(self, slobj, axis=0, raise_on_error=False): if axis == 0: @@ -461,24 +388,6 @@ def _slice(self, slobj, axis=0, raise_on_error=False): return self.reindex(index=new_index, columns=new_columns) - def as_matrix(self, columns=None): - """ - Convert the frame to its Numpy-array matrix representation - - Columns are presented in sorted order unless a specific list - of columns is provided. 
- """ - if columns is None: - columns = self.columns - - if len(columns) == 0: - return np.zeros((len(self.index), 0), dtype=float) - - return np.array([self.icol(i).values - for i in range(len(self.columns))]).T - - values = property(as_matrix) - def xs(self, key, axis=0, copy=False): """ Returns a row (cross-section) from the SparseDataFrame as a Series @@ -497,9 +406,8 @@ def xs(self, key, axis=0, copy=False): return data i = self.index.get_loc(key) - series = self._series - values = [series[k][i] for k in self.columns] - return Series(values, index=self.columns) + data = self.take([i]).get_values()[0] + return Series(data, index=self.columns) #---------------------------------------------------------------------- # Arithmetic-related methods @@ -516,6 +424,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): return SparseDataFrame(index=new_index) new_data = {} + new_fill_value = None if fill_value is not None: # TODO: be a bit more intelligent here for col in new_columns: @@ -526,12 +435,25 @@ def _combine_frame(self, other, func, fill_value=None, level=None): result = result.to_sparse(fill_value=this[col].fill_value) new_data[col] = result else: + for col in new_columns: if col in this and col in other: new_data[col] = func(this[col], other[col]) - return self._constructor(data=new_data, index=new_index, - columns=new_columns) + # if the fill values are the same use them? 
or use a valid one + other_fill_value = getattr(other, 'default_fill_value', np.nan) + if self.default_fill_value == other_fill_value: + new_fill_value = self.default_fill_value + elif np.isnan(self.default_fill_value) and not np.isnan(other_fill_value): + new_fill_value = other_fill_value + elif not np.isnan(self.default_fill_value) and np.isnan(other_fill_value): + new_fill_value = self.default_fill_value + + return self._constructor(data=new_data, + index=new_index, + columns=new_columns, + default_fill_value=new_fill_value, + fill_value=new_fill_value) def _combine_match_index(self, other, func, fill_value=None): new_data = {} @@ -550,8 +472,18 @@ def _combine_match_index(self, other, func, fill_value=None): for col, series in compat.iteritems(this): new_data[col] = func(series.values, other.values) - return self._constructor(new_data, index=new_index, - columns=self.columns) + # fill_value is a function of our operator + if isnull(other.fill_value) or isnull(self.default_fill_value): + fill_value = np.nan + else: + fill_value = func(np.float64(self.default_fill_value), + np.float64(other.fill_value)) + + return self._constructor(new_data, + index=new_index, + columns=self.columns, + default_fill_value=fill_value, + fill_value=self.default_fill_value) def _combine_match_columns(self, other, func, fill_value): # patched version of DataFrame._combine_match_columns to account for @@ -573,16 +505,22 @@ def _combine_match_columns(self, other, func, fill_value): for col in intersection: new_data[col] = func(self[col], float(other[col])) - return self._constructor(new_data, index=self.index, - columns=union) + return self._constructor(new_data, + index=self.index, + columns=union, + default_fill_value=self.default_fill_value, + fill_value=self.default_fill_value) def _combine_const(self, other, func): new_data = {} for col, series in compat.iteritems(self): new_data[col] = func(series, other) - return self._constructor(data=new_data, index=self.index, - 
columns=self.columns) + return self._constructor(data=new_data, + index=self.index, + columns=self.columns, + default_fill_value=self.default_fill_value, + fill_value=self.default_fill_value) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False): @@ -604,7 +542,10 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, need_mask = mask.any() new_series = {} - for col, series in compat.iteritems(self): + for col, series in self.iteritems(): + if mask.all(): + continue + values = series.values new = values.take(indexer) @@ -614,7 +555,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, new_series[col] = new return SparseDataFrame(new_series, index=index, columns=self.columns, - default_fill_value=self.default_fill_value) + default_fill_value=self._default_fill_value) def _reindex_columns(self, columns, copy, level, fill_value, limit=None, takeable=False): @@ -630,10 +571,13 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None, # TODO: fill value handling sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns) return SparseDataFrame(sdict, index=self.index, columns=columns, - default_fill_value=self.default_fill_value) + default_fill_value=self._default_fill_value) + + def _reindex_with_indexers(self, reindexers, method=None, copy=False, fill_value=np.nan): + + index, row_indexer = reindexers.get(0, (None, None)) + columns, col_indexer = reindexers.get(1, (None, None)) - def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, - copy, fill_value): if columns is None: columns = self.columns @@ -642,73 +586,20 @@ def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, if col not in self: continue if row_indexer is not None: - new_arrays[col] = com.take_1d(self[col].values, row_indexer, - fill_value=fill_value) + new_arrays[col] = com.take_1d( + self[col].get_values(), row_indexer, + fill_value=fill_value) 
else: new_arrays[col] = self[col] return self._constructor(new_arrays, index=index, columns=columns) - def _rename_index_inplace(self, mapper): - self.index = [mapper(x) for x in self.index] - - def _rename_columns_inplace(self, mapper): - new_series = {} - new_columns = [] - - for col in self.columns: - new_col = mapper(col) - if new_col in new_series: # pragma: no cover - raise Exception('Non-unique mapping!') - new_series[new_col] = self[col] - new_columns.append(new_col) - - self.columns = new_columns - self._series = new_series - - def take(self, indices, axis=0, convert=True): - """ - Analogous to ndarray.take, return SparseDataFrame corresponding to - requested indices along an axis - - Parameters - ---------- - indices : list / array of ints - axis : {0, 1} - convert : convert indices for negative values, check bounds, default True - mainly useful for an user routine calling - - Returns - ------- - taken : SparseDataFrame - """ - - indices = com._ensure_platform_int(indices) - - # check/convert indicies here - if convert: - indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) - - new_values = self.values.take(indices, axis=axis) - if axis == 0: - new_columns = self.columns - new_index = self.index.take(indices) - else: - new_columns = self.columns.take(indices) - new_index = self.index - return self._constructor(new_values, index=new_index, - columns=new_columns) - - def add_prefix(self, prefix): - f = (('%s' % prefix) + '%s').__mod__ - return self.rename(columns=f) - - def add_suffix(self, suffix): - f = ('%s' + ('%s' % suffix)).__mod__ - return self.rename(columns=f) - def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): + if isinstance(other, Series): + assert(other.name is not None) + other = SparseDataFrame({other.name: other}, + default_fill_value=self._default_fill_value) if on is not None: raise NotImplementedError else: @@ -720,7 +611,7 @@ def _join_index(self, other, how, lsuffix, rsuffix): raise 
AssertionError() other = SparseDataFrame({other.name: other}, - default_fill_value=self.default_fill_value) + default_fill_value=self._default_fill_value) join_index = self.index.join(other.index, how=how) @@ -729,11 +620,8 @@ def _join_index(self, other, how, lsuffix, rsuffix): this, other = this._maybe_rename_join(other, lsuffix, rsuffix) - result_series = this._series - other_series = other._series - result_series.update(other_series) - - return self._constructor(result_series, index=join_index) + from pandas import concat + return concat([this, other], axis=1, verify_integrity=True) def _maybe_rename_join(self, other, lsuffix, rsuffix): intersection = self.columns.intersection(other.columns) @@ -765,8 +653,8 @@ def transpose(self): """ return SparseDataFrame(self.values.T, index=self.columns, columns=self.index, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + default_fill_value=self._default_fill_value, + default_kind=self._default_kind) T = property(transpose) @Appender(DataFrame.count.__doc__) @@ -788,32 +676,7 @@ def cumsum(self, axis=0): """ return self.apply(lambda x: x.cumsum(), axis=axis) - def shift(self, periods, freq=None, **kwds): - """ - Analogous to DataFrame.shift - """ - from pandas.core.series import _resolve_offset - - offset = _resolve_offset(freq, kwds) - - new_series = {} - if offset is None: - new_index = self.index - for col, s in compat.iteritems(self): - new_series[col] = s.shift(periods) - else: - new_index = self.index.shift(periods, offset) - for col, s in compat.iteritems(self): - new_series[col] = SparseSeries(s.sp_values, index=new_index, - sparse_index=s.sp_index, - fill_value=s.fill_value) - - return SparseDataFrame(new_series, index=new_index, - columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) - - def apply(self, func, axis=0, broadcast=False): + def apply(self, func, axis=0, broadcast=False, reduce=False): """ Analogous to DataFrame.apply, 
for SparseDataFrame @@ -841,11 +704,11 @@ def apply(self, func, axis=0, broadcast=False): new_series[k] = applied return SparseDataFrame(new_series, index=self.index, columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + default_fill_value=self._default_fill_value, + default_kind=self._default_kind) else: if not broadcast: - return self._apply_standard(func, axis) + return self._apply_standard(func, axis, reduce=reduce) else: return self._apply_broadcast(func, axis) @@ -866,18 +729,13 @@ def applymap(self, func): """ return self.apply(lambda x: lmap(func, x)) - @Appender(DataFrame.fillna.__doc__) - def fillna(self, value=None, method=None, inplace=False, limit=None): - new_series = {} - for k, v in compat.iteritems(self): - new_series[k] = v.fillna(value=value, method=method, limit=limit) +def dict_to_manager(sdict, columns, index): + """ create and return the block manager from a dict of series, columns, index """ - if inplace: - self._series = new_series - return self - else: - return self._constructor(new_series, index=self.index, - columns=self.columns) + # from BlockManager perspective + axes = [_ensure_index(columns), _ensure_index(index)] + + return create_block_manager_from_arrays([sdict[c] for c in columns], columns, axes) def stack_sparse_frame(frame): diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py index ceb03eae5d282..bfc4ab9d3eb48 100644 --- a/pandas/sparse/list.py +++ b/pandas/sparse/list.py @@ -7,6 +7,7 @@ class SparseList(PandasObject): + """ Data structure for accumulating data to be converted into a SparseArray. 
Has similar API to the standard Python list @@ -16,6 +17,7 @@ class SparseList(PandasObject): data : scalar or array-like fill_value : scalar, default NaN """ + def __init__(self, data=None, fill_value=np.nan): self.fill_value = fill_value self._chunks = [] diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 260d648243633..ab946090c8ea8 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -40,6 +40,7 @@ def __set__(self, obj, value): class SparsePanel(Panel): + """ Sparse version of Panel @@ -60,9 +61,12 @@ class SparsePanel(Panel): ----- """ ndim = 3 + _typ = 'panel' + _subtyp = 'sparse_panel' def __init__(self, frames, items=None, major_axis=None, minor_axis=None, - default_fill_value=np.nan, default_kind='block'): + default_fill_value=np.nan, default_kind='block', + copy=False): if isinstance(frames, np.ndarray): new_frames = {} for item, vals in zip(items, frames): @@ -130,6 +134,9 @@ def to_dense(self): return Panel(self.values, self.items, self.major_axis, self.minor_axis) + def as_matrix(self): + return self.values + @property def values(self): # return dense values @@ -328,7 +335,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None, new_frames[item] = self._frames[item] else: raise NotImplementedError('Reindexing with new items not yet ' - 'supported') + 'supported') else: new_frames = self._frames diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 83adf135d47d3..6d7e4994f3694 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -10,14 +10,18 @@ import operator -from pandas.core.common import isnull +from pandas.core.common import isnull, _values_from_object from pandas.core.index import Index, _ensure_index -from pandas.core.series import Series, TimeSeries, _maybe_match_name +from pandas.core.series import Series, _maybe_match_name from pandas.core.frame import DataFrame +from pandas.core.internals import SingleBlockManager +from pandas.core import generic import 
pandas.core.common as com import pandas.core.datetools as datetools +import pandas.index as _index from pandas import compat +from pandas.util import rwproperty from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray) from pandas._sparse import BlockIndex, IntIndex @@ -34,6 +38,7 @@ def _sparse_op_wrap(op, name): Wrapper function for Series arithmetic operations, to avoid code duplication. """ + def wrapper(self, other): if isinstance(other, Series): if not isinstance(other, SparseSeries): @@ -66,14 +71,11 @@ def _sparse_series_op(left, right, op, name): new_name = _maybe_match_name(left, right) result = _sparse_array_op(left, right, op, name) - result = result.view(SparseSeries) - result.index = new_index - result.name = new_name + return SparseSeries(result, index=new_index, name=new_name) - return result +class SparseSeries(Series): -class SparseSeries(SparseArray, Series): """Data structure for labeled, sparse floating point data Parameters @@ -91,111 +93,156 @@ class SparseSeries(SparseArray, Series): must change values, convert to dense, make your changes, then convert back to sparse """ - __array_priority__ = 15 + _subtyp = 'sparse_series' - sp_index = None - fill_value = None + def __init__(self, data, index=None, sparse_index=None, kind='block', + fill_value=None, name=None, dtype=None, copy=False, + fastpath=False): - def __new__(cls, data, index=None, sparse_index=None, kind='block', - fill_value=None, name=None, copy=False): + # we are called internally, so short-circuit + if fastpath: - is_sparse_array = isinstance(data, SparseArray) - if fill_value is None: - if is_sparse_array: - fill_value = data.fill_value - else: - fill_value = nan - - if is_sparse_array: - if isinstance(data, SparseSeries) and index is None: - index = data.index - elif index is not None: - if not (len(index) == len(data)): - raise AssertionError() - - sparse_index = data.sp_index - values = np.asarray(data) - elif isinstance(data, (Series, dict)): - if index 
is None: - index = data.index - - data = Series(data) - values, sparse_index = make_sparse(data, kind=kind, - fill_value=fill_value) - elif isinstance(data, (tuple, list, np.ndarray)): - # array-like - if sparse_index is None: - values, sparse_index = make_sparse(data, kind=kind, - fill_value=fill_value) - else: - values = data - if not (len(values) == sparse_index.npoints): - raise AssertionError() + # data is an ndarray, index is defined + data = SingleBlockManager(data, index, fastpath=True) + if copy: + data = data.copy() else: - if index is None: - raise TypeError('must pass index!') - length = len(index) + is_sparse_array = isinstance(data, SparseArray) + if fill_value is None: + if is_sparse_array: + fill_value = data.fill_value + else: + fill_value = nan + + if is_sparse_array: + if isinstance(data, SparseSeries) and index is None: + index = data.index + elif index is not None: + assert(len(index) == len(data)) + + sparse_index = data.sp_index + data = np.asarray(data) + + elif isinstance(data, SparseSeries): + if index is None: + index = data.index + + # extract the SingleBlockManager + data = data._data + + elif isinstance(data, (Series, dict)): + if index is None: + index = data.index + + data = Series(data) + data, sparse_index = make_sparse(data, kind=kind, + fill_value=fill_value) + + elif isinstance(data, (tuple, list, np.ndarray)): + # array-like + if sparse_index is None: + data, sparse_index = make_sparse(data, kind=kind, + fill_value=fill_value) + else: + assert(len(data) == sparse_index.npoints) - if data == fill_value or (isnull(data) - and isnull(fill_value)): - if kind == 'block': - sparse_index = BlockIndex(length, [], []) + elif isinstance(data, SingleBlockManager): + if dtype is not None: + data = data.astype(dtype) + if index is None: + index = data.index else: - sparse_index = IntIndex(length, []) - values = np.array([]) + data = data.reindex(index, copy=False) + else: - if kind == 'block': - locs, lens = ([0], [length]) if length else 
([], []) - sparse_index = BlockIndex(length, locs, lens) + + length = len(index) + + if data == fill_value or (isnull(data) + and isnull(fill_value)): + if kind == 'block': + sparse_index = BlockIndex(length, [], []) + else: + sparse_index = IntIndex(length, []) + data = np.array([]) + else: - sparse_index = IntIndex(length, index) - values = np.empty(length) - values.fill(data) + if kind == 'block': + locs, lens = ([0], [length]) if length else ([], []) + sparse_index = BlockIndex(length, locs, lens) + else: + sparse_index = IntIndex(length, index) + v = data + data = np.empty(length) + data.fill(v) + + if index is None: + index = com._default_index(sparse_index.length) + index = _ensure_index(index) - if index is None: - index = com._default_index(sparse_index.length) - index = _ensure_index(index) + # create/copy the manager + if isinstance(data, SingleBlockManager): - # Create array, do *not* copy data by default - if copy: - subarr = np.array(values, dtype=np.float64, copy=True) - else: - subarr = np.asarray(values, dtype=np.float64) + if copy: + data = data.copy() + else: + + # create a sparse array + if not isinstance(data, SparseArray): + data = SparseArray( + data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype, copy=copy) + + data = SingleBlockManager(data, index) + + generic.NDFrame.__init__(self, data) + + self.index = index + self.name = name + + @property + def values(self): + """ return the array """ + return self._data._values + + def get_values(self): + """ same as values """ + return self._data._values.to_dense().view() + + @property + def block(self): + return self._data._block - if index.is_all_dates: - cls = SparseTimeSeries + @rwproperty.getproperty + def fill_value(self): + return self.block.fill_value - # Change the class of the array to be the subclass type. 
- output = subarr.view(cls) - output.sp_index = sparse_index - output.fill_value = np.float64(fill_value) - output.index = index - output.name = name - return output + @rwproperty.setproperty + def fill_value(self, v): + self.block.fill_value = v - def _make_time_series(self): - # oh boy #2139 - self.__class__ = SparseTimeSeries + @property + def sp_index(self): + return self.block.sp_index + + @property + def sp_values(self): + return self.values.sp_values + + @property + def npoints(self): + return self.sp_index.npoints @classmethod - def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None): + def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): """ Simplified alternate constructor """ - return SparseSeries(arr, index=index, name=name, copy=copy, fill_value=fill_value) - - def __init__(self, data, index=None, sparse_index=None, kind='block', - fill_value=None, name=None, copy=False): - pass + return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath) @property def _constructor(self): - def make_sp_series(data, index=None, name=None): - return SparseSeries(data, index=index, fill_value=self.fill_value, - kind=self.kind, name=name) - - return make_sp_series + return SparseSeries @property def kind(self): @@ -204,42 +251,21 @@ def kind(self): elif isinstance(self.sp_index, IntIndex): return 'integer' - def __array_finalize__(self, obj): - """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. 
- """ - self._index = getattr(obj, '_index', None) - self.name = getattr(obj, 'name', None) - self.sp_index = getattr(obj, 'sp_index', None) - self.fill_value = getattr(obj, 'fill_value', None) - - def __reduce__(self): - """Necessary for making this object picklable""" - object_state = list(ndarray.__reduce__(self)) + def as_sparse_array(self, kind=None, fill_value=None, copy=False): + """ return my self as a sparse array, do not copy by default """ - subclass_state = (self.index, self.fill_value, self.sp_index, - self.name) - object_state[2] = (object_state[2], subclass_state) - return tuple(object_state) - - def __setstate__(self, state): - """Necessary for making this object picklable""" - nd_state, own_state = state - ndarray.__setstate__(self, nd_state) - - index, fill_value, sp_index = own_state[:3] - name = None - if len(own_state) > 3: - name = own_state[3] - - self.sp_index = sp_index - self.fill_value = fill_value - self.index = index - self.name = name + if fill_value is None: + fill_value = self.fill_value + if kind is None: + kind = self.kind + return SparseArray(self.values, + sparse_index=self.sp_index, + fill_value=fill_value, + kind=kind, + copy=copy) def __len__(self): - return self.sp_index.length + return len(self.block) def __unicode__(self): # currently, unicode is same as repr...fixes infinite loop @@ -256,6 +282,14 @@ def __unicode__(self): __floordiv__ = _sparse_op_wrap(operator.floordiv, 'floordiv') __pow__ = _sparse_op_wrap(operator.pow, 'pow') + # Inplace operators + __iadd__ = __add__ + __isub__ = __sub__ + __imul__ = __mul__ + __itruediv__ = __truediv__ + __ifloordiv__ = __floordiv__ + __ipow__ = __pow__ + # reverse operators __radd__ = _sparse_op_wrap(operator.add, '__radd__') __rsub__ = _sparse_op_wrap(lambda x, y: y - x, '__rsub__') @@ -269,6 +303,71 @@ def __unicode__(self): __div__ = _sparse_op_wrap(operator.div, 'div') __rdiv__ = _sparse_op_wrap(lambda x, y: y / x, '__rdiv__') + def __array_wrap__(self, result): + """ + Gets 
called prior to a ufunc (and after) + """ + return self._constructor(result, + index=self.index, + sparse_index=self.sp_index, + fill_value=self.fill_value, + copy=False) + + def __array_finalize__(self, obj): + """ + Gets called after any ufunc or other array operations, necessary + to pass on the index. + """ + self.name = getattr(obj, 'name', None) + self.fill_value = getattr(obj, 'fill_value', None) + + def __getstate__(self): + # pickling + return dict(_typ=self._typ, + _subtyp=self._subtyp, + _data=self._data, + fill_value=self.fill_value, + name=self.name) + + def _unpickle_series_compat(self, state): + + nd_state, own_state = state + + # recreate the ndarray + data = np.empty(nd_state[1], dtype=nd_state[2]) + np.ndarray.__setstate__(data, nd_state) + + index, fill_value, sp_index = own_state[:3] + name = None + if len(own_state) > 3: + name = own_state[3] + + # create a sparse array + if not isinstance(data, SparseArray): + data = SparseArray( + data, sparse_index=sp_index, fill_value=fill_value, copy=False) + + # recreate + data = SingleBlockManager(data, index, fastpath=True) + generic.NDFrame.__init__(self, data) + + self._set_axis(0, index) + self.name = name + + def __iter__(self): + """ forward to the array """ + return iter(self.values) + + def _set_subtyp(self, is_all_dates): + if is_all_dates: + object.__setattr__(self, '_subtyp', 'sparse_time_series') + else: + object.__setattr__(self, '_subtyp', 'sparse_series') + + def _get_val_at(self, loc): + """ forward to the array """ + return self.block.values._get_val_at(loc) + def __getitem__(self, key): """ @@ -288,10 +387,14 @@ def __getitem__(self, key): # is there a case where this would NOT be an ndarray? 
# need to find an example, I took out the case for now + key = _values_from_object(key) dataSlice = self.values[key] new_index = Index(self.index.view(ndarray)[key]) return self._constructor(dataSlice, index=new_index, name=self.name) + def _set_with_engine(self, key, value): + return self.set_value(key, value) + def abs(self): """ Return an object with absolute value taken. Only applicable to objects @@ -365,8 +468,33 @@ def set_value(self, label, value): ------- series : SparseSeries """ - dense = self.to_dense().set_value(label, value) - return dense.to_sparse(kind=self.kind, fill_value=self.fill_value) + values = self.to_dense() + + # if the label doesn't exist, we will create a new object here + # and possibily change the index + new_values = values.set_value(label, value) + if new_values is not None: + values = new_values + new_index = values.index + values = SparseArray( + values, fill_value=self.fill_value, kind=self.kind) + self._data = SingleBlockManager(values, new_index) + self._index = new_index + + def _set_values(self, key, value): + + # this might be inefficient as we have to recreate the sparse array + # rather than setting individual elements, but have to convert + # the passed slice/boolean that's in dense space into a sparse indexer + # not sure how to do that! 
+ if isinstance(key, Series): + key = key.values + + values = self.values.to_dense() + values[key] = _index.convert_scalar(values, value) + values = SparseArray( + values, fill_value=self.fill_value, kind=self.kind) + self._data = SingleBlockManager(values, self.index) def to_dense(self, sparse_only=False): """ @@ -377,34 +505,25 @@ def to_dense(self, sparse_only=False): index = self.index.take(int_index.indices) return Series(self.sp_values, index=index, name=self.name) else: - return Series(self.values, index=self.index, name=self.name) + return Series(self.values.to_dense(), index=self.index, name=self.name) @property def density(self): r = float(self.sp_index.npoints) / float(self.sp_index.length) return r - def astype(self, dtype=None): - """ - - """ - if dtype is not None and dtype not in (np.float_, float): - raise TypeError('Can only support floating point data') - - return self.copy() - def copy(self, deep=True): """ Make a copy of the SparseSeries. Only the actual sparse values need to be copied """ + new_data = self._data if deep: - values = self.sp_values.copy() - else: - values = self.sp_values - return SparseSeries(values, index=self.index, - sparse_index=self.sp_index, - fill_value=self.fill_value, name=self.name) + new_data = self._data.copy() + + return self._constructor(new_data, index=self.index, + sparse_index=self.sp_index, + fill_value=self.fill_value, name=self.name) def reindex(self, index=None, method=None, copy=True, limit=None): """ @@ -423,19 +542,7 @@ def reindex(self, index=None, method=None, copy=True, limit=None): return self.copy() else: return self - - if len(self.index) == 0: - # FIXME: inelegant / slow - values = np.empty(len(new_index), dtype=np.float64) - values.fill(nan) - return SparseSeries(values, index=new_index, - fill_value=self.fill_value) - - new_index, fill_vec = self.index.reindex(index, method=method, - limit=limit) - new_values = com.take_1d(self.values, fill_vec) - return SparseSeries(new_values, index=new_index, 
- fill_value=self.fill_value, name=self.name) + return self._constructor(self._data.reindex(new_index, method=method, limit=limit, copy=copy), index=new_index, name=self.name) def sparse_reindex(self, new_index): """ @@ -452,26 +559,25 @@ def sparse_reindex(self, new_index): if not (isinstance(new_index, splib.SparseIndex)): raise AssertionError() - new_values = self.sp_index.to_int_index().reindex(self.sp_values, - self.fill_value, - new_index) - return SparseSeries(new_values, index=self.index, - sparse_index=new_index, - fill_value=self.fill_value) - - @Appender(Series.fillna.__doc__) - def fillna(self, value=None, method=None, inplace=False, limit=None): - dense = self.to_dense() - filled = dense.fillna(value=value, method=method, limit=limit) - result = filled.to_sparse(kind=self.kind, - fill_value=self.fill_value) + block = self.block.sparse_reindex(new_index) + new_data = SingleBlockManager(block, block.ref_items) + return self._constructor(new_data, index=self.index, + sparse_index=new_index, + fill_value=self.fill_value) - if inplace: - self.sp_values[:] = result.values - return self + def _reindex_indexer(self, new_index, indexer, copy): + if indexer is not None: + new_values = com.take_1d(self.values.values, indexer) else: + if copy: + result = self.copy() + else: + result = self return result + # be subclass-friendly + return self._constructor(new_values, new_index, name=self.name) + def take(self, indices, axis=0, convert=True): """ Sparse-compatible version of ndarray.take @@ -480,7 +586,7 @@ def take(self, indices, axis=0, convert=True): ------- taken : ndarray """ - new_values = SparseArray.take(self, indices) + new_values = SparseArray.take(self.values, indices) new_index = self.index.take(indices) return self._constructor(new_values, index=new_index) @@ -488,22 +594,14 @@ def cumsum(self, axis=0, dtype=None, out=None): """ Cumulative sum of values. Preserves locations of NaN values - Extra parameters are to preserve ndarray interface. 
- Returns ------- cumsum : Series or SparseSeries """ - result = SparseArray.cumsum(self) - if isinstance(result, SparseArray): - result = self._attach_meta(result) - return result - - def _attach_meta(self, sparse_arr): - sparse_series = sparse_arr.view(SparseSeries) - sparse_series.index = self.index - sparse_series.name = self.name - return sparse_series + new_array = SparseArray.cumsum(self.values) + if isinstance(new_array, SparseArray): + return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index, name=self.name) + return Series(new_array, index=self.index, name=self.name) def dropna(self): """ @@ -514,6 +612,7 @@ def dropna(self): if isnull(self.fill_value): return dense_valid else: + dense_valid = dense_valid[dense_valid != self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value) def shift(self, periods, freq=None, **kwds): @@ -535,10 +634,10 @@ def shift(self, periods, freq=None, **kwds): return self.copy() if offset is not None: - return SparseSeries(self.sp_values, - sparse_index=self.sp_index, - index=self.index.shift(periods, offset), - fill_value=self.fill_value) + return self._constructor(self.sp_values, + sparse_index=self.sp_index, + index=self.index.shift(periods, offset), + fill_value=self.fill_value) int_index = self.sp_index.to_int_index() new_indices = int_index.indices + periods @@ -550,10 +649,10 @@ def shift(self, periods, freq=None, **kwds): if isinstance(self.sp_index, BlockIndex): new_sp_index = new_sp_index.to_block_index() - return SparseSeries(self.sp_values[start:end].copy(), - index=self.index, - sparse_index=new_sp_index, - fill_value=self.fill_value) + return self._constructor(self.sp_values[start:end].copy(), + index=self.index, + sparse_index=new_sp_index, + fill_value=self.fill_value) def combine_first(self, other): """ @@ -574,25 +673,5 @@ def combine_first(self, other): dense_combined = self.to_dense().combine_first(other) return dense_combined.to_sparse(fill_value=self.fill_value) 
- -class SparseTimeSeries(SparseSeries, TimeSeries): - """Data structure for labeled, sparse floating point data, with `TimeStamp` - index labels - - Parameters - ---------- - data : {array-like, Series, SparseSeries, dict} - kind : {'block', 'integer'} - fill_value : float - Defaults to NaN (code for missing) - sparse_index : {BlockIndex, IntIndex}, optional - Only if you have one. Mainly used internally - - Notes - ----- - SparseSeries objects are immutable via the typical Python means. If you - must change values, convert to dense, make your changes, then convert back - to sparse - """ - - pass +# backwards compatiblity +SparseTimeSeries = SparseSeries diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py index bd5f99ef73fe8..3d2b67f33861d 100644 --- a/pandas/sparse/tests/test_array.py +++ b/pandas/sparse/tests/test_array.py @@ -33,17 +33,18 @@ def setUp(self): def test_get_item(self): errmsg = re.compile("bounds") - assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[11]) - assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[-11]) + assertRaisesRegexp(IndexError, errmsg, lambda: self.arr[11]) + assertRaisesRegexp(IndexError, errmsg, lambda: self.arr[-11]) self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1]) def test_bad_take(self): - assertRaisesRegexp(IndexError, "bounds", lambda : self.arr.take(11)) - self.assertRaises(IndexError, lambda : self.arr.take(-11)) + assertRaisesRegexp(IndexError, "bounds", lambda: self.arr.take(11)) + self.assertRaises(IndexError, lambda: self.arr.take(-11)) def test_set_item(self): def setitem(): self.arr[5] = 3 + def setslice(): self.arr[1:5] = 2 assertRaisesRegexp(TypeError, "item assignment", setitem) diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index 248c920b03838..ba002415c1112 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -75,6 +75,7 @@ def _test_data2_zero(): arr[np.isnan(arr)] = 0 return arr, 
index + def assert_sp_series_equal(a, b, exact_indices=True): assert(a.index.equals(b.index)) assert_sp_array_equal(a, b) @@ -152,17 +153,34 @@ def setUp(self): self.ziseries2 = SparseSeries(arr, index=index, kind='integer', fill_value=0) + def test_iteration_and_str(self): + [x for x in self.bseries] + str(self.bseries) + def test_construct_DataFrame_with_sp_series(self): # it works! df = DataFrame({'col': self.bseries}) + # printing & access + df.iloc[:1] + df['col'] + df.dtypes + str(df) + + assert_sp_series_equal(df['col'], self.bseries) + + # blocking + expected = Series({'col': 'float64:sparse'}) + result = df.ftypes + assert_series_equal(expected, result) + def test_series_density(self): # GH2803 ts = Series(np.random.randn(10)) ts[2:-2] = nan sts = ts.to_sparse() - density = sts.density # don't die - self.assertEqual(density,4/10.0) + density = sts.density # don't die + self.assertEqual(density, 4 / 10.0) def test_sparse_to_dense(self): arr, index = _test_data1() @@ -209,7 +227,8 @@ def test_constructor(self): tm.assert_isinstance(self.iseries.sp_index, IntIndex) self.assertEquals(self.zbseries.fill_value, 0) - assert_equal(self.zbseries.values, self.bseries.to_dense().fillna(0)) + assert_equal(self.zbseries.values.values, + self.bseries.to_dense().fillna(0).values) # pass SparseSeries s2 = SparseSeries(self.bseries) @@ -231,7 +250,7 @@ def test_constructor(self): # pass dict? 
# don't copy the data by default - values = np.ones(len(self.bseries.sp_values)) + values = np.ones(self.bseries.npoints) sp = SparseSeries(values, sparse_index=self.bseries.sp_index) sp.sp_values[:5] = 97 self.assert_(values[0] == 97) @@ -258,10 +277,10 @@ def test_constructor_ndarray(self): def test_constructor_nonnan(self): arr = [0, 0, 0, nan, nan] sp_series = SparseSeries(arr, fill_value=0) - assert_equal(sp_series.values, arr) + assert_equal(sp_series.values.values, arr) def test_copy_astype(self): - cop = self.bseries.astype(np.float_) + cop = self.bseries.astype(np.float64) self.assert_(cop is not self.bseries) self.assert_(cop.sp_index is self.bseries.sp_index) self.assert_(cop.dtype == np.float64) @@ -272,7 +291,7 @@ def test_copy_astype(self): assert_sp_series_equal(cop2, self.iseries) # test that data is copied - cop.sp_values[:5] = 97 + cop[:5] = 97 self.assert_(cop.sp_values[0] == 97) self.assert_(self.bseries.sp_values[0] != 97) @@ -352,15 +371,14 @@ def test_get_get_value(self): assert_almost_equal(self.bseries.get_value(10), self.bseries[10]) def test_set_value(self): + idx = self.btseries.index[7] - res = self.btseries.set_value(idx, 0) - self.assert_(res is not self.btseries) - self.assertEqual(res[idx], 0) + self.btseries.set_value(idx, 0) + self.assertEqual(self.btseries[idx], 0) - res = self.iseries.set_value('foobar', 0) - self.assert_(res is not self.iseries) - self.assert_(res.index[-1] == 'foobar') - self.assertEqual(res['foobar'], 0) + self.iseries.set_value('foobar', 0) + self.assert_(self.iseries.index[-1] == 'foobar') + self.assertEqual(self.iseries['foobar'], 0) def test_getitem_slice(self): idx = self.bseries.index @@ -386,8 +404,8 @@ def _compare_with_dense(sp): def _compare(idx): dense_result = dense.take(idx).values sparse_result = sp.take(idx) - tm.assert_isinstance(sparse_result, SparseSeries) - assert_almost_equal(dense_result, sparse_result.values) + self.assert_(isinstance(sparse_result, SparseSeries)) + 
assert_almost_equal(dense_result, sparse_result.values.values) _compare([1., 2., 3., 4., 5., 0.]) _compare([7, 2, 9, 0, 4]) @@ -395,7 +413,6 @@ def _compare(idx): self._check_all(_compare_with_dense) - self.assertRaises(Exception, self.bseries.take, [-1, 0]) self.assertRaises(Exception, self.bseries.take, [0, len(self.bseries) + 1]) @@ -404,11 +421,13 @@ def _compare(idx): assert_almost_equal(sp.take([0, 1, 2, 3, 4]), np.repeat(nan, 5)) def test_setitem(self): - self.assertRaises(Exception, self.bseries.__setitem__, 5, 7.) - self.assertRaises(Exception, self.iseries.__setitem__, 5, 7.) + self.bseries[5] = 7. + self.assert_(self.bseries[5] == 7.) def test_setslice(self): - self.assertRaises(Exception, self.bseries.__setslice__, 5, 10, 7.) + self.bseries[5:10] = 7. + assert_series_equal(self.bseries[5:10].to_dense(), Series( + 7., index=range(5, 10), name=self.bseries.name)) def test_operators(self): def _check_op(a, b, op): @@ -465,12 +484,21 @@ def test_operators_corner2(self): assert_sp_series_equal(result, 3 - self.zbseries) def test_binary_operators(self): - def _check_inplace_op(op): + + # skipping for now ##### + raise nose.SkipTest + + def _check_inplace_op(iop, op): tmp = self.bseries.copy() - self.assertRaises(NotImplementedError, op, tmp, self.bseries) - inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv', 'ipow'] + + expected = op(tmp, self.bseries) + iop(tmp, self.bseries) + assert_sp_series_equal(tmp, expected) + + inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow'] for op in inplace_ops: - _check_inplace_op(getattr(operator, op)) + _check_inplace_op( + getattr(operator, "i%s" % op), getattr(operator, op)) def test_reindex(self): def _compare_with_series(sps, new_index): @@ -606,9 +634,12 @@ def test_dropna(self): fill_value=0) sp_valid = sp.valid() - assert_almost_equal(sp_valid.values, - sp.to_dense().valid().values) - self.assert_(sp_valid.index.equals(sp.to_dense().valid().index)) + + expected = sp.to_dense().valid() + 
expected = expected[expected != 0] + + assert_almost_equal(sp_valid.values, expected.values) + self.assert_(sp_valid.index.equals(expected.index)) self.assertEquals(len(sp_valid.sp_values), 2) result = self.bseries.dropna() @@ -711,6 +742,7 @@ class TestSparseDataFrame(TestCase, test_frame.SafeForSparse): _multiprocess_can_split_ = True def setUp(self): + self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C': np.arange(10), @@ -783,12 +815,13 @@ def test_constructor(self): # init dict with different index idx = self.frame.index[:5] - cons = SparseDataFrame(self.frame._series, index=idx, + cons = SparseDataFrame(self.frame, index=idx, columns=self.frame.columns, default_fill_value=self.frame.default_fill_value, - default_kind=self.frame.default_kind) + default_kind=self.frame.default_kind, + copy=True) reindexed = self.frame.reindex(idx) - assert_sp_frame_equal(cons, reindexed) + assert_sp_frame_equal(cons, reindexed, exact_indices=False) # assert level parameter breaks reindex self.assertRaises(TypeError, self.frame.reindex, idx, level=0) @@ -809,10 +842,12 @@ def test_constructor_ndarray(self): level=1) # wrong length index / columns - assertRaisesRegexp(ValueError, "^Index length", SparseDataFrame, self.frame.values, - index=self.frame.index[:-1]) - assertRaisesRegexp(ValueError, "^Column length", SparseDataFrame, self.frame.values, - columns=self.frame.columns[:-1]) + assertRaisesRegexp( + ValueError, "^Index length", SparseDataFrame, self.frame.values, + index=self.frame.index[:-1]) + assertRaisesRegexp( + ValueError, "^Column length", SparseDataFrame, self.frame.values, + columns=self.frame.columns[:-1]) def test_constructor_empty(self): sp = SparseDataFrame() @@ -838,8 +873,8 @@ def test_constructor_from_series(self): df = SparseDataFrame(x) tm.assert_isinstance(df,SparseDataFrame) - x = Series(np.random.randn(10000), name ='a') - y = Series(np.random.randn(10000), name ='b') + x = 
Series(np.random.randn(10000), name='a') + y = Series(np.random.randn(10000), name='b') x2 = x.astype(float) x2.ix[:9998] = np.NaN x_sparse = x2.to_sparse(fill_value=np.NaN) @@ -858,7 +893,7 @@ def test_dtypes(self): sdf = df.to_sparse() result = sdf.get_dtype_counts() - expected = Series({ 'float64' : 4 }) + expected = Series({'float64': 4}) assert_series_equal(result, expected) def test_str(self): @@ -1018,7 +1053,7 @@ def test_scalar_ops(self): pass def test_getitem(self): - # #1585 select multiple columns + # 1585 select multiple columns sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c']) result = sdf[['a', 'b']] @@ -1028,7 +1063,7 @@ def test_getitem(self): self.assertRaises(Exception, sdf.__getitem__, ['a', 'd']) def test_icol(self): - # #2227 + # 2227 result = self.frame.icol(0) self.assertTrue(isinstance(result, SparseSeries)) assert_sp_series_equal(result, self.frame['A']) @@ -1089,8 +1124,9 @@ def _check_frame(frame): # insert SparseSeries differently-indexed to_insert = frame['A'][::2] frame['E'] = to_insert - assert_series_equal(frame['E'].to_dense(), - to_insert.to_dense().reindex(frame.index)) + expected = to_insert.to_dense().reindex( + frame.index).fillna(to_insert.fill_value) + assert_series_equal(frame['E'].to_dense(), expected) # insert Series frame['F'] = frame['A'].to_dense() @@ -1100,8 +1136,9 @@ def _check_frame(frame): # insert Series differently-indexed to_insert = frame['A'].to_dense()[::2] frame['G'] = to_insert - assert_series_equal(frame['G'].to_dense(), - to_insert.reindex(frame.index)) + expected = to_insert.reindex( + frame.index).fillna(frame.default_fill_value) + assert_series_equal(frame['G'].to_dense(), expected) # insert ndarray frame['H'] = np.random.randn(N) @@ -1131,11 +1168,15 @@ def test_setitem_corner(self): assert_sp_series_equal(self.frame['a'], self.frame['B']) def test_setitem_array(self): - arr = self.frame['B'].view(SparseArray) + arr = self.frame['B'] self.frame['E'] = arr 
assert_sp_series_equal(self.frame['E'], self.frame['B']) - self.assertRaises(Exception, self.frame.__setitem__, 'F', arr[:-1]) + + self.frame['F'] = arr[:-1] + index = self.frame.index[:-1] + assert_sp_series_equal( + self.frame['E'].reindex(index), self.frame['F'].reindex(index)) def test_delitem(self): A = self.frame['A'] @@ -1167,12 +1208,13 @@ def test_append(self): b = self.frame[5:] appended = a.append(b) - assert_sp_frame_equal(appended, self.frame) + assert_sp_frame_equal(appended, self.frame, exact_indices=False) a = self.frame.ix[:5, :3] b = self.frame.ix[5:] appended = a.append(b) - assert_sp_frame_equal(appended.ix[:, :3], self.frame.ix[:, :3]) + assert_sp_frame_equal( + appended.ix[:, :3], self.frame.ix[:, :3], exact_indices=False) def test_apply(self): applied = self.frame.apply(np.sqrt) @@ -1183,10 +1225,6 @@ def test_apply(self): self.assert_(applied['A'].fill_value == np.sqrt(2)) # agg / broadcast - applied = self.frame.apply(np.sum) - assert_series_equal(applied, - self.frame.to_dense().apply(np.sum)) - broadcasted = self.frame.apply(np.sum, broadcast=True) tm.assert_isinstance(broadcasted, SparseDataFrame) assert_frame_equal(broadcasted.to_dense(), @@ -1194,6 +1232,11 @@ def test_apply(self): self.assert_(self.empty.apply(np.sqrt) is self.empty) + from pandas.core import nanops + applied = self.frame.apply(np.sum) + assert_series_equal(applied, + self.frame.to_dense().apply(nanops.nansum)) + def test_apply_nonuq(self): df_orig = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c']) @@ -1220,12 +1263,12 @@ def test_fillna(self): df = self.zframe.reindex(lrange(5)) result = df.fillna(0) expected = df.to_dense().fillna(0).to_sparse(fill_value=0) - assert_sp_frame_equal(result, expected) + assert_sp_frame_equal(result, expected, exact_indices=False) result = df.copy() result.fillna(0, inplace=True) expected = df.to_dense().fillna(0).to_sparse(fill_value=0) - assert_sp_frame_equal(result, expected) + assert_sp_frame_equal(result, 
expected, exact_indices=False) result = df.copy() result = df['A'] @@ -1243,13 +1286,15 @@ def test_corr(self): def test_describe(self): self.frame['foo'] = np.nan + self.frame.get_dtype_counts() + str(self.frame) desc = self.frame.describe() def test_join(self): left = self.frame.ix[:, ['A', 'B']] right = self.frame.ix[:, ['C', 'D']] joined = left.join(right) - assert_sp_frame_equal(joined, self.frame) + assert_sp_frame_equal(joined, self.frame, exact_indices=False) right = self.frame.ix[:, ['B', 'D']] self.assertRaises(Exception, left.join, right) @@ -1269,7 +1314,8 @@ def _check_frame(frame): dense_result) sparse_result2 = sparse_result.reindex(index) - dense_result2 = dense_result.reindex(index) + dense_result2 = dense_result.reindex( + index).fillna(frame.default_fill_value) assert_frame_equal(sparse_result2.to_dense(), dense_result2) # propagate CORRECT fill value @@ -1366,7 +1412,6 @@ def _check(frame): def test_shift(self): def _check(frame): shifted = frame.shift(0) - self.assert_(shifted is not frame) assert_sp_frame_equal(shifted, frame) f = lambda s: s.shift(1) @@ -1435,7 +1480,7 @@ def test_isin(self): assert_frame_equal(xp, rs) def test_sparse_pow_issue(self): - # #2220 + # 2220 df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]}) # note : no error without nan @@ -1460,7 +1505,7 @@ def _dense_series_compare(s, f): def _dense_frame_compare(frame, f): result = f(frame) assert(isinstance(frame, SparseDataFrame)) - dense_result = f(frame.to_dense()) + dense_result = f(frame.to_dense()).fillna(frame.default_fill_value) assert_frame_equal(result.to_dense(), dense_result) diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index f4474bfb5f853..ecf0949451a80 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -331,7 +331,7 @@ def is_period(object o): def is_period_array(ndarray[object] values): cdef int i, n = len(values) - from pandas import Period + from pandas.tseries.period import Period if n == 0: return False diff 
--git a/pandas/src/properties.pyx b/pandas/src/properties.pyx index 1df11cecf7b94..28e1ecfefc6a8 100644 --- a/pandas/src/properties.pyx +++ b/pandas/src/properties.pyx @@ -58,26 +58,6 @@ cdef class AxisProperty(object): def __set__(self, obj, value): obj._set_axis(self.axis, value) -cdef class SeriesIndex(object): - cdef: - object _check_type - - def __init__(self): - from pandas.core.index import _ensure_index - self._check_type = _ensure_index - - def __get__(self, obj, type): - return obj._index - - def __set__(self, obj, value): - if len(obj) != len(value): - raise AssertionError('Index length did not match values') - obj._index = val = self._check_type(value) - if hasattr(val, 'tz'): - # hack for #2139 - obj._make_time_series() - - cdef class ValuesProperty(object): def __get__(self, obj, type): diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx index d173ed8d8e1b7..d59c28a30796a 100644 --- a/pandas/src/reduce.pyx +++ b/pandas/src/reduce.pyx @@ -1,6 +1,11 @@ +#cython=False from numpy cimport * import numpy as np +from pandas.core.array import SNDArray +from distutils.version import LooseVersion + +is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2' cdef class Reducer: ''' @@ -9,8 +14,7 @@ cdef class Reducer: ''' cdef: Py_ssize_t increment, chunksize, nresults - object arr, dummy, f, labels - bint can_set_name + object arr, dummy, f, labels, typ, index def __init__(self, object arr, object f, axis=1, dummy=None, labels=None): @@ -33,49 +37,82 @@ cdef class Reducer: self.f = f self.arr = arr - self.dummy = self._check_dummy(dummy) + self.typ = None self.labels = labels + self.dummy, index = self._check_dummy(dummy) + + if axis == 0: + self.labels = index + self.index = labels + else: + self.labels = labels + self.index = index def _check_dummy(self, dummy=None): + cdef object index + if dummy is None: dummy = np.empty(self.chunksize, dtype=self.arr.dtype) - self.can_set_name = 0 + index = None else: if dummy.dtype != self.arr.dtype: raise 
ValueError('Dummy array must be same dtype') if len(dummy) != self.chunksize: raise ValueError('Dummy array must be length %d' % self.chunksize) - self.can_set_name = type(dummy) != np.ndarray - return dummy + # we passed a series-like + if hasattr(dummy,'values'): + + self.typ = type(dummy) + index = getattr(dummy,'index',None) + dummy = dummy.values + + return dummy, index def get_result(self): cdef: char* dummy_buf ndarray arr, result, chunk - Py_ssize_t i + Py_ssize_t i, incr flatiter it - object res - bint set_label = 0 - ndarray labels + object res, tchunk, name, labels, index, typ arr = self.arr chunk = self.dummy - dummy_buf = chunk.data chunk.data = arr.data - - set_label = self.labels is not None and self.can_set_name - if set_label: - labels = self.labels + labels = self.labels + index = self.index + typ = self.typ + incr = self.increment try: for i in range(self.nresults): - if set_label: - chunk.name = util.get_value_at(labels, i) + # need to make sure that we pass an actual object to the function + # and not just an ndarray + if typ is not None: + try: + if labels is not None: + name = labels[i] + + # recreate with the index if supplied + if index is not None: + tchunk = typ(chunk, index=index, name=name, fastpath=True) + else: + tchunk = typ(chunk, name=name) + + except: + tchunk = chunk + typ = None + else: + tchunk = chunk + + res = self.f(tchunk) + + if hasattr(res,'values'): + res = res.values - res = self.f(chunk) if i == 0: result = self._get_result_array(res) it = <flatiter> PyArray_IterNew(result) @@ -117,19 +154,24 @@ cdef class SeriesBinGrouper: bint passed_dummy cdef public: - object arr, index, dummy, f, bins + object arr, index, dummy_arr, dummy_index, values, f, bins, typ, ityp, name def __init__(self, object series, object f, object bins, object dummy): n = len(series) self.bins = bins self.f = f - if not series.flags.c_contiguous: - series = series.copy('C') - self.arr = series + + values = series.values + if not 
values.flags.c_contiguous: + values = values.copy('C') + self.arr = values self.index = series.index + self.typ = type(series) + self.ityp = type(series.index) + self.name = getattr(series,'name',None) - self.dummy = self._check_dummy(dummy) + self.dummy_arr, self.dummy_index = self._check_dummy(dummy) self.passed_dummy = dummy is not None # kludge for #1688 @@ -140,24 +182,27 @@ cdef class SeriesBinGrouper: def _check_dummy(self, dummy=None): if dummy is None: - dummy = np.empty(0, dtype=self.arr.dtype) + values = np.empty(0, dtype=self.arr.dtype) + index = None else: if dummy.dtype != self.arr.dtype: raise ValueError('Dummy array must be same dtype') - if not dummy.flags.contiguous: - dummy = dummy.copy() + values = dummy.values + if not values.flags.contiguous: + values = values.copy() + index = dummy.index - return dummy + return values, index def get_result(self): cdef: ndarray arr, result ndarray[int64_t] counts Py_ssize_t i, n, group_size - object res, chunk - bint initialized = 0 + object res + bint initialized = 0, needs_typ = 1, try_typ = 0 Slider vslider, islider - object gin + object gin, typ, ityp, name counts = np.zeros(self.ngroups, dtype=np.int64) @@ -169,14 +214,21 @@ cdef class SeriesBinGrouper: else: counts[i] = self.bins[i] - self.bins[i-1] - chunk = self.dummy group_size = 0 n = len(self.arr) + typ = self.typ + ityp = self.ityp + name = self.name + + vslider = Slider(self.arr, self.dummy_arr) + islider = Slider(self.index, self.dummy_index) - vslider = Slider(self.arr, self.dummy) - islider = Slider(self.index, self.dummy.index) + gin = self.dummy_index._engine - gin = self.dummy.index._engine + # old numpy issue, need to always create and pass the Series + if is_numpy_prior_1_6_2: + try_typ = 1 + needs_typ = 1 try: for i in range(self.ngroups): @@ -185,8 +237,25 @@ cdef class SeriesBinGrouper: islider.set_length(group_size) vslider.set_length(group_size) - res = self.f(chunk) + # see if we need to create the object proper + if try_typ: + if 
needs_typ: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + else: + res = self.f(SNDArray(vslider.buf,islider.buf,name=name)) + else: + try: + res = self.f(SNDArray(vslider.buf,islider.buf,name=name)) + needs_typ = 0 + except: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + needs_typ = 1 + + try_typ = 1 + res = _extract_result(res) if not initialized: result = self._get_result_array(res) initialized = 1 @@ -212,7 +281,7 @@ cdef class SeriesBinGrouper: def _get_result_array(self, object res): try: assert(not isinstance(res, np.ndarray)) - assert(not (isinstance(res, list) and len(res) == len(self.dummy))) + assert(not (isinstance(res, list) and len(res) == len(self.dummy_arr))) result = np.empty(self.ngroups, dtype='O') except Exception: @@ -230,7 +299,7 @@ cdef class SeriesGrouper: bint passed_dummy cdef public: - object arr, index, dummy, f, labels + object arr, index, dummy_arr, dummy_index, f, labels, values, typ, ityp, name def __init__(self, object series, object f, object labels, Py_ssize_t ngroups, object dummy): @@ -238,46 +307,62 @@ cdef class SeriesGrouper: self.labels = labels self.f = f - if not series.flags.c_contiguous: - series = series.copy('C') - self.arr = series + + values = series.values + if not values.flags.c_contiguous: + values = values.copy('C') + self.arr = values self.index = series.index + self.typ = type(series) + self.ityp = type(series.index) + self.name = getattr(series,'name',None) - self.dummy = self._check_dummy(dummy) + self.dummy_arr, self.dummy_index = self._check_dummy(dummy) self.passed_dummy = dummy is not None self.ngroups = ngroups def _check_dummy(self, dummy=None): if dummy is None: - dummy = np.empty(0, dtype=self.arr.dtype) + values = np.empty(0, dtype=self.arr.dtype) + index = None else: if dummy.dtype != self.arr.dtype: raise ValueError('Dummy array must be same dtype') - if not dummy.flags.contiguous: - dummy = dummy.copy() + values = dummy.values + 
if not values.flags.contiguous: + values = values.copy() + index = dummy.index - return dummy + return values, index def get_result(self): cdef: ndarray arr, result ndarray[int64_t] labels, counts Py_ssize_t i, n, group_size, lab - object res, chunk - bint initialized = 0 + object res + bint initialized = 0, needs_typ = 1, try_typ = 0 Slider vslider, islider - object gin + object gin, typ, ityp, name labels = self.labels counts = np.zeros(self.ngroups, dtype=np.int64) - chunk = self.dummy group_size = 0 n = len(self.arr) + typ = self.typ + ityp = self.ityp + name = self.name + + vslider = Slider(self.arr, self.dummy_arr) + islider = Slider(self.index, self.dummy_index) - vslider = Slider(self.arr, self.dummy) - islider = Slider(self.index, self.dummy.index) + gin = self.dummy_index._engine + + # old numpy issue, need to always create and pass the Series + if is_numpy_prior_1_6_2: + try_typ = 1 + needs_typ = 1 - gin = self.dummy.index._engine try: for i in range(n): group_size += 1 @@ -294,8 +379,28 @@ cdef class SeriesGrouper: islider.set_length(group_size) vslider.set_length(group_size) - res = self.f(chunk) - + # see if we need to create the object proper + # try on the first go around + if try_typ: + if needs_typ: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + else: + res = self.f(SNDArray(vslider.buf,islider.buf,name=name)) + else: + + # try with a numpy array directly + try: + res = self.f(SNDArray(vslider.buf,islider.buf,name=name)) + needs_typ = 0 + except (Exception), detail: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + needs_typ = 1 + + try_typ = 1 + + res = _extract_result(res) if not initialized: result = self._get_result_array(res) initialized = 1 @@ -324,13 +429,25 @@ cdef class SeriesGrouper: def _get_result_array(self, object res): try: assert(not isinstance(res, np.ndarray)) - assert(not (isinstance(res, list) and len(res) == len(self.dummy))) + assert(not (isinstance(res, list) 
and len(res) == len(self.dummy_arr))) result = np.empty(self.ngroups, dtype='O') except Exception: raise ValueError('function does not reduce') return result +cdef inline _extract_result(object res): + ''' extract the result object, it might be a 0-dim ndarray + or a len-1 0-dim, or a scalar ''' + if hasattr(res,'values'): + res = res.values + if not np.isscalar(res): + if isinstance(res, np.ndarray): + if res.ndim == 0: + res = res.item() + elif res.ndim == 1 and len(res) == 1: + res = res[0] + return res cdef class Slider: ''' diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c index f28ed137383c6..22f9cf8d7667a 100644 --- a/pandas/src/ujson/python/objToJSON.c +++ b/pandas/src/ujson/python/objToJSON.c @@ -260,7 +260,7 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, JSONTypeCont return NULL; } } - else + else { PRINTMARK(); *((JSINT64*)outValue) = pandas_datetimestruct_to_datetime(base, dts); @@ -283,7 +283,7 @@ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, s pandas_datetimestruct dts; PyObject *obj = (PyObject *) _obj; - + if (!convert_pydatetime_to_datetimestruct(obj, &dts, NULL, 1)) { PRINTMARK(); @@ -453,7 +453,7 @@ int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) PRINTMARK(); npyarr = GET_TC(tc)->npyarr; - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PRINTMARK(); return 0; @@ -1234,7 +1234,7 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) PRINTMARK(); pc->PyTypeToJSON = NpyDateTimeToJSON; - if (enc->datetimeIso) + if (enc->datetimeIso) { tc->type = JT_UTF8; } @@ -1311,7 +1311,7 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) } return; } - else + else if (PyObject_IsInstance(obj, type_decimal)) { PRINTMARK(); @@ -1337,7 +1337,7 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) PRINTMARK(); pc->PyTypeToJSON = PyDateTimeToJSON; - if (enc->datetimeIso) + if (enc->datetimeIso) { PRINTMARK(); tc->type = JT_UTF8; @@ 
-1397,7 +1397,7 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) pc->iterGetValue = Tuple_iterGetValue; pc->iterGetName = Tuple_iterGetName; return; - } + } else if (PyAnySet_Check(obj)) { @@ -1450,12 +1450,14 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) return; } + pc->newObj = PyObject_GetAttrString(obj, "values"); + if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) { PRINTMARK(); tc->type = JT_OBJECT; - pc->columnLabelsLen = PyArray_SIZE(obj); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "index"), "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); if (!pc->columnLabels) { goto INVALID; @@ -1466,7 +1468,6 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) PRINTMARK(); tc->type = JT_ARRAY; } - pc->newObj = PyObject_GetAttrString(obj, "values"); pc->iterBegin = NpyArr_iterBegin; pc->iterEnd = NpyArr_iterEnd; pc->iterNext = NpyArr_iterNext; @@ -1715,7 +1716,7 @@ PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs) PyObject *oencodeHTMLChars = NULL; char *sOrient = NULL; char *sdateFormat = NULL; - PyObject *oisoDates = 0; + PyObject *oisoDates = 0; PyObjectEncoder pyEncoder = { @@ -1765,11 +1766,11 @@ PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs) encoder->encodeHTMLChars = 1; } - if (idoublePrecision > JSON_DOUBLE_MAX_DECIMALS || idoublePrecision < 0) + if (idoublePrecision > JSON_DOUBLE_MAX_DECIMALS || idoublePrecision < 0) { PyErr_Format ( - PyExc_ValueError, - "Invalid value '%d' for option 'double_precision', max is '%u'", + PyExc_ValueError, + "Invalid value '%d' for option 'double_precision', max is '%u'", idoublePrecision, JSON_DOUBLE_MAX_DECIMALS); return NULL; @@ -1821,7 +1822,7 @@ 
PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs) { pyEncoder.datetimeUnit = PANDAS_FR_us; } - else + else if (strcmp(sdateFormat, "ns") == 0) { pyEncoder.datetimeUnit = PANDAS_FR_ns; diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index b104c70da9494..c3f4c8b3cd604 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -12,6 +12,7 @@ from pandas.core.api import DataFrame, Series, Panel, notnull import pandas.algos as algos import pandas.core.common as com +from pandas.core.common import _values_from_object from pandas.util.decorators import Substitution, Appender @@ -191,11 +192,11 @@ def _get_corr(a, b): def _flex_binary_moment(arg1, arg2, f): - if not (isinstance(arg1,(np.ndarray, DataFrame)) and - isinstance(arg1,(np.ndarray, DataFrame))): + if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and + isinstance(arg1,(np.ndarray, Series, DataFrame))): raise ValueError("arguments to moment function must be of type ndarray/DataFrame") - if isinstance(arg1, np.ndarray) and isinstance(arg2, np.ndarray): + if isinstance(arg1, (np.ndarray,Series)) and isinstance(arg2, (np.ndarray,Series)): X, Y = _prep_binary(arg1, arg2) return f(X, Y) elif isinstance(arg1, DataFrame): diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 2b8f6fc1601c8..2bf366f4dc8cb 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -65,14 +65,14 @@ def __init__(self, y, x, intercept=True, weights=None, nw_lags=None, if self._weights is not None: self._x_trans = self._x.mul(np.sqrt(self._weights), axis=0) self._y_trans = self._y * np.sqrt(self._weights) - self.sm_ols = sm.WLS(self._y.values, - self._x.values, + self.sm_ols = sm.WLS(self._y.get_values(), + self._x.get_values(), weights=self._weights.values).fit() else: self._x_trans = self._x self._y_trans = self._y - self.sm_ols = sm.OLS(self._y.values, - self._x.values).fit() + self.sm_ols = sm.OLS(self._y.get_values(), + self._x.get_values()).fit() def _prepare_data(self): """ 
@@ -97,6 +97,9 @@ def _prepare_data(self): filt_rhs['intercept'] = 1. pre_filt_rhs['intercept'] = 1. + if hasattr(filt_weights,'to_dense'): + filt_weights = filt_weights.to_dense() + return (filt_lhs, filt_rhs, filt_weights, pre_filt_rhs, index, valid) @@ -1301,8 +1304,11 @@ def _filter_data(lhs, rhs, weights=None): filt_lhs = combined.pop('__y__') filt_rhs = combined - return (filt_lhs, filt_rhs, filt_weights, - pre_filt_rhs, index, valid) + if hasattr(filt_weights,'to_dense'): + filt_weights = filt_weights.to_dense() + + return (filt_lhs.to_dense(), filt_rhs.to_dense(), filt_weights, + pre_filt_rhs.to_dense(), index, valid) def _combine_rhs(rhs): diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index 697425c8e0fcf..a2271731b6de9 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -98,9 +98,10 @@ def testOLSWithDatasets_scotland(self): def testWLS(self): # WLS centered SS changed (fixed) in 0.5.0 - if sm.version.version < '0.5.0': - raise nose.SkipTest - + v = sm.version.version.split('.') + if int(v[0]) >= 0 and int(v[1]) <= 5: + if int(v[2]) < 1: + raise nose.SkipTest print( "Make sure you're using statsmodels 0.5.0.dev-cec4f26 or later.") X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D']) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7043698ea6476..b84115bd3e6b4 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -407,6 +407,16 @@ def test_setitem(self): self.frame[dtype] = np.array(arr,dtype=dtype) self.assert_(self.frame[dtype].dtype.name == dtype) + # dtype changing GH4204 + df = DataFrame([[0,0]]) + df.iloc[0] = np.nan + expected = DataFrame([[np.nan,np.nan]]) + assert_frame_equal(df,expected) + + df = DataFrame([[0,0]]) + df.loc[0] = np.nan + assert_frame_equal(df,expected) + def test_setitem_tuple(self): self.frame['A', 'B'] = self.frame['A'] assert_series_equal(self.frame['A', 'B'], self.frame['A']) @@ -500,7 +510,7 @@ def 
test_setitem_boolean_column(self): mask = self.frame['A'] > 0 self.frame.ix[mask, 'B'] = 0 - expected.values[mask, 1] = 0 + expected.values[mask.values, 1] = 0 assert_frame_equal(self.frame, expected) @@ -1041,6 +1051,7 @@ def test_getitem_fancy_1d(self): assert_series_equal(xs, exp) def test_setitem_fancy_1d(self): + # case 1: set cross-section for indices frame = self.frame.copy() expected = self.frame.copy() @@ -1142,13 +1153,13 @@ def test_setitem_fancy_boolean(self): mask = frame['A'] > 0 frame.ix[mask] = 0. - expected.values[mask] = 0. + expected.values[mask.values] = 0. assert_frame_equal(frame, expected) frame = self.frame.copy() expected = self.frame.copy() frame.ix[mask, ['A', 'B']] = 0. - expected.values[mask, :2] = 0. + expected.values[mask.values, :2] = 0. assert_frame_equal(frame, expected) def test_getitem_fancy_ints(self): @@ -2738,11 +2749,36 @@ def test_constructor_Series_named(self): self.assert_(df.columns[0] == 'x') self.assert_(df.index.equals(a.index)) + # ndarray like + arr = np.random.randn(10) + s = Series(arr,name='x') + df = DataFrame(s) + expected = DataFrame(dict(x = s)) + assert_frame_equal(df,expected) + + s = Series(arr,index=range(3,13)) + df = DataFrame(s) + expected = DataFrame({ 0 : s }) + assert_frame_equal(df,expected) + + self.assertRaises(ValueError, DataFrame, s, columns=[1,2]) + # #2234 a = Series([], name='x') df = DataFrame(a) self.assert_(df.columns[0] == 'x') + # series with name and w/o + s1 = Series(arr,name='x') + df = DataFrame([s1, arr]).T + expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0']) + assert_frame_equal(df,expected) + + # this is a bit non-intuitive here; the series collapse down to arrays + df = DataFrame([arr, s1]).T + expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1]) + assert_frame_equal(df,expected) + def test_constructor_Series_differently_indexed(self): # name s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x') @@ -3026,8 +3062,8 @@ def 
test_constructor_with_datetimes(self): index=np.arange(10)) result = df.get_dtype_counts() expected = Series({'int64': 1, datetime64name: 2, objectname : 2}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) # check with ndarray construction ndim==0 (e.g. we are passing a ndim 0 ndarray with a dtype specified) @@ -3046,16 +3082,16 @@ def test_constructor_with_datetimes(self): expected['float64'] = 1 expected[floatname] = 1 - result.sort() + result.sort_index() expected = Series(expected) - expected.sort() + expected.sort_index() assert_series_equal(result, expected) # check with ndarray construction ndim>0 df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname), intname : np.array([1]*10,dtype=intname)}, index=np.arange(10)) result = df.get_dtype_counts() - result.sort() + result.sort_index() assert_series_equal(result, expected) # GH 2809 @@ -3066,8 +3102,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetime_s':datetime_s}) result = df.get_dtype_counts() expected = Series({ datetime64name : 1 }) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) # GH 2810 @@ -3077,8 +3113,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetimes': datetimes, 'dates':dates}) result = df.get_dtype_counts() expected = Series({ datetime64name : 1, objectname : 1 }) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) def test_constructor_for_list_with_dtypes(self): @@ -3139,8 +3175,8 @@ def test_constructor_for_list_with_dtypes(self): 'e' : [1.,2,4.,7]}) result = df.get_dtype_counts() expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) def test_not_hashable(self): @@ 
-3328,27 +3364,17 @@ def test_astype_with_exclude_string(self): def test_astype_with_view(self): tf = self.mixed_float.reindex(columns = ['A','B','C']) - self.assertRaises(TypeError, self.frame.astype, np.int32, copy = False) - self.assertRaises(TypeError, tf, np.int32, copy = False) - - self.assertRaises(TypeError, tf, np.int64, copy = False) casted = tf.astype(np.int64) - self.assertRaises(TypeError, tf, np.float32, copy = False) casted = tf.astype(np.float32) # this is the only real reason to do it this way tf = np.round(self.frame).astype(np.int32) casted = tf.astype(np.float32, copy = False) - #self.assert_(casted.values.data == tf.values.data) tf = self.frame.astype(np.float64) casted = tf.astype(np.int64, copy = False) - #self.assert_(casted.values.data == tf.values.data) - - # can't view to an object array - self.assertRaises(Exception, self.frame.astype, 'O', copy = False) def test_astype_cast_nan_int(self): df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]}) @@ -5713,10 +5739,13 @@ def test_as_matrix_duplicates(self): self.assertTrue(np.array_equal(result, expected)) - def test_as_blocks(self): + def test_ftypes(self): frame = self.mixed_float - mat = frame.blocks - self.assert_(set([ x.name for x in frame.dtypes.values ]) == set(mat.keys())) + expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense')) + expected.sort() + result = frame.ftypes + result.sort() + assert_series_equal(result,expected) def test_values(self): self.frame.values[:, 0] = 5. 
@@ -6227,6 +6256,16 @@ def test_fillna(self): df.x.fillna(method=m,inplace=1) df.x.fillna(method=m) + # with different dtype (GH3386) + df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']]) + + result = df.fillna({ 2: 'foo' }) + expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']]) + assert_frame_equal(result, expected) + + df.fillna({ 2: 'foo' }, inplace=True) + assert_frame_equal(df, expected) + def test_ffill(self): self.tsframe['A'][:5] = nan self.tsframe['A'][-5:] = nan @@ -7322,6 +7361,11 @@ def test_reindex(self): newFrame = self.frame.reindex(list(self.ts1.index)) self.assert_(newFrame.index.equals(self.ts1.index)) + # copy with no axes + result = self.frame.reindex() + assert_frame_equal(result,self.frame) + self.assert_((result is self.frame) == False) + def test_reindex_name_remains(self): s = Series(random.rand(10)) df = DataFrame(s, index=np.arange(len(s))) @@ -7410,6 +7454,7 @@ def test_reindex_fill_value(self): assert_frame_equal(result, expected) def test_align(self): + af, bf = self.frame.align(self.frame) self.assert_(af._data is not self.frame._data) @@ -7584,15 +7629,14 @@ def _check_get(df, cond, check_dtypes = True): other1 = _safe_add(df) rs = df.where(cond, other1) rs2 = df.where(cond.values, other1) - for k, v in compat.iteritems(rs): - assert_series_equal(v, np.where(cond[k], df[k], other1[k])) + for k, v in rs.iteritems(): + assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index)) assert_frame_equal(rs, rs2) # dtypes if check_dtypes: self.assert_((rs.dtypes == df.dtypes).all() == True) - # check getting for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]: cond = df > 0 @@ -8174,8 +8218,8 @@ def test_apply_yield_list(self): def test_apply_reduce_Series(self): self.frame.ix[::2, 'A'] = np.nan - result = self.frame.apply(np.mean, axis=1) expected = self.frame.mean(1) + result = self.frame.apply(np.mean, axis=1) 
assert_series_equal(result, expected) def test_apply_differently_indexed(self): @@ -8313,11 +8357,20 @@ def test_applymap(self): def test_filter(self): # items - filtered = self.frame.filter(['A', 'B', 'E']) self.assertEqual(len(filtered.columns), 2) self.assert_('E' not in filtered) + filtered = self.frame.filter(['A', 'B', 'E'], axis='columns') + self.assertEqual(len(filtered.columns), 2) + self.assert_('E' not in filtered) + + # other axis + idx = self.frame.index[0:4] + filtered = self.frame.filter(idx, axis='index') + expected = self.frame.reindex(index=idx) + assert_frame_equal(filtered,expected) + # like fcopy = self.frame.copy() fcopy['AA'] = 1 @@ -8932,8 +8985,8 @@ def test_get_numeric_data(self): index=np.arange(10)) result = df.get_dtype_counts() expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', @@ -9185,7 +9238,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, if has_skipna: def skipna_wrapper(x): - nona = x.dropna().values + nona = x.dropna() if len(nona) == 0: return np.nan return alternative(nona) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index a5f98107895a5..ae81752c11b29 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1348,7 +1348,6 @@ def test_get_loc_level(self): def test_slice_locs(self): df = tm.makeTimeDataFrame() stacked = df.stack() - idx = stacked.index slob = slice(*idx.slice_locs(df.index[5], df.index[15])) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 57827857e107a..07436236a62de 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -5,6 +5,7 @@ import numpy as np from pandas import Index, MultiIndex, DataFrame, Series +from pandas.sparse.array import SparseArray from pandas.core.internals import * 
import pandas.core.internals as internals import pandas.util.testing as tm @@ -24,7 +25,7 @@ def assert_block_equal(left, right): def get_float_mat(n, k, dtype): return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0) -TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] +TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2'] N = 10 @@ -44,7 +45,6 @@ def get_obj_ex(cols=['b', 'd']): mat[:, 1] = 'bar' return make_block(mat.T, cols, TEST_COLS) - def get_bool_ex(cols=['f']): mat = np.ones((N, 1), dtype=bool) return make_block(mat.T, cols, TEST_COLS) @@ -59,6 +59,14 @@ def get_dt_ex(cols=['h']): mat = randn(N, 1).astype(int).astype('M8[ns]') return make_block(mat.T, cols, TEST_COLS) +def get_sparse_ex1(): + sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + return make_block(sa1, ['s1'], TEST_COLS) + +def get_sparse_ex2(): + sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0) + return make_block(sa2, ['s2'], TEST_COLS) + def create_blockmanager(blocks): l = [] for b in blocks: @@ -67,9 +75,19 @@ def create_blockmanager(blocks): for b in blocks: b.ref_items = items - index_sz = blocks[0].values.shape[1] + index_sz = blocks[0].shape[1] return BlockManager(blocks, [items, np.arange(index_sz)]) +def create_singleblockmanager(blocks): + l = [] + for b in blocks: + l.extend(b.items) + items = Index(l) + for b in blocks: + b.ref_items = items + + return SingleBlockManager(blocks, [items]) + class TestBlock(unittest.TestCase): _multiprocess_can_split_ = True @@ -344,8 +362,27 @@ def test_set_change_dtype(self): def test_copy(self): shallow = self.mgr.copy(deep=False) - for cp_blk, blk in zip(shallow.blocks, self.mgr.blocks): - self.assert_(cp_blk.values is blk.values) + # we don't guaranteee block ordering + for blk in self.mgr.blocks: + found = False + for cp_blk in shallow.blocks: + if cp_blk.values is blk.values: + found = True + break + self.assert_(found == True) + + def test_sparse(self): + mgr = 
create_blockmanager([get_sparse_ex1(),get_sparse_ex2()]) + + # what to test here? + self.assert_(mgr.as_matrix().dtype == np.float64) + + def test_sparse_mixed(self): + mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2(),get_float_ex()]) + self.assert_(len(mgr.blocks) == 3) + self.assert_(isinstance(mgr,BlockManager)) + + # what to test here? def test_as_matrix_float(self): @@ -531,15 +568,15 @@ def test_get_numeric_data(self): assert_frame_equal(xp, rs) xp = DataFrame({'bool': bool_ser}) - rs = DataFrame(df._data.get_numeric_data(type_list=bool)) + rs = DataFrame(df._data.get_bool_data()) assert_frame_equal(xp, rs) - rs = DataFrame(df._data.get_numeric_data(type_list=bool)) + rs = DataFrame(df._data.get_bool_data()) df.ix[0, 'bool'] = not df.ix[0, 'bool'] self.assertEqual(rs.ix[0, 'bool'], df.ix[0, 'bool']) - rs = DataFrame(df._data.get_numeric_data(type_list=bool, copy=True)) + rs = DataFrame(df._data.get_bool_data(copy=True)) df.ix[0, 'bool'] = not df.ix[0, 'bool'] self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool']) diff --git a/pandas/tests/test_ndframe.py b/pandas/tests/test_ndframe.py index d5d50359b67e8..edafeb64af98e 100644 --- a/pandas/tests/test_ndframe.py +++ b/pandas/tests/test_ndframe.py @@ -14,21 +14,6 @@ def setUp(self): tdf = t.makeTimeDataFrame() self.ndf = NDFrame(tdf._data) - def test_constructor(self): - # with cast - ndf = NDFrame(self.ndf._data, dtype=np.int64) - self.assert_(ndf.values.dtype == np.int64) - - def test_ndim(self): - self.assertEquals(self.ndf.ndim, 2) - - def test_astype(self): - casted = self.ndf.astype(int) - self.assert_(casted.values.dtype == np.int_) - - casted = self.ndf.astype(np.int32) - self.assert_(casted.values.dtype == np.int32) - def test_squeeze(self): # noop for s in [ t.makeFloatSeries(), t.makeStringSeries(), t.makeObjectSeries() ]: diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index c5f9f962f4646..430e5df839e18 100644 --- a/pandas/tests/test_panel.py +++ 
b/pandas/tests/test_panel.py @@ -1029,11 +1029,14 @@ def test_reindex(self): major=self.panel.major_axis, minor=self.panel.minor_axis) - assert(result.items is self.panel.items) - assert(result.major_axis is self.panel.major_axis) - assert(result.minor_axis is self.panel.minor_axis) + self.assert_(result.items is self.panel.items) + self.assert_(result.major_axis is self.panel.major_axis) + self.assert_(result.minor_axis is self.panel.minor_axis) - self.assertRaises(Exception, self.panel.reindex) + # this ok + result = self.panel.reindex() + assert_panel_equal(result,self.panel) + self.assert_((result is self.panel) == False) # with filling smaller_major = self.panel.major_axis[::5] @@ -1047,7 +1050,8 @@ def test_reindex(self): # don't necessarily copy result = self.panel.reindex(major=self.panel.major_axis, copy=False) - self.assert_(result is self.panel) + assert_panel_equal(result,self.panel) + self.assert_((result is self.panel) == False) def test_reindex_like(self): # reindex_like @@ -1161,8 +1165,10 @@ def test_swapaxes(self): result = self.panel.swapaxes(0, 1) self.assert_(result.items is self.panel.major_axis) - # this should not work - self.assertRaises(Exception, self.panel.swapaxes, 'items', 'items') + # this works, but return a copy + result = self.panel.swapaxes('items', 'items') + assert_panel_equal(self.panel,result) + self.assert_(id(self.panel) != id(result)) def test_transpose(self): result = self.panel.transpose('minor', 'major', 'items') @@ -1190,7 +1196,7 @@ def test_transpose(self): maj='major', majo='items') # test invalid kwargs - self.assertRaises(KeyError, self.panel.transpose, 'minor', + self.assertRaises(AssertionError, self.panel.transpose, 'minor', maj='major', minor='items') result = self.panel.transpose(2, 1, 0) @@ -1788,15 +1794,18 @@ def test_pivot(self): def test_monotonic(): pos = np.array([1, 2, 3, 5]) - assert panelm._monotonic(pos) + def _monotonic(arr): + return not (arr[1:] < arr[:-1]).any() + + assert _monotonic(pos) neg = 
np.array([1, 2, 3, 4, 3]) - assert not panelm._monotonic(neg) + assert not _monotonic(neg) neg2 = np.array([5, 1, 2, 3, 4, 5]) - assert not panelm._monotonic(neg2) + assert not _monotonic(neg2) def test_panel_index(): diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index eddddb42b680e..add8ebf73f85f 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -767,7 +767,10 @@ def test_reindex(self): assert(result.major_axis is self.panel4d.major_axis) assert(result.minor_axis is self.panel4d.minor_axis) - self.assertRaises(Exception, self.panel4d.reindex) + # don't necessarily copy + result = self.panel4d.reindex() + assert_panel4d_equal(result,self.panel4d) + self.assert_((result is self.panel4d) == False) # with filling smaller_major = self.panel4d.major_axis[::5] @@ -782,7 +785,8 @@ def test_reindex(self): # don't necessarily copy result = self.panel4d.reindex( major=self.panel4d.major_axis, copy=False) - self.assert_(result is self.panel4d) + assert_panel4d_equal(result,self.panel4d) + self.assert_((result is self.panel4d) == False) def test_not_hashable(self): p4D_empty = Panel4D() @@ -883,8 +887,10 @@ def test_swapaxes(self): result = self.panel4d.swapaxes(0, 1) self.assert_(result.labels is self.panel4d.items) - # this should also work - self.assertRaises(Exception, self.panel4d.swapaxes, 'items', 'items') + # this works, but return a copy + result = self.panel4d.swapaxes('items', 'items') + assert_panel4d_equal(self.panel4d,result) + self.assert_(id(self.panel4d) != id(result)) def test_to_frame(self): raise nose.SkipTest diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py index e195839242f55..3c86998c5630a 100644 --- a/pandas/tests/test_panelnd.py +++ b/pandas/tests/test_panelnd.py @@ -29,11 +29,11 @@ def test_4d_construction(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - 
axis_slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -43,11 +43,11 @@ def test_4d_construction_alt(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - axis_slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer='Panel', - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -58,14 +58,14 @@ def test_4d_construction_error(self): self.assertRaises(Exception, panelnd.create_nd_panel_factory, klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', - 'minor_axis'], - axis_slices={'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', + 'minor_axis'], + slices={'items': 'items', + 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer='foo', - axis_aliases={'major': 'major_axis', - 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', + 'minor': 'minor_axis'}, stat_axis=2) def test_5d_construction(self): @@ -73,11 +73,11 @@ def test_5d_construction(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels1', 'items', 'major_axis', 'minor_axis'], - axis_slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 
'minor_axis'}, + orders=['labels1', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -85,13 +85,13 @@ def test_5d_construction(self): # create a 5D Panel5D = panelnd.create_nd_panel_factory( klass_name='Panel5D', - axis_orders=['cool1', 'labels1', 'items', 'major_axis', - 'minor_axis'], - axis_slices={'labels1': 'labels1', 'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['cool1', 'labels1', 'items', 'major_axis', + 'minor_axis'], + slices={'labels1': 'labels1', 'items': 'items', + 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel4D, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p5d = Panel5D(dict(C1=p4d)) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 9a959fa789e05..9d6311b7e2118 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -13,7 +13,7 @@ import numpy.ma as ma import pandas as pd -from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull, +from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range, date_range) from pandas.core.index import MultiIndex from pandas.tseries.index import Timestamp, DatetimeIndex @@ -25,7 +25,7 @@ import pandas.core.datetools as datetools import pandas.core.nanops as nanops -from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict +from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long from pandas import compat from pandas.util.testing import (assert_series_equal, assert_almost_equal, @@ -282,29 +282,50 @@ def setUp(self): self.empty = Series([], index=[]) + def 
test_scalar_conversion(self): + + # Pass in scalar is disabled + scalar = Series(0.5) + self.assert_(not isinstance(scalar, float)) + + # coercion + self.assert_(float(Series([1.])) == 1.0) + self.assert_(int(Series([1.])) == 1) + self.assert_(long(Series([1.])) == 1) + + self.assert_(bool(Series([True])) == True) + self.assert_(bool(Series([False])) == False) + + self.assert_(bool(Series([True,True])) == True) + self.assert_(bool(Series([False,True])) == True) + + def test_astype(self): + s = Series(np.random.randn(5),name='foo') + + for dtype in ['float32','float64','int64','int32']: + astyped = s.astype(dtype) + self.assert_(astyped.dtype == dtype) + self.assert_(astyped.name == s.name) + def test_constructor(self): # Recognize TimeSeries - tm.assert_isinstance(self.ts, TimeSeries) + self.assert_(self.ts.is_time_series == True) # Pass in Series derived = Series(self.ts) - tm.assert_isinstance(derived, TimeSeries) + self.assert_(derived.is_time_series == True) self.assert_(tm.equalContents(derived.index, self.ts.index)) # Ensure new index is not created self.assertEquals(id(self.ts.index), id(derived.index)) - # Pass in scalar - scalar = Series(0.5) - tm.assert_isinstance(scalar, float) - # Mixed type Series mixed = Series(['hello', np.NaN], index=[0, 1]) self.assert_(mixed.dtype == np.object_) self.assert_(mixed[1] is np.NaN) - self.assert_(not isinstance(self.empty, TimeSeries)) - self.assert_(not isinstance(Series({}), TimeSeries)) + self.assert_(not self.empty.is_time_series) + self.assert_(not Series({}).is_time_series) self.assertRaises(Exception, Series, np.random.randn(3, 3), index=np.arange(3)) @@ -580,7 +601,7 @@ def test_setindex(self): # wrong length series = self.series.copy() - self.assertRaises(AssertionError, setattr, series, 'index', + self.assertRaises(Exception, setattr, series, 'index', np.arange(len(series) - 1)) # works @@ -726,6 +747,7 @@ def test_getitem_generator(self): def test_getitem_boolean_object(self): # using column from DataFrame 
+ s = self.series mask = s > s.median() omask = mask.astype(object) @@ -736,10 +758,11 @@ def test_getitem_boolean_object(self): assert_series_equal(result, expected) # setitem + s2 = s.copy() cop = s.copy() cop[omask] = 5 - s[mask] = 5 - assert_series_equal(cop, s) + s2[mask] = 5 + assert_series_equal(cop, s2) # nans raise exception omask[5:10] = np.nan @@ -749,11 +772,18 @@ def test_getitem_boolean_object(self): def test_getitem_setitem_boolean_corner(self): ts = self.ts mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median() + + # these used to raise...?? + self.assertRaises(Exception, ts.__getitem__, mask_shifted) self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1) + #ts[mask_shifted] + #ts[mask_shifted] = 1 self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted) self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1) + #ts.ix[mask_shifted] + #ts.ix[mask_shifted] = 2 def test_getitem_setitem_slice_integers(self): s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) @@ -913,6 +943,32 @@ def test_setitem(self): self.assertRaises(Exception, self.series.__setitem__, 'foobar', 1) + def test_setitem_dtypes(self): + + # change dtypes + # GH 4463 + expected = Series([np.nan,2,3]) + + s = Series([1,2,3]) + s.iloc[0] = np.nan + assert_series_equal(s,expected) + + s = Series([1,2,3]) + s.loc[0] = np.nan + assert_series_equal(s,expected) + + s = Series([1,2,3]) + s[0] = np.nan + assert_series_equal(s,expected) + + s = Series([False]) + s.loc[0] = np.nan + assert_series_equal(s,Series([np.nan])) + + s = Series([False,True]) + s.loc[0] = np.nan + assert_series_equal(s,Series([np.nan,1.0])) + def test_set_value(self): idx = self.ts.index[10] res = self.ts.set_value(idx, 0) @@ -1099,8 +1155,18 @@ def test_where(self): assert(s.shape == rs.shape) assert(rs is not s) - rs = s.where(cond[:3], -s) - assert_series_equal(rs, s.abs()[:3].append(s[3:])) + # test alignment + cond = Series([True,False,False,True,False],index=s.index) + s2 = 
-(s.abs()) + + expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index) + rs = s2.where(cond[:3]) + assert_series_equal(rs, expected) + + expected = s2.abs() + expected.ix[0] = s2[0] + rs = s2.where(cond[:3], -s2) + assert_series_equal(rs, expected) self.assertRaises(ValueError, s.where, 1) self.assertRaises(ValueError, s.where, cond[:3].values, -s) @@ -1160,7 +1226,18 @@ def test_where(self): s = Series(np.arange(10)) mask = s > 5 - self.assertRaises(ValueError, s.__setitem__, mask, ([0] * 5,)) + def f(): + s[mask] = [5,4,3,2,1] + self.assertRaises(ValueError, f) + def f(): + s[mask] = [0] * 5 + self.assertRaises(ValueError, f) + + # dtype changes + s = Series([1,2,3,4]) + result = s.where(s>2,np.nan) + expected = Series([np.nan,np.nan,3,4]) + assert_series_equal(result, expected) def test_where_broadcast(self): # Test a variety of differently sized series @@ -1461,7 +1538,7 @@ def test_median(self): self._check_stat_op('median', np.median) # test with integers, test failure - int_ts = TimeSeries(np.ones(10, dtype=int), index=lrange(10)) + int_ts = Series(np.ones(10, dtype=int), index=lrange(10)) self.assertAlmostEqual(np.median(int_ts), int_ts.median()) def test_prod(self): @@ -1568,7 +1645,11 @@ def test_cummax(self): self.assert_(np.array_equal(result, expected)) def test_npdiff(self): + raise nose.SkipTest + + # no longer works as the return type of np.diff is now nd.array s = Series(np.arange(5)) + r = np.diff(s) assert_series_equal(Series([nan, 0, 0, 0, nan]), r) @@ -2494,6 +2575,61 @@ def test_idxmax(self): result = s.idxmax() self.assert_(result == 4) + def test_ndarray_compat(self): + + # test numpy compat with Series as sub-class of NDFrame + tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'], + index=date_range('1/1/2000', periods=1000)) + + def f(x): + return x[x.argmax()] + result = tsdf.apply(f) + expected = tsdf.max() + assert_series_equal(result,expected) + + # .item() + s = Series([1]) + result = s.item() + self.assert_(result 
== 1) + self.assert_(s.item() == s.iloc[0]) + + # using an ndarray like function + s = Series(np.random.randn(10)) + result = np.ones_like(s) + expected = Series(1,index=range(10),dtype='float64') + #assert_series_equal(result,expected) + + def test_underlying_data_conversion(self): + + # GH 4080 + df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c'])) + df.set_index(['a', 'b', 'c'], inplace=True) + s = Series([1], index=[(2,2,2)]) + df['val'] = 0 + df + df['val'].update(s) + + expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0])) + expected.set_index(['a', 'b', 'c'], inplace=True) + tm.assert_frame_equal(df,expected) + + # GH 3970 + df = DataFrame({ "aa":range(5), "bb":[2.2]*5}) + df["cc"] = 0.0 + ck = [True]*len(df) + df["bb"].iloc[0] = .13 + df_tmp = df.iloc[ck] + df["bb"].iloc[0] = .15 + self.assert_(df['bb'].iloc[0] == 0.15) + + # GH 3217 + df = DataFrame(dict(a = [1,3], b = [np.nan, 2])) + df['c'] = np.nan + df['c'].update(pd.Series(['foo'],index=[0])) + + expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan])) + tm.assert_frame_equal(df,expected) + def test_operators_corner(self): series = self.ts @@ -2879,6 +3015,10 @@ def test_unique(self): expected = np.array([1, 2, 3, None], dtype=object) self.assert_(np.array_equal(result, expected)) + def test_dropna_empty(self): + s = Series([]) + self.assert_(len(s.dropna()) == 0) + def test_drop_duplicates(self): s = Series([1, 2, 3, 3]) @@ -2964,7 +3104,8 @@ def test_rank(self): mask = np.isnan(self.ts) filled = self.ts.fillna(np.inf) - exp = rankdata(filled) + # rankdata returns a ndarray + exp = Series(rankdata(filled),index=filled.index) exp[mask] = np.nan assert_almost_equal(ranks, exp) @@ -4050,19 +4191,19 @@ def test_preserveRefs(self): self.assertFalse(np.isnan(self.ts[10])) def test_ne(self): - ts = TimeSeries([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) expected = [True, True, False, True, 
True] self.assert_(tm.equalContents(ts.index != 5, expected)) self.assert_(tm.equalContents(~(ts.index == 5), expected)) def test_pad_nan(self): - x = TimeSeries([np.nan, 1., np.nan, 3., np.nan], - ['z', 'a', 'b', 'c', 'd'], dtype=float) + x = Series([np.nan, 1., np.nan, 3., np.nan], + ['z', 'a', 'b', 'c', 'd'], dtype=float) x.fillna(method='pad', inplace=True) - expected = TimeSeries([np.nan, 1.0, 1.0, 3.0, 3.0], - ['z', 'a', 'b', 'c', 'd'], dtype=float) + expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], + ['z', 'a', 'b', 'c', 'd'], dtype=float) assert_series_equal(x[1:], expected[1:]) self.assert_(np.isnan(x[0]), np.isnan(expected[0])) @@ -4246,16 +4387,6 @@ def test_replace(self): rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3]) assert_series_equal(rs, rs2) - # replace with forward fill not considering np.nan missing - s2 = ser.copy() - s2[5] = np.nan - rs3 = s2.replace(['foo', 'bar']) - self.assert_(isnull(rs3[6])) - - # replace with back fill considering np.nan as missing - rs4 = ser.replace([np.nan, 'foo', 'bar'], method='bfill') - assert_almost_equal(rs4[4], ser[5]) - # replace inplace ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True) @@ -4277,7 +4408,7 @@ def test_replace(self): # malformed self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0]) - self.assertRaises(ValueError, ser.replace, range(1, 3), [np.nan, 0]) + self.assertRaises(TypeError, ser.replace, range(1, 3), [np.nan, 0]) ser = Series([0, 1, 2, 3, 4]) result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0]) @@ -4559,13 +4690,13 @@ def test_set_index_makes_timeseries(self): s = Series(lrange(10)) s.index = idx - self.assertTrue(isinstance(s, TimeSeries)) + self.assertTrue(s.is_time_series == True) def test_timeseries_coercion(self): idx = tm.makeDateIndex(10000) ser = Series(np.random.randn(len(idx)), idx.astype(object)) - tm.assert_isinstance(ser, TimeSeries) - tm.assert_isinstance(ser.index, DatetimeIndex) + self.assert_(ser.is_time_series == True) + 
self.assert_(isinstance(ser.index, DatetimeIndex)) def test_replace(self): N = 100 @@ -4595,16 +4726,6 @@ def test_replace(self): rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3]) assert_series_equal(rs, rs2) - # replace with forward fill not considering np.nan missing - s2 = ser.copy() - s2[5] = np.nan - rs3 = s2.replace(['foo', 'bar']) - self.assert_(isnull(rs3[6])) - - # replace with back fill considering np.nan as missing - rs4 = ser.replace([np.nan, 'foo', 'bar'], method='bfill') - assert_almost_equal(rs4[4], ser[5]) - # replace inplace ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True) self.assert_((ser[:5] == -1).all()) diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index c1d8a0d876866..765dbc07b464f 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -17,8 +17,7 @@ from pandas.core.internals import (IntBlock, BoolBlock, BlockManager, make_block, _consolidate) from pandas.util.decorators import cache_readonly, Appender, Substitution -from pandas.core.common import PandasError -from pandas.sparse.frame import SparseDataFrame +from pandas.core.common import PandasError, ABCSeries import pandas.core.common as com import pandas.lib as lib @@ -305,8 +304,8 @@ def _get_merge_keys(self): left_drop = [] left, right = self.left, self.right - is_lkey = lambda x: isinstance(x, np.ndarray) and len(x) == len(left) - is_rkey = lambda x: isinstance(x, np.ndarray) and len(x) == len(right) + is_lkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(left) + is_rkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(right) # ugh, spaghetti re #733 if _any(self.left_on) and _any(self.right_on): @@ -669,7 +668,7 @@ def _prepare_blocks(self): join_blocks = unit.get_upcasted_blocks() type_map = {} for blk in join_blocks: - type_map.setdefault(blk.dtype, []).append(blk) + type_map.setdefault(blk.ftype, []).append(blk) blockmaps.append((unit, type_map)) return blockmaps @@ -718,11 +717,11 @@ def 
_merge_blocks(self, merge_chunks): funit, fblock = merge_chunks[0] fidx = funit.indexer - out_shape = list(fblock.values.shape) + out_shape = list(fblock.get_values().shape) n = len(fidx) if fidx is not None else out_shape[self.axis] - out_shape[0] = sum(len(blk) for unit, blk in merge_chunks) + out_shape[0] = sum(blk.get_merge_length() for unit, blk in merge_chunks) out_shape[self.axis] = n # Should use Fortran order?? @@ -732,7 +731,7 @@ def _merge_blocks(self, merge_chunks): sofar = 0 for unit, blk in merge_chunks: out_chunk = out[sofar: sofar + len(blk)] - com.take_nd(blk.values, unit.indexer, self.axis, out=out_chunk) + com.take_nd(blk.get_values(), unit.indexer, self.axis, out=out_chunk) sofar += len(blk) # does not sort @@ -889,8 +888,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, class _Concatenator(object): """ - Orchestrates a concatenation operation for BlockManagers, with little hacks - to support sparse data structures, etc. + Orchestrates a concatenation operation for BlockManagers """ def __init__(self, objs, axis=0, join='outer', join_axes=None, @@ -943,7 +941,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, if isinstance(sample, DataFrame): axis = 1 if axis == 0 else 0 - self._is_series = isinstance(sample, Series) + self._is_series = isinstance(sample, ABCSeries) if not ((0 <= axis <= sample.ndim)): raise AssertionError() @@ -963,8 +961,9 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, def get_result(self): if self._is_series and self.axis == 0: - new_data = com._concat_compat([x.values for x in self.objs]) + new_data = com._concat_compat([x.get_values() for x in self.objs]) name = com._consensus_name_attr(self.objs) + new_data = self._post_merge(new_data) return Series(new_data, index=self.new_axes[0], name=name) elif self._is_series: data = dict(zip(range(len(self.objs)), self.objs)) @@ -975,20 +974,25 @@ def get_result(self): return tmpdf else: new_data = 
self._get_concatenated_data() + new_data = self._post_merge(new_data) return self.objs[0]._from_axes(new_data, self.new_axes) + def _post_merge(self, data): + if isinstance(data, BlockManager): + data = data.post_merge(self.objs) + return data + def _get_fresh_axis(self): return Index(np.arange(len(self._get_concat_axis()))) def _prepare_blocks(self): reindexed_data = self._get_reindexed_data() + # we are consolidating as we go, so just add the blocks, no-need for dtype mapping blockmaps = [] for data in reindexed_data: data = data.consolidate() - - type_map = dict((blk.dtype, blk) for blk in data.blocks) - blockmaps.append(type_map) + blockmaps.append(data.get_block_map(typ='dict')) return blockmaps, reindexed_data def _get_concatenated_data(self): @@ -997,9 +1001,15 @@ def _get_concatenated_data(self): kinds = _get_all_block_kinds(blockmaps) try: + # need to conform to same other (joined) axes for block join new_blocks = [] for kind in kinds: - klass_blocks = [mapping.get(kind) for mapping in blockmaps] + klass_blocks = [] + for mapping in blockmaps: + l = mapping.get(kind) + if l is None: + l = [ None ] + klass_blocks.extend(l) stacked_block = self._concat_blocks(klass_blocks) new_blocks.append(stacked_block) @@ -1010,8 +1020,10 @@ def _get_concatenated_data(self): blk.ref_items = self.new_axes[0] new_data = BlockManager(new_blocks, self.new_axes) + # Eventual goal would be to move everything to PandasError or other explicit error except (Exception, PandasError): # EAFP + # should not be possible to fail here for the expected reason with # axis = 0 if self.axis == 0: # pragma: no cover @@ -1027,22 +1039,20 @@ def _get_reindexed_data(self): # HACK: ugh reindexed_data = [] - if isinstance(self.objs[0], SparseDataFrame): - pass - else: - axes_to_reindex = list(enumerate(self.new_axes)) - axes_to_reindex.pop(self.axis) + axes_to_reindex = list(enumerate(self.new_axes)) + axes_to_reindex.pop(self.axis) - for obj in self.objs: - data = obj._data - for i, ax in 
axes_to_reindex: - data = data.reindex_axis(ax, axis=i, copy=False) - reindexed_data.append(data) + for obj in self.objs: + data = obj._data.prepare_for_merge() + for i, ax in axes_to_reindex: + data = data.reindex_axis(ax, axis=i, copy=False) + reindexed_data.append(data) return reindexed_data def _concat_blocks(self, blocks): - values_list = [b.values for b in blocks if b is not None] + + values_list = [b.get_values() for b in blocks if b is not None] concat_values = com._concat_compat(values_list, axis=self.axis) if self.axis > 0: @@ -1085,13 +1095,11 @@ def _concat_single_item(self, objs, item): all_values = [] dtypes = set() - # le sigh - if isinstance(self.objs[0], SparseDataFrame): - objs = [x._data for x in self.objs] - for data, orig in zip(objs, self.objs): if item in orig: values = data.get(item) + if hasattr(values,'to_dense'): + values = values.to_dense() dtypes.add(values.dtype) all_values.append(values) else: diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 624f3ec41e1e5..9bca698cd4304 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -291,7 +291,7 @@ def _all_key(): def _convert_by(by): if by is None: by = [] - elif (np.isscalar(by) or isinstance(by, np.ndarray) + elif (np.isscalar(by) or isinstance(by, (np.ndarray, Series)) or hasattr(by, '__call__')): by = [by] else: diff --git a/pandas/tools/rplot.py b/pandas/tools/rplot.py index 5928472df1c22..1c3d17ee908cb 100644 --- a/pandas/tools/rplot.py +++ b/pandas/tools/rplot.py @@ -1,5 +1,6 @@ import random from copy import deepcopy +from pandas.core.common import _values_from_object import numpy as np from pandas.compat import range, zip @@ -498,7 +499,7 @@ def work(self, fig=None, ax=None): else: ax = fig.gca() x = self.data[self.aes['x']] - ax.hist(x, self.bins, facecolor=self.colour) + ax.hist(_values_from_object(x), self.bins, facecolor=self.colour) ax.set_xlabel(self.aes['x']) return fig, ax diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 
285ff312bbf5a..7af1dd657267a 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -7,7 +7,8 @@ import numpy as np from pandas.core.common import (isnull, _NS_DTYPE, _INT64_DTYPE, - is_list_like,_possibly_cast_to_timedelta) + is_list_like,_possibly_cast_to_timedelta, + _values_from_object, _maybe_box) from pandas.core.index import Index, Int64Index import pandas.compat as compat from pandas.compat import u @@ -1157,12 +1158,10 @@ def get_value(self, series, key): know what you're doing """ if isinstance(key, datetime): - # needed to localize naive datetimes - stamp = Timestamp(key, tz=self.tz) - return self._engine.get_value(series, stamp) + return self.get_value_maybe_box(series, key) try: - return Index.get_value(self, series, key) + return _maybe_box(self, Index.get_value(self, series, key), series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1175,11 +1174,19 @@ def get_value(self, series, key): return series.take(locs) try: - stamp = Timestamp(key, tz=self.tz) - return self._engine.get_value(series, stamp) - except (KeyError, ValueError): + return self.get_value_maybe_box(series, key) + except (TypeError, ValueError, KeyError): raise KeyError(key) + def get_value_maybe_box(self, series, key): + # needed to localize naive datetimes + if self.tz is not None: + key = Timestamp(key, tz=self.tz) + elif not isinstance(key, Timestamp): + key = Timestamp(key) + values = self._engine.get_value(_values_from_object(series), key) + return _maybe_box(self, values, series, key) + def get_loc(self, key): """ Get integer location for requested label @@ -1303,6 +1310,8 @@ def __getitem__(self, key): return self._simple_new(result, self.name, new_offset, self.tz) + _getitem_slice = __getitem__ + # Try to run function on index first, and then on elements of index # Especially important for group-by functionality def map(self, f): diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 2dfb6a0d3d723..bf9d7b2cf0b24 100644 --- 
a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -13,9 +13,9 @@ import pandas.tseries.frequencies as _freq_mod import pandas.core.common as com -from pandas.core.common import isnull, _NS_DTYPE, _INT64_DTYPE +from pandas.core.common import (isnull, _NS_DTYPE, _INT64_DTYPE, + _maybe_box, _values_from_object) from pandas import compat - from pandas.lib import Timestamp import pandas.lib as lib import pandas.tslib as tslib @@ -884,8 +884,9 @@ def get_value(self, series, key): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ + s = _values_from_object(series) try: - return super(PeriodIndex, self).get_value(series, key) + return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key) except (KeyError, IndexError): try: asdt, parsed, reso = parse_time_string(key, self.freq) @@ -907,15 +908,15 @@ def get_value(self, series, key): key = slice(pos[0], pos[1] + 1) return series[key] else: - key = Period(asdt, freq=self.freq) - return self._engine.get_value(series, key.ordinal) + key = Period(asdt, freq=self.freq).ordinal + return _maybe_box(self, self._engine.get_value(s, key), series, key) except TypeError: pass except KeyError: pass - key = Period(key, self.freq) - return self._engine.get_value(series, key.ordinal) + key = Period(key, self.freq).ordinal + return _maybe_box(self, self._engine.get_value(s, key), series, key) def get_loc(self, key): """ @@ -1052,6 +1053,8 @@ def __getitem__(self, key): return PeriodIndex(result, name=self.name, freq=self.freq) + _getitem_slice = __getitem__ + def _format_with_header(self, header, **kwargs): return header + self._format_native_types(**kwargs) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 1b75961cb2721..357c64407dc49 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -564,20 +564,21 @@ def test_resample_median_bug_1688(self): tm.assert_frame_equal(result, 
exp) def test_how_lambda_functions(self): - ts = _simple_ts('1/1/2000', '4/1/2000') + ts = _simple_ts('1/1/2000', '4/1/2000') + result = ts.resample('M', how=lambda x: x.mean()) exp = ts.resample('M', how='mean') tm.assert_series_equal(result, exp) - + self.assertRaises(Exception, ts.resample, 'M', - how=[lambda x: x.mean(), lambda x: x.std()]) - + how=[lambda x: x.mean(), lambda x: x.std(ddof=1)]) + result = ts.resample('M', how={'foo': lambda x: x.mean(), - 'bar': lambda x: x.std()}) + 'bar': lambda x: x.std(ddof=1)}) foo_exp = ts.resample('M', how='mean') bar_exp = ts.resample('M', how='std') - + tm.assert_series_equal(result['foo'], foo_exp) tm.assert_series_equal(result['bar'], bar_exp) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 46402ad859b05..172172f667eca 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1967,7 +1967,10 @@ def test_join_self(self): joined = index.join(index, how=kind) self.assert_(index is joined) -class TestLegacySupport(unittest.TestCase): +# infortunately, too much has changed to handle these legacy pickles +# class TestLegacySupport(unittest.TestCase): +class LegacySupport(object): + _multiprocess_can_split_ = True @classmethod @@ -2726,15 +2729,7 @@ def test_set_none_nan(self): def test_intercept_astype_object(self): # this test no longer makes sense as series is by default already M8[ns] - - # Work around NumPy 1.6 bugs - #result = self.series.astype(object) - #result2 = self.series.astype('O') - - expected = Series(self.series, dtype=object) - - #assert_series_equal(result, expected) - #assert_series_equal(result2, expected) + expected = self.series.astype('object') df = DataFrame({'a': self.series, 'b': np.random.randn(len(self.series))}) diff --git a/pandas/util/rwproperty.py b/pandas/util/rwproperty.py new file mode 100644 index 0000000000000..2d0dada68cc0e --- /dev/null +++ b/pandas/util/rwproperty.py @@ -0,0 +1,75 @@ +# 
Read & write properties +# +# Copyright (c) 2006 by Philipp "philiKON" von Weitershausen +# philikon@philikon.de +# +# Freely distributable under the terms of the Zope Public License, v2.1. +# +# See rwproperty.txt for detailed explanations +# +import sys + +__all__ = ['getproperty', 'setproperty', 'delproperty'] + +class rwproperty(object): + + def __new__(cls, func): + name = func.__name__ + + # ugly, but common hack + frame = sys._getframe(1) + locals = frame.f_locals + + if name not in locals: + return cls.createProperty(func) + + oldprop = locals[name] + if isinstance(oldprop, property): + return cls.enhanceProperty(oldprop, func) + + raise TypeError("read & write properties cannot be mixed with " + "other attributes except regular property objects.") + + # this might not be particularly elegant, but it's easy on the eyes + + @staticmethod + def createProperty(func): + raise NotImplementedError + + @staticmethod + def enhanceProperty(oldprop, func): + raise NotImplementedError + +class getproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(func, oldprop.fset, oldprop.fdel) + +class setproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(None, func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(oldprop.fget, func, oldprop.fdel) + +class delproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(None, None, func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(oldprop.fget, oldprop.fset, func) + +if __name__ == "__main__": + import doctest + doctest.testfile('rwproperty.txt') diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 2fe13d1cddbc8..f6909802f2d77 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -40,8 +40,8 @@ # reindex both axes setup = common_setup + """ -df = 
DataFrame(randn(1000, 1000)) -idx = np.arange(400, 700) +df = DataFrame(randn(10000, 10000)) +idx = np.arange(4000, 7000) """ frame_reindex_axis0 = Benchmark('df.reindex(idx)', setup) @@ -83,7 +83,7 @@ # iteritems (monitor no-copying behaviour) setup = common_setup + """ -df = DataFrame(randn(10000, 100)) +df = DataFrame(randn(10000, 1000)) def f(): if hasattr(df, '_item_cache'): diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index f38f42c89f5de..4b2f097c212f8 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -261,7 +261,7 @@ def f(g): groupby_frame_apply_overhead = Benchmark("df.groupby('key').apply(f)", setup, start_date=datetime(2011, 10, 1)) -groupbym_frame_apply = Benchmark("df.groupby(['key', 'key2']).apply(f)", setup, +groupby_frame_apply = Benchmark("df.groupby(['key', 'key2']).apply(f)", setup, start_date=datetime(2011, 10, 1)) #---------------------------------------------------------------------- diff --git a/vb_suite/sparse.py b/vb_suite/sparse.py index bfee959ab982f..1cb0f9233f7e9 100644 --- a/vb_suite/sparse.py +++ b/vb_suite/sparse.py @@ -11,8 +11,8 @@ K = 50 N = 50000 -rng = np.asarray(DateRange('1/1/2000', periods=N, - offset=datetools.Minute())) +rng = np.asarray(date_range('1/1/2000', periods=N, + freq='T')) # rng2 = np.asarray(rng).astype('M8[ns]').astype('i8')
Major refactor primarily to make Series inherit from NDFrame affects #4080, #3862, #816, #3217, #3386, #4463, #4204, #4118 , #4555 Preserves pickle compat very few tests were changed (and only for compat on return objects) a few performance enhancements, a couple of regressions (see bottom) _obviously this is a large change in terms of the codebase, but it brings more consistency between series/frame/panel (not all of this is there yet, but future changes are much easier)_ _Series is now like Frame in that it has a BlockManager (called SingleBlockManager), which holds a block (of any type we support). This introduced some overhead in doing certain operations, which I spent a lot of time optimizing away, further optimizations will come from cythonizing the core/internals, which should be straightforward at this point_ Highlites below: In 0.13.0 there is a major refactor primarily to subclass `Series` from `NDFrame`, which is the base class currently for `DataFrame` and `Panel`, to unify methods and behaviors. Series formerly subclassed directly from `ndarray`. 
- Refactor of series.py/frame.py/panel.py to move common code to generic.py - added `_setup_axes` to created generic NDFrame structures - moved methods - `from_axes,_wrap_array,axes,ix,shape,empty,swapaxes,transpose,pop` - `__iter__,keys,__contains__,__len__,__neg__,__invert__` - `convert_objects,as_blocks,as_matrix,values` - `__getstate__,__setstate__` (though compat remains in frame/panel) - `__getattr__,__setattr__` - `_indexed_same,reindex_like,align,where,mask,replace` - `filter` (also added axis argument to selectively filter on a different axis) - `reindex,reindex_axis` (which was the biggest change to make generic) - `truncate` (moved to become part of `NDFrame`) - These are API changes which make `Panel` more consistent with `DataFrame` - swapaxes on a Panel with the same axes specified now return a copy - support attribute access for setting - filter supports same api as original `DataFrame` filter - Reindex called with no arguments will now return a copy of the input object - Series now inherits from `NDFrame` rather than directly from `ndarray`. There are several minor changes that affect the API. - numpy functions that do not support the array interface will now return `ndarrays` rather than series, e.g. `np.diff` and `np.where` - `Series(0.5)` would previously return the scalar `0.5`, this is no longer supported - several methods from frame/series have moved to `NDFrame` (convert_objects,where,mask) - `TimeSeries` is now an alias for `Series`. the property `is_time_series` can be used to distinguish (if desired) - Refactor of Sparse objects to use BlockManager - Created a new block type in internals, `SparseBlock`, which can hold multi-dtypes and is non-consolidatable. `SparseSeries` and `SparseDataFrame` now inherit more methods from there hierarchy (Series/DataFrame), and no longer inherit from `SparseArray` (which instead is the object of the `SparseBlock`) - Sparse suite now supports integration with non-sparse data. 
Non-float sparse data is supportable (partially implemented) - Operations on sparse structures within DataFrames should preserve sparseness, merging type operations will convert to dense (and back to sparse), so might be somewhat inefficient - enable setitem on `SparseSeries` for boolean/integer/slices - `SparsePanels` implementation is unchanged (e.g. not using BlockManager, needs work) - added `ftypes` method to Series/DataFame, similar to `dtypes`, but indicates if the underlying is sparse/dense (as well as the dtype) - All `NDFrame` objects now have a `_prop_attributes`, which can be used to indcated various values to propogate to a new object from an existing (e.g. name in `Series` will follow more automatically now) Perf changed a bit primarily in groupby where a Series has to be reconstructed in order to be passed to the function (in some cases). I basically pass a Series-like class to the grouped function to see if it doesn't raise, if its ok, then it is used rather than a full Series in order to reduce overhead of the Series creation for each group. ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_multi_python | 109.3636 | 78.3370 | 1.3961 | frame_iteritems | 3.4664 | 2.0154 | 1.7200 | frame_fancy_lookup | 3.3991 | 1.6137 | 2.1064 | sparse_frame_constructor | 11.7100 | 5.3363 | 2.1944 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Target [c5d9495] : BUG: fix ujson handling of new series object Base [1b91f4f] : BUG: Fixed non-unique indexing memory allocation issue with .ix/.loc (GH4280) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3482
2013-04-29T14:37:35Z
2013-08-16T19:27:02Z
2013-08-16T19:27:01Z
2014-06-12T18:13:47Z
Support mrecarrays in DataFrame constructor
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 977dc9e2b56ff..9305fa18e23e2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -392,15 +392,6 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, mgr = self._init_mgr(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) - elif isinstance(data, ma.MaskedArray): - mask = ma.getmaskarray(data) - if mask.any(): - data, fill_value = _maybe_upcast(data, copy=True) - data[mask] = fill_value - else: - data = data.copy() - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) elif isinstance(data, np.ndarray): if data.dtype.names: data_columns, data = _rec_to_dict(data) @@ -408,8 +399,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) else: - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) + mgr = self._init_ndarray(_unmask(data), index, columns, + dtype=dtype, copy=copy) elif isinstance(data, list): if len(data) > 0: if index is None and isinstance(data[0], Series): @@ -5424,10 +5415,21 @@ def convert(v): return values +def _unmask(arr): + if isinstance(arr, ma.MaskedArray): + mask = ma.getmaskarray(arr) + if mask.any(): + arr, fill_value = _maybe_upcast(arr, copy=True) + arr[mask] = fill_value + return arr.copy() + return arr.copy() + return arr + + def _rec_to_dict(arr): if isinstance(arr, np.ndarray): columns = list(arr.dtype.names) - sdict = dict((k, arr[k]) for k in columns) + sdict = dict((k, _unmask(arr[k])) for k in columns) elif isinstance(arr, DataFrame): columns = list(arr.columns) sdict = dict((k, v.values) for k, v in arr.iteritems()) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7bafed216b9b9..63e3a59d09c35 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3,6 +3,8 @@ from datetime import 
datetime, timedelta, time from StringIO import StringIO import cPickle as pickle +import functools +import itertools import operator import os import unittest @@ -13,6 +15,7 @@ from numpy.random import randn import numpy as np import numpy.ma as ma +import numpy.ma.mrecords as mrecords from numpy.testing import assert_array_equal import pandas as pan @@ -2491,6 +2494,39 @@ def test_constructor_maskedarray_nonfloat(self): self.assertEqual(True, frame['A'][1]) self.assertEqual(False, frame['C'][2]) + def test_constructor_mrecarray(self): + """Ensure mrecarray produces frame identical to dict of masked arrays + """ + assert_fr_equal = functools.partial(assert_frame_equal, + check_index_type=True, + check_column_type=True, + check_frame_type=True) + arrays = [ + ('float', np.array([1.5, 2.0])), + ('int', np.array([1, 2])), + ('str', np.array(['abc', 'def'])), + ] + for name, arr in arrays[:]: + arrays.append(('masked1_' + name, + np.ma.masked_array(arr, mask=[False, True]))) + arrays.append(('masked_all', np.ma.masked_all((2,)))) + arrays.append(('masked_none', + np.ma.masked_array([1.0, 2.5], mask=False))) + + # call assert_frame_equal for all selections of 3 arrays + for comb in itertools.combinations(arrays, 3): + names, data = zip(*comb) + print(names) + mrecs = mrecords.fromarrays(data, names=names) + assert_fr_equal(DataFrame(mrecs), + DataFrame(dict(comb), columns=names)) + # specify columns + assert_fr_equal(DataFrame(mrecs, columns=names[::-1]), + DataFrame(dict(comb), columns=names[::-1])) + # specify index + assert_fr_equal(DataFrame(mrecs, index=[1, 2]), + DataFrame(dict(comb), columns=names, index=[1,2])) + def test_constructor_corner(self): df = DataFrame(index=[]) self.assertEqual(df.values.shape, (0, 0))
This is intended as a fix for #3478. ~~Sorry I do not have the time to write rigorous tests.~~
https://api.github.com/repos/pandas-dev/pandas/pulls/3479
2013-04-29T06:00:17Z
2013-09-13T21:39:53Z
null
2014-07-17T09:17:17Z
Read html tables into DataFrames
diff --git a/ci/install.sh b/ci/install.sh index 8d9ab3aac3374..cd897cf7313c2 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -75,6 +75,8 @@ if ( ! $VENV_FILE_AVAILABLE ); then pip install $PIP_ARGS xlrd>=0.9.0 pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' pip install $PIP_ARGS patsy + pip install $PIP_ARGS lxml + pip install $PIP_ARGS beautifulsoup4 # fool statsmodels into thinking pandas was already installed # so it won't refuse to install itself. We want it in the zipped venv diff --git a/doc/source/api.rst b/doc/source/api.rst index eb65e6087c66c..ca95a739ed661 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -50,6 +50,13 @@ File IO read_csv ExcelFile.parse +.. currentmodule:: pandas.io.html + +.. autosummary:: + :toctree: generated/ + + read_html + HDFStore: PyTables (HDF5) ~~~~~~~~~~~~~~~~~~~~~~~~~ .. currentmodule:: pandas.io.pytables diff --git a/doc/source/install.rst b/doc/source/install.rst index 742acff04148e..9d14d1b11c6b1 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -99,6 +99,12 @@ Optional Dependencies * `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__ * openpyxl version 1.6.1 or higher * Needed for Excel I/O + * `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables + * The differences between lxml and Beautiful Soup 4 are mostly speed (lxml + is faster), however sometimes Beautiful Soup returns what you might + intuitively expect. Both backends are implemented, so try them both to + see which one you like. They should return very similar results. + * Note that lxml requires Cython to build successfully .. 
note:: diff --git a/pandas/__init__.py b/pandas/__init__.py index 3c06db57a54ae..bf5bcc81bc21e 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -33,6 +33,7 @@ read_fwf, to_clipboard, ExcelFile, ExcelWriter) from pandas.io.pytables import HDFStore, Term, get_store, read_hdf +from pandas.io.html import read_html from pandas.util.testing import debug from pandas.tools.describe import value_range diff --git a/pandas/io/html.py b/pandas/io/html.py new file mode 100644 index 0000000000000..c29d16db8132b --- /dev/null +++ b/pandas/io/html.py @@ -0,0 +1,701 @@ +""":mod:`pandas.io.html` is a module containing functionality for dealing with +HTML IO. + +""" + +import os +import re +import numbers +import urllib2 +import contextlib +import collections +import urlparse + +try: + from importlib import import_module +except ImportError: + import_module = __import__ + +from pandas import DataFrame, MultiIndex +from pandas.io.parsers import _is_url + + +############# +# READ HTML # +############# +_RE_WHITESPACE = re.compile(r'([\r\n]+|\s{2,})') + + +def _remove_whitespace(s, regex=_RE_WHITESPACE): + """Replace extra whitespace inside of a string with a single space. + + Parameters + ---------- + s : str or unicode + The string from which to remove extra whitespace. + + regex : regex + The regular expression to use to remove extra whitespace. + + Returns + ------- + subd : str or unicode + `s` with all extra whitespace replaced with a single space. + """ + return regex.sub(' ', s.strip()) + + +def _get_skiprows_iter(skiprows): + """Get an iterator given an integer, slice or container. + + Parameters + ---------- + skiprows : int, slice, container + The iterator to use to skip rows; can also be a slice. + + Raises + ------ + TypeError + * If `skiprows` is not a slice, integer, or Container + + Raises + ------ + TypeError + * If `skiprows` is not a slice, integer, or Container + + Returns + ------- + it : iterable + A proper iterator to use to skip rows of a DataFrame. 
+ """ + if isinstance(skiprows, slice): + return range(skiprows.start or 0, skiprows.stop, skiprows.step or 1) + elif isinstance(skiprows, numbers.Integral): + return range(skiprows) + elif isinstance(skiprows, collections.Container): + return skiprows + else: + raise TypeError('{0} is not a valid type for skipping' + ' rows'.format(type(skiprows))) + + def _parse_columns(self, row): + return row.xpath('.//td|.//th') + + +class _HtmlFrameParser(object): + """Base class for parsers that parse HTML into DataFrames. + + Parameters + ---------- + io : str or file-like + This can be either a string of raw HTML, a valid URL using the HTTP, + FTP, or FILE protocols or a file-like object. + + match : str or regex + The text to match in the document. + + attrs : dict + List of HTML <table> element attributes to match. + + Attributes + ---------- + io : str or file-like + raw HTML, URL, or file-like object + + match : regex + The text to match in the raw HTML + + attrs : dict-like + A dictionary of valid table attributes to use to search for table + elements. + + Notes + ----- + To subclass this class effectively you must override the following methods: + * :func:`_build_doc` + * :func:`_text_getter` + * :func:`_parse_columns` + * :func:`_parse_table` + * :func:`_parse_rows` + See each method's respective documentation for details on their + functionality. + """ + def __init__(self, io, match, attrs): + self.io = io + self.match = match + self.attrs = attrs + + def parse_rows(self): + """Return a list of list of each table's rows. + + Returns + ------- + row_list : list of list of node-like + A list of each table's rows, which are DOM nodes (usually <th> or + <tr> elements). + """ + tables = self._parse_tables(self._build_doc(), self.match, self.attrs) + assert tables, 'No tables found' + return (self._parse_rows(table) for table in tables) + + def parse_raw_data(self): + """Return a list of the raw data from each table. 
+ + Returns + ------- + data : list of list of lists of str or unicode + Each table's data is contained in a list of lists of str or + unicode. + """ + return [self._parse_raw_data(rows, self._text_getter, + self._parse_columns) + for rows in self.parse_rows()] + + def _parse_raw_data(self, rows, text_getter, column_finder): + """Parse the raw data into a list of lists. + + Parameters + ---------- + rows : iterable of node-like + A list of row elements. + + text_getter : callable + A callable that gets the text from an individual node. This must be + defined by subclasses. + + column_finder : callable + A callable that takes a row node as input and returns a list of the + column node in that row. This must be defined by subclasses. + + Raises + ------ + AssertionError + * If `text_getter` is not callable + * If `column_finder` is not callable + + Returns + ------- + data : list of list of strings + """ + # callable is back in Python 3.2 + assert callable(text_getter), '"text_getter" must be callable' + assert callable(column_finder), '"column_finder" must be callable' + + data = [] + + for row in rows: + if _remove_whitespace(text_getter(row)): + col = [] + + for el in column_finder(row): + t = _remove_whitespace(text_getter(el)) + + if t: + col.append(t) + data.append(col) + + return data + + def _text_getter(self, obj): + """Return the text of an individual DOM node. + + Parameters + ---------- + obj : node-like + A DOM node. + + Returns + ------- + text : str or unicode + The text from an individual DOM node. + """ + raise NotImplementedError + + def _parse_columns(self, obj): + """Return the column elements from a row element. + + Parameters + ---------- + obj : node-like + + Returns + ------- + columns : list of node-like + These are the elements of each row, i.e., the columns. + """ + raise NotImplementedError + + def _parse_tables(self, doc, match, attrs): + """Return all tables from the parsed DOM. 
+ + Parameters + ---------- + doc : tree-like + The DOM from which to parse the table element. + + match : str or regular expression + The text to search for in the DOM tree. + + attrs : dict + A dictionary of table attributes that can be used to disambiguate + mutliple tables on a page. + + Raises + ------ + AssertionError + * If `match` does not match any text in the document. + + Returns + ------- + tables : list of node-like + A list of <table> elements to be parsed into raw data. + """ + raise NotImplementedError + + def _parse_rows(self, table): + """Return the list of row elements from the parsed table element. + + Parameters + ---------- + table : node-like + A table element that contains row elements. + + Returns + ------- + rows : list of node-like + A list row elements of a table, usually <tr> or <th> elements. + """ + raise NotImplementedError + + def _build_doc(self): + """Return a tree-like object that can be used to iterate over the DOM. + + Returns + ------- + obj : tree-like + """ + raise NotImplementedError + + +class _BeautifulSoupFrameParser(_HtmlFrameParser): + """HTML to DataFrame parser that uses BeautifulSoup under the hood. + + See Also + -------- + pandas.io.html._HtmlFrameParser + pandas.io.html._LxmlFrameParser + + Notes + ----- + Documentation strings for this class are in the base class + :class:`pandas.io.html._HtmlFrameParser`. 
+ """ + def __init__(self, *args, **kwargs): + super(_BeautifulSoupFrameParser, self).__init__(*args, **kwargs) + + def _text_getter(self, obj): + return obj.text + + def _parse_columns(self, row): + return row.find_all(('td', 'th')) + + def _parse_rows(self, table): + return table.find_all(('tr', 'thead', 'tfoot')) + + def _parse_tables(self, doc, match, attrs): + tables = doc.find_all('table', attrs=attrs) + assert tables, 'No tables found' + + tables = [table for table in tables + if table.find(text=match) is not None] + assert tables, "No tables found matching '{0}'".format(match.pattern) + return tables + + def _build_doc(self): + if _is_url(self.io): + try: + with contextlib.closing(urllib2.urlopen(self.io)) as url: + raw_text = url.read() + except urllib2.URLError: + raise ValueError('Invalid URL: "{0}"'.format(self.io)) + elif hasattr(self.io, 'read'): + raw_text = self.io.read() + elif os.path.isfile(self.io): + with open(self.io) as f: + raw_text = f.read() + elif isinstance(self.io, basestring): + raw_text = self.io + else: + raise ValueError("Cannot read object of" + " type '{0}'".format(type(self.io))) + assert raw_text, 'No text parsed from document' + + from bs4 import BeautifulSoup, SoupStrainer + strainer = SoupStrainer('table') + return BeautifulSoup(raw_text, parse_only=strainer) + + +def _build_node_xpath_expr(attrs): + """Build an xpath expression to simulate bs4's ability to pass in kwargs to + search for attributes when using the lxml parser. + + Parameters + ---------- + attrs : dict + A dict of HTML attributes. These are NOT checked for validity. + + Returns + ------- + expr : unicode + An XPath expression that checks for the given HTML attributes. 
+ """ + # give class attribute as class_ because class is a python keyword + if 'class_' in attrs: + attrs['class'] = attrs.pop('class_') + + s = (u"@{k}='{v}'".format(k=k, v=v) for k, v in attrs.iteritems()) + return u'[{0}]'.format(' and '.join(s)) + + +_re_namespace = {'re': 'http://exslt.org/regular-expressions'} + + +class _LxmlFrameParser(_HtmlFrameParser): + """HTML to DataFrame parser that uses lxml under the hood. + + Warning + ------- + This parser can only handle HTTP, FTP, and FILE urls. + + See Also + -------- + _HtmlFrameParser + _BeautifulSoupFrameParser + + Notes + ----- + Documentation strings for this class are in the base class + :class:`_HtmlFrameParser`. + """ + def __init__(self, *args, **kwargs): + super(_LxmlFrameParser, self).__init__(*args, **kwargs) + + def _text_getter(self, obj): + return obj.text_content() + + def _parse_columns(self, row): + return row.xpath('.//td|.//th') + + def _parse_rows(self, table): + return table.xpath('(.//tr|.//thead|.//tfoot)[normalize-space()]') + + def _parse_tables(self, doc, match, kwargs): + pattern = match.pattern + + # check all descendants for the given pattern + check_all_expr = u'//*' + if pattern: + check_all_expr += u"[re:test(text(), '{0}')]".format(pattern) + + # go up the tree until we find a table + check_table_expr = '/ancestor::table' + xpath_expr = check_all_expr + check_table_expr + + # if any table attributes were given build an xpath expression to + # search for them + if kwargs: + xpath_expr += _build_node_xpath_expr(kwargs) + tables = doc.xpath(xpath_expr, namespaces=_re_namespace) + assert tables, "No tables found matching regex '{0}'".format(pattern) + return tables + + def _build_doc(self): + """ + Raises + ------ + IOError + * If a valid URL is detected, but for some reason cannot be parsed. + This is probably due to a faulty or non-existent internet + connection. + ValueError + * If a URL that lxml cannot parse is passed. 
+ + See Also + -------- + pandas.io.html._HtmlFrameParser._build_doc + """ + from lxml.html import parse, fromstring + + try: + # try to parse the input in the simplest way + return parse(self.io) + except (UnicodeDecodeError, IOError): + # something went wrong, check for not-a-url because it's probably a + # huge string blob + if not _is_url(self.io): + return fromstring(self.io) + elif urlparse.urlparse(self.io).scheme not in ('http', 'ftp', + 'file'): + raise ValueError('"{0}" does not have a valid URL' + ' protocol'.format(self.io)) + else: + raise IOError('"{0}" is a valid URL, so you probably are not' + ' properly connected to the' + ' internet'.format(self.io)) + + +def _data_to_frame(data, header, index_col, infer_types, skiprows): + """Parse a BeautifulSoup table into a DataFrame. + + Parameters + ---------- + data : list of lists of str or unicode + The raw data to be placed into a DataFrame. This is a list of lists of + strings or unicode. If it helps, it can be thought of as a matrix of + strings instead. + + header : int or None + An integer indicating the row to use for the column header or None + indicating no header will be used. + + index_col : int or None + An integer indicating the column to use for the index or None + indicating no column will be used. + + infer_types : bool + Whether to convert numbers and dates. + + skiprows : collections.Container or int or slice + Iterable used to skip rows. + + Returns + ------- + df : DataFrame + A DataFrame containing the data from `data` + + Raises + ------ + ValueError + * If `skiprows` is not found in the rows of the parsed DataFrame. + + Raises + ------ + ValueError + * If `skiprows` is not found in the rows of the parsed DataFrame. + + See Also + -------- + read_html + + Notes + ----- + The `data` parameter is guaranteed not to be a list of empty lists. 
+ """ + df = DataFrame(data) + + if skiprows is not None: + it = _get_skiprows_iter(skiprows) + + try: + df = df.drop(it) + except ValueError: + raise ValueError('Labels {0} not found when trying to skip' + ' rows'.format(it)) + + if header is not None: + header_rows = df.iloc[header] + + if header_rows.ndim == 2: + names = header_rows.index + df.columns = MultiIndex.from_arrays(header_rows.values, + names=names) + else: + df.columns = header_rows + + df = df.drop(df.index[header]) + + # convert to numbers/dates where possible + # must be sequential since dates trump numbers if both args are given + if infer_types: + df = df.convert_objects(convert_numeric=True) + df = df.convert_objects(convert_dates='coerce') + + if index_col is not None: + cols = df.columns[index_col] + + try: + cols = cols.tolist() + except AttributeError: + pass + + # drop by default + df.set_index(cols, inplace=True) + + return df + + +_possible_parsers = {'lxml': _LxmlFrameParser, + 'bs4': _BeautifulSoupFrameParser} + + +def read_html(io, match='.+', flavor='bs4', header=None, index_col=None, + skiprows=None, infer_types=True, attrs=None): + r"""Read an HTML table into a DataFrame. + + Parameters + ---------- + io : str or file-like + A string or file like object that can be either a url, a file-like + object, or a raw string containing HTML. Note that lxml only accepts + the http, ftp and file url protocols. + + match : str or regex, optional + The set of tables containing text matching this regex or string will be + returned. Unless the HTML is extremely simple you will probably need to + pass a non-empty string here. Defaults to '.+' (match any non-empty + string). The default value will return all tables contained on a page. + This value is converted to a regular expression so that there is + consistent behavior between Beautiful Soup and lxml. + + flavor : str, {'lxml', 'bs4'} + The parsing engine to use under the hood. 
lxml is faster and bs4 + (Beautiful Soup 4) is better at parsing nested tags, which are not + uncommon when parsing tables. Defaults to 'bs4'. + + header : int or array-like or None, optional + The row (or rows for a MultiIndex) to use to make the columns headers. + Note that this row will be removed from the data. Defaults to None. + + index_col : int or array-like or None, optional + The column to use to make the index. Note that this column will be + removed from the data. Defaults to None. + + skiprows : int or collections.Container or slice or None, optional + If an integer is given then skip this many rows after parsing the + column header. If a sequence of integers is given skip those specific + rows (0-based). Defaults to None, i.e., no rows are skipped. Note that + + .. code-block:: python + + skiprows == 0 + + yields the same result as + + .. code-block:: python + + skiprows is None + + If `skiprows` is a positive integer, say :math:`n`, then + it is treated as "skip :math:`n` rows", *not* as "skip the + :math:`n^\textrm{th}` row". + + infer_types : bool, optional + Whether to convert numeric types and date-appearing strings to numbers + and dates, respectively. Defaults to True. + + attrs : dict or None, optional + This is a dictionary of attributes that you can pass to use to identify + the table in the HTML. These are not checked for validity before being + passed to lxml or Beautiful Soup. However, these attributes must be + valid HTML table attributes to work correctly. Defaults to None. For + example, + + .. code-block:: python + + attrs = {'id': 'table'} + + is a valid attribute dictionary because the 'id' HTML tag attribute is + a valid HTML attribute for *any* HTML tag as per `this document + <http://www.w3.org/TR/html-markup/global-attributes.html>`__. + + .. code-block:: python + + attrs = {'asdf': 'table'} + + is *not* a valid attribute dictionary because 'asdf' is not a valid + HTML attribute even if it is a valid XML attribute. 
Valid HTML 4.01 + table attributes can be found `here + <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A + working draft of the HTML 5 spec can be found `here + <http://www.w3.org/TR/html-markup/table.html>`__. It contains the + latest information on table attributes for the modern web. + + Returns + ------- + dfs : list of DataFrames + A list of DataFrames, each of which is the parsed data from each of the + tables on the page. + + Notes + ----- + There's as little cleaning of the data as possible due to the heterogeneity + and general disorder of HTML on the web. + + Expect some cleanup after you call this function. For example, + you might need to pass `infer_types=False` and perform manual conversion if + the column names are converted to NaN when you pass the `header=0` + argument. We try to assume as little as possible about the structure of the + table and push the idiosyncrasies of the HTML contained in the table to + you, the user. + + This function only searches for <table> elements and only for <tr> and <th> + rows and <td> elements within those rows. This could be extended by + subclassing one of the parser classes contained in :mod:`pandas.io.html`. + + Similar to :func:`read_csv` the `header` argument is applied **after** + `skiprows` is applied. + + This function will *always* return a list of :class:`DataFrame` *or* + it will fail, e.g., it will *not* return an empty list. 
+ +    Examples +    -------- +    Parse a table from a list of failed banks from the FDIC: + +    >>> from pandas import read_html, DataFrame +    >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html' +    >>> dfs = read_html(url, match='Florida', attrs={'id': 'table'}) +    >>> assert dfs  # will not be empty if the call to read_html doesn't fail +    >>> assert isinstance(dfs, list)  # read_html returns a list of DataFrames +    >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs)) + +    Parse some spam information from the USDA: + +    >>> url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&' +    ...        'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam') +    >>> dfs = read_html(url, match='Water', header=0) +    >>> assert dfs +    >>> assert isinstance(dfs, list) +    >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs)) + +    You can pass nothing to the `match` argument: + +    >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html' +    >>> dfs = read_html(url) +    >>> print(len(dfs))  # this will most likely be greater than 1 + +    Try a different parser: + +    >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html' +    >>> dfs = read_html(url, 'Florida', flavor='lxml', attrs={'id': 'table'}) +    >>> assert dfs +    >>> assert isinstance(dfs, list) +    >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs)) +    """ +    # annoying type check here because we don't want to spend time parsing HTML +    # only to end up failing because of an invalid value of skiprows +    if isinstance(skiprows, numbers.Integral): +        assert skiprows >= 0, ('cannot skip rows starting from the end of the ' +                               'data (you passed a negative value)') + +    valid_backends = _possible_parsers.keys() +    assert flavor in valid_backends, ("'{0}' is not a valid backend, the valid" +                                      " backends are " +                                      "{1}".format(flavor, valid_backends)) +    parser = _possible_parsers[flavor] + +    # bonus: re.compile is idempotent under function iteration so you can pass +    # a compiled regex to it and it will return 
itself + p = parser(io, re.compile(match), attrs) + return [_data_to_frame(data, header, index_col, infer_types, skiprows) + for data in p.parse_raw_data()] diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 89f892daf9389..161e7a521b997 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -4,7 +4,7 @@ from StringIO import StringIO import re from itertools import izip -from urlparse import urlparse +import urlparse import csv import numpy as np @@ -166,14 +166,26 @@ class DateConversionError(Exception): """ % (_parser_params % _fwf_widths) +_VALID_URLS = set(urlparse.uses_relative + urlparse.uses_netloc + + urlparse.uses_params) +_VALID_URLS.discard('') + + def _is_url(url): + """Check to see if a URL has a valid protocol. + + Parameters + ---------- + url : str or unicode + + Returns + ------- + isurl : bool + If `url` has a valid protocol return True otherwise False. """ - Very naive check to see if url is an http(s), ftp, or file location. - """ - parsed_url = urlparse(url) - if parsed_url.scheme in ['http', 'file', 'ftp', 'https']: - return True - else: + try: + return urlparse.urlparse(url).scheme in _VALID_URLS + except: return False diff --git a/pandas/io/tests/data/failed_banklist.html b/pandas/io/tests/data/failed_banklist.html new file mode 100644 index 0000000000000..ea2a5c27996bf --- /dev/null +++ b/pandas/io/tests/data/failed_banklist.html @@ -0,0 +1,5314 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> +<head> + +<!-- Instruction: In the title tag change Product Title to the approved product name --> + <title>FDIC: Failed Bank List</title> + <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" /> + + <link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" /> + + +<style> + +* {margin:0; padding:0; outline:none} +body 
{font:Arial,Helvetica; margin:10px; background-color:#fff} + +.sortable {width:925px; margin:0 auto 15px; font:13px Arial, Helvetica} +.sortable th {background-color:#003366; text-align:left; color:#fff} +.sortable th h3 {font-size:13px; padding:2px} +.sortable td {padding:2px} +.sortable .head h3 {background: url('images/sort.gif') no-repeat 5px center; cursor:pointer; padding-left:15px; text-decoration:underline} +.sortable .desc, .sortable .asc {background-color:#404040; font-style:italic; text-decoration:underline} +.sortable .desc h3 {background: url('images/desc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px} +.sortable .asc h3 {background: url('images/asc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px} +.sortable .head:hover, .sortable .desc:hover, .sortable .asc:hover {color:#fff} +.sortable .evenrow td {background:#fff} +.sortable .oddrow td {background:#fff} +.sortable td.evenselected {background:#ebebeb} +.sortable td.oddselected {background:#ebebeb} + +#controls {width:925px; margin:0 auto} +#perpage {float:left; width:190px} +#perpage select {float:left; font-size:11px} +#perpage span {float:left; margin:2px 0 0 5px} +#navigation {float:left; width:340px; text-align:center} +#navigation img {cursor:pointer} +#text {float:left; width:190px; text-align:right; margin-top:2px; font:13px Arial, Helvetica} +</style> +</head> +<body bgcolor="#ffffff" text="#000000"> + + +<!-- BEGIN HEADER INCLUDE --> +<!-- Instruction: The following statement is the header include statement. Do not revise this code. 
--> +<!-- begin header --> +<!-- Last Updated Date: 1-21-2011 Time: 9:00AM Version: 1.5 --> +<!--<script type="text/javascript" src="http://www.google.com/jsapi?key=ABQIAAAARFKFRzFbjPYbUgzSrdVg0hRrrNc1sGQv42gDojQ1Ll8KWy8MgRRQv_0u-KVSwjYfghDs3QJR40ZHtA"></script> +<script type="text/javascript"> +google.load("jquery", "1.4.2"); +</script>--> +<script type="text/javascript" src="/js/jquery-1.4.2.min.js"></script> +<script type="text/javascript" src="/header/js/navigation.js"></script> + +<!-- googleac.html includes Autocomplete functionality --> + <!-- Autocomplete files --> +<link rel="stylesheet" type="text/css" href="/header/css/jquery.autocomplete.css" /> +<script type="text/javascript" src="/header/js/jquery.autocomplete-1.4.2.js"></script> + +<script type="text/javascript"> +function findValue(li) { + if( li == null ) return alert("No match!"); + + // if coming from an AJAX call, let's use the Id as the value + if( !!li.extra ) var sValue = li.extra[0]; + + // otherwise, let's just display the value in the text box + else var sValue = li.selectValue; + + $('#googlesearch').submit(); + +} +function findValue2(li) { + if( li == null ) return alert("No match!"); + + // if coming from an AJAX call, let's use the Id as the value + if( !!li.extra ) var sValue = li.extra[0]; + + // otherwise, let's just display the value in the text box + else var sValue = li.selectValue; + + + $('#googlesearch2').submit(); +} +function selectItem(li) { + findValue(li); +} +function selectItem2(li) { + findValue2(li); +} + +$().ready(function() { + + function log(event, data, formatted) { + $("<li>").html( !data ? "No match!" 
: "Selected: " + formatted).appendTo("#result"); + } + + function formatItem(row) { + return row[0] + " (<strong>id: " + row[1] + "</strong>)"; + } + function formatResult(row) { + return row[0].replace(/(<.+?>)/gi, ''); + } + + + + + $("#newSearch").autocomplete("/searchjs.asp", { + width: 179, + autoFill: false, + //delay:10, + minChars:2, + cacheLength: 10, + onFindValue:findValue, + onItemSelect: selectItem, + selectFirst: false + + }); + + $("#search2").autocomplete("searchjs.asp", { + width: 160, + autoFill: false, + //delay:10, + minChars:2, + cacheLength: 10, + onFindValue:findValue2, + onItemSelect: selectItem2, + selectFirst: false + + }); + +}); + + + + + + +</script> + +<!-- Omniture SiteCatalyst Code --> +<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script> +<!-- FORESEE Code --> +<script type="text/javascript" src="/foresee/foresee-trigger.js"></script> + +<link rel="stylesheet" type="text/css" href="/header/css/header_style.css" /> +<!--[if lt IE 7]> + <style media="screen" type="text/css"> + #site-container { + height: 100%; + } + #footer-container { + bottom: -1px; + } + </style> + <![endif]--> + +<div id ="site-container"> + <div id="header-container"> <!-- start of header container --> + <!-- everything inside the header is held within this container --> + <div id="header-nav"> + <div id="header-nav-left-container"> + + <div id="header-nav-left"> + <a href="/" alt="FDIC Logo" title="FDIC Home - Federal Deposit Insurance Corporation"> + <div id="fdic-logo" class="homeOff"></div> + </a> + </div> <!-- close header-nav-left --> + + <div id="header-nav-right"> + <div id="header-nav-right-top"> + <div id="fdic-title"></div> + </div> + <div id="header-nav-right-bottom"> + <h1>Each depositor insured to at least $250,000 per insured bank</h1> + </div> + </div> <!-- close header-nav-right --> + + </div> <!-- close header-nav-left-container --> + + <div id="header-nav-right-container"> + <div id="right-container-top"> + <div 
id="web2"> + <ul> + <li><a href="/social.html?site=http://twitter.com/FDICgov"><img src="/header/images/web2/twitter.png" alt="Twitter" title="Twitter" height="24px"/></a></li> + <li><a href="/social.html?site=http://www.facebook.com/FDICgov"><img src="/header/images/web2/facebook.png" alt="Facebook" title="Facebook" height="24px"/></a></li> + <li><a href="/social.html?site=http://www.youtube.com/user/FDICchannel"><img src="/header/images/web2/youtube.png" alt="YouTube" title="YouTube" height="24px"/></a></li> + <li><a href="/rss.html"><img src="/header/images/web2/rss.png" alt="RSS" title="RSS" height="24px"/></a></span></li> + <li><a href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC"><img src="/header/images/web2/subscribe.png" alt="Subscribe" title="Subscribe" height="24px"/></a></li> + </ul> + </div> + </div> <!-- close right-container-right-top --> + + <div id="right-container-center"> + <div id="advanced-search" title="Advanced Search"><a href="/search/advanced.html" class="search">Advanced Search</a></div> + </div> <!-- close right-container-right-center --> + + <div id="right-container-bottom"> + <div id="search"> + <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov"> + <fieldset> + <div class="form" alt="Search box for FDIC.gov" title="Search box for FDIC.gov"> + <div class="search2"> + <label for="fdic_search"></label> + <label for="searchsubmit"></label> + </div> + <input id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." 
onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" /> + <input id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" /> + <input value="date:D:L:d1" name="sort" type="hidden" /> + <input value="xml_no_dtd" name="output" type="hidden" /> + <input value="UTF-8" name="ie" type="hidden" /> + <input value="UTF-8" name="oe" type="hidden" /> + <input value="wwwGOV_new" name="client" type="hidden" /> + <input value="wwwGOV_new" name="proxystylesheet" type="hidden" /> + <input value="default" name="site" type="hidden" /> + </div> + </fieldset> + </form> + </div> <!-- close id="search" --> + </div> <!-- close right-container-right-bottom --> + </div> <!-- close header-nav-right-container --> + + </div> <!-- close header-nav **This is the top part of the header** --> + + <div id="top-nav"> <!-- start of top-nav class **This is the main navigation in header, color is light blue**--> + <!-- top-nav unordered list --> + <!-- lists all top-nav titles --> + <!-- **************************************************************** --> + <ul> + <li><span id="home" title="Home"><a href="/">Home</a></span></li> + <li><span>|</span></li> + <li><span id="deposit" title="Deposit Insurance"><a href="/deposit/">Deposit Insurance</a></span></li> + <li><span>|</span></li> + <li><span id="consumers" title="Consumer Protection"><a href="/consumers/">Consumer Protection</a></span></li> + <li><span>|</span></li> + <li><span id="bank" title="Industry Analysis"><a href="/bank/">Industry Analysis</a></span></li> + <li><span>|</span></li> + <li><span id="regulations" title="Regulations & Examinations"><a href="/regulations/">Regulations &amp; Examinations</a></span></li> + <li><span>|</span></li> + <li><span id="buying" title="Asset Sales"><a href="/buying/">Asset Sales</a></span></li> + <li><span>|</span></li> + <li><span id="news" title="News & Events"><a href="/news/">News &amp; 
Events</a></span></li> + <li><span>|</span></li> + <li><span id="about" title="About FDIC"><a href="/about/">About FDIC</a></span></li> + </ul> + <!-- **************************************************************** --> + <!-- close top-nav unordered list --> + </div> <!-- close top-nav id --> + + <div id="sub-nav-container"> <!-- start of sub-nav-container **sub-silo of main navigation, color is gold --> + <div id="sub-nav"> <!-- start of div id sub-nav --> + + <!-- lists all sub-nav ul tags --> + <!-- **************************************************************** --> + <!-- deposit sub --> + <div id="deposit_sub" class="sub-wrapper"> <!-- div 1 for "Deposit" --> + <ul> + <li><span id="deposit_sub1" title="Bank Find"><a href="http://research.fdic.gov/bankfind/">BankFind</a></span></li> + <li><span id="deposit_sub2" title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></span></li> + <li><span id="deposit_sub3" title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></span></li> + <li><span id="deposit_sub4" title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></span></li> + <li><span id="deposit_sub5" title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></span></li> + </ul> + </div> <!-- close div 1--> + + <!-- consumer sub --> + <div id="consumers_sub" class="sub-wrapper"> <!-- div 2 for "Consumer" --> + <ul> + <li><span id="consumers_sub1" title="Consumer News &amp; Information"><a href="/consumers/consumer/">Consumer News &amp; Information</a></span></li> + <li><span id="consumers_sub2" title="Loans &amp; Mortgages"><a href="/consumers/loans/">Loans &amp; Mortgages</a></span></li> + <li><span id="consumers_sub3" title="Banking &amp; Your Money"><a href="/consumers/banking/">Banking &amp; Your Money</a></span></li> + <li><span id="consumers_sub4" title="Financial Education &amp; 
Literacy"><a href="/consumers/education/">Financial Education &amp; Literacy</a></span></li> + <li><span id="consumers_sub5" title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></span></li> + <li><span id="consumers_sub6" title="Identity Theft &amp; Fraud"><a href="/consumers/theft/">Identity Theft &amp; Fraud</a></span></li> + <li><span id="consumers_sub7" title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></span></li> + </ul> + </div> <!-- close div 2 --> + + <!-- industry sub --> + <div id="bank_sub" class="sub-wrapper"> <!-- div 3 for "Industry" --> + <ul> + <li><span id="bank_sub1" title="Bank Data &amp; Statistics"><a href="/bank/statistical/">Bank Data &amp; Statistics</a></span></li> + <li><span id="bank_sub2" title="Research &amp; Analysis"><a href="/bank/analytical/">Research &amp; Analysis</a></span></li> + <li><span id="bank_sub3" title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></span></li> + </ul> + </div> <!-- close div 3 --> + + <!-- regulations sub --> + <div id="regulations_sub" class="sub-wrapper"> <!-- div 4 for "Regulations" --> + <ul> + <li><span id="regulations_sub1" title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></span></li> + <li><span id="regulations_sub2" title="Laws &amp; Regulations"><a href="/regulations/laws/">Laws &amp; Regulations</a></span></li> + <li><span id="regulations_sub3" title="Resources for Bank Officers &amp; Directors"><a href="/regulations/resources/">Resources for Bank Officers &amp; Directors</a></span></li> + <li><span id="regulations_sub4" title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></span></li> + <li><span id="regulations_sub5" title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></span></li> + <li><span id="regulations_sub6" title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner 
Training Programs</a></span></li> + </ul> + </div> <!-- close div 4 --> + + <!-- asset sub --> + <div id="buying_sub" class="sub-wrapper"> <!-- div 5 for "Asset" --> + <ul> + <li><span id="buying_sub1" title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></span></li> + <li><span id="buying_sub2" title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></span></li> + <li><span id="buying_sub3" title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></span></li> + <li><span id="buying_sub4" title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></span></li> + <li><span id="buying_sub5" title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></span></li> + <li><span id="buying_sub6" title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></span></li> + </ul> + </div> <!-- close div 5 --> + + <!-- news sub --> + <div id="news_sub" class="sub-wrapper"> <!-- div 6 for "News" --> + <ul> + <li><span id="news_sub1" title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></span></li> + <li><span id="news_sub2" title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></span></li> + <li><span id="news_sub3" title="Conferences &amp; Events"><a href="/news/conferences/">Conferences &amp; Events</a></span></li> + <li><span id="news_sub4" title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></span></li> + + <!-- include this lnk for year 2013 and remove 2012 link below <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2013/">Special Alerts</a></span></li>--> + <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></span></li> + <li><span id="news_sub6" title="Letters to the Editor/Opinion Editorials"><a 
href="/news/letters/">Letters to the Editor/Opinion Editorials</a></span></li> + <li><span id="news_sub7" title="Speeches &amp; Testimony"><a href="/news/news/speeches/">Speeches &amp; Testimony</a></span></li> + </ul> + </div> <!-- close div 6 --> + + <!-- news sub --> + <div id="about_sub" class="sub-wrapper"> <!-- div 6 for "News" --> + <ul> + <li><span id="about_sub1" title="Mission &amp; Purpose"><a href="/about/index.html#1">Mission &amp; Purpose</a></span></li> + <li><span id="about_sub2" title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li> + <li><span id="about_sub3" title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li> + <li><span id="about_sub4" title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li> + <li><span id="about_sub5" title="Plans &amp; Reports"><a href="/about/index.html#5">Plans &amp; Reports</a></span></li> + <li><span id="about_sub6" title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li> + <li><span id="about_sub7" title="Diversity with the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li> + </ul> + </div> <!-- close div 6 --> + + <!-- **************************************************************** --> + </div> <!-- close of id - sub-nav --> + </div> <!-- close of id - sub-nav-container --> + </div> <!-- end of the header-container --> +<div id="body"> +<!-- end header --> +<font face="arial, helvetica, sans-serif" size="2"> +<!-- END HEADER INCLUDE --> + +<!-- Instruction: The following meta tags are for the keywords and document author. If desired change "name of the document" owner to the actual name of the owner and change "add keywords here" to a list of keywords separated by a comma. 
--> +<meta name="author" content="DRR" /> +<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, +banking services, assuming institution, acquiring institution, claims" /> + + +<link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" /> + +<img src="http://www.fdic.gov/images/spacer.gif" width="1" height="2" alt="" border="0" /><br /> +<table width="670" cellspacing="0" cellpadding="0" border="0"> + <tr> + <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td> + <td width="14" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="14" height="1" alt="" border="0" /></td> + <td width="739" bgcolor="#cccccc"><span class="noDisplay"><img src="http://www.fdic.gov/images/spacer.gif" width="739" height="1" alt="" border="0" /></span></td> + <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td> + </tr> + + <tr> + <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="24" alt="" border="0" /><br /></td> + <td></td> + <td width="739"> + + <!-- BEGIN BREAD CRUMB TRAIL --> + + <!-- Instruction: Change the "Tertiary" link text to the correct third-level menu page name and the href value to the appropriate relative path to the third-level menu page. --> + + <!-- Instruction: Change the "Product Title" text to the name of the approved product title. 
--> + + <font face="arial, helvetica,sans-serif" size="1"><a href="/index.html">Home</a> &gt; <a href="/bank/index.html">Industry + Analysis</a> &gt; <a href="/bank/individual/failed/index.html">Failed Banks</a> &gt; Failed + Bank List</font><br /> + + <!-- END BREAD CRUMB TRAIL --> + + </td> + <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td> + </tr> + + <tr> + <td colspan="4" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td> + </tr> +</table> + +<table width="640" cellspacing="0" cellpadding="0" border="0"> + <tr> + <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td> + <td colspan="2"> + <br /> +<!-- DRR BEGIN Product Title & Body--> +<!-- DRR BEGIN Product Title & Body--> +<table width="100%" cellpadding="0" cellspacing="0" border="0"> +<!-- BEGIN PRODUCT TITLE --> +<tr> + <td> + <!-- Instruction: Change the "Product Title" text to the name of the approved product title. --> + + <font face="arial, helvetica, sans-serif" size="4" color="#003366"><strong><a name="top">Failed + Bank List</a></strong></font> + <hr size="1" color="#003366" noshade /> + + + </td> +</tr> + +<!-- END PRODUCT TITLE --> +<!-- DOCUMENT BODY BEGINS HERE --> +<tr> + <td valign="top"> + <table border="0" cellpadding="0" cellspacing="0" width="900"> + + <tr> + <td> <font face="arial, helvetica, sans-serif" size="2"> + <br />The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. 
+ <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> + displays point of contact information related to failed banks.<br /><br /> + + This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions +</a><br /><br /> + + <!-- <a href="banklist.csv">Open Bank List as CSV file</a> --> + <a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="http://www.fdic.gov/excel.html">Excel + Help</a>) + <br /> + <script type="text/javascript"> + <!-- + document.writeln("<br /><em>Click arrows next to headers to sort in Ascending or Descending order.</em><br />"); +//--> + </script><br /> + </font> + </td> + </tr> + </table> + </td> +</tr> + +<tr> + <td> + <table cellpadding="0" cellspacing="0" bordercolordark="#003366" bordercolorlight="ebebeb" border="1" id="table" class="sortable"> + <thead> + <tr bgcolor="#003366"> + <th id="Institution"><h3>Bank Name</h3></th> + <th class="nosort" id="city" style="padding-left:3px"><h3>City</h3></th> + <th id="state"><h3>State</h3></th> + <th id="CERT #" class="nosort" style="padding-left:3px"><h3>CERT #</h3></th> + <th id="AI" style="padding-left:3px"><h3>Acquiring Institution</h3></th> + <th id="Closing"><h3>Closing Date</h3></th> + <th id="Updated"><h3>Updated Date</h3></th> + </tr> + </thead> + <tbody> + <tr> + +<tr> + <td><a href="chipola.html">Chipola Community Bank</a></td> + <td headers="city">Marianna</td> + <td headers="state">FL</td> + <td headers="CERT #">58034</td> + <td headers="AI">First Federal Bank of Florida</td> + <td headers="Closing Date">April 19, 2013</td> + <td headers="Updated">April 23, 2013</td> +</tr> +<tr> + <td><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td> + <td headers="city">Orange Park</td> + <td 
headers="state">FL</td> + <td headers="CERT #">26680</td> + <td headers="AI">FirstAtlantic Bank</td> + <td headers="Closing Date">April 19, 2013</td> + <td headers="Updated">April 23, 2013</td> +</tr> +<tr> + <td><a href="firstfederal-ky.html">First Federal Bank</a></td> + <td headers="city">Lexington</td> + <td headers="state">KY</td> + <td headers="CERT #">29594</td> + <td headers="AI">Your Community Bank</td> + <td headers="Closing Date">April 19, 2013</td> + <td headers="Updated">April 23, 2013</td> +</tr> +<td><a href="goldcanyon.html">Gold Canyon Bank</a></td> + <td headers="city">Gold Canyon</td> + <td headers="state">AZ</td> + <td headers="CERT #">58066</td> + <td headers="AI">First Scottsdale Bank, +National Association</td> + <td headers="Closing Date">April 5, 2013</td> + <td headers="Updated">April 9, 2013</td> +</tr> +<tr> + <td><a href="frontier-ga.html">Frontier Bank</a></td> + <td headers="city">LaGrange</td> + <td headers="state">GA</td> + <td headers="CERT #">16431</td> + <td headers="AI">HeritageBank of the South</td> + <td headers="Closing Date">March 8, 2013</td> + <td headers="Updated">March 26, 2013</td> +</tr> +<tr> + <td><a href="covenant-il.html">Covenant Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">22476</td> + <td headers="AI">Liberty Bank and Trust Company</td> + <td headers="Closing Date">February 15, 2013</td> + <td headers="Updated">March 4, 2013</td> +</tr> +<tr> + <td><a href="1stregents.html">1st Regents Bank</a></td> + <td headers="city">Andover</td> + <td headers="state">MN</td> + <td headers="CERT #">57157</td> + <td headers="AI">First Minnesota Bank</td> + <td headers="Closing Date">January 18, 2013</td> + <td headers="Updated">February 28, 2013</td> +</tr> +<tr> + <td><a href="westside.html">Westside Community Bank</a></td> + <td headers="city">University Place</td> + <td headers="state">WA</td> + <td headers="CERT #">33997</td> + <td headers="AI">Sunwest Bank</td> + <td 
headers="Closing Date">January 11, 2013</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td> + <td headers="city">Sunrise Beach</td> + <td headers="state">MO</td> + <td headers="CERT #">27331</td> + <td headers="AI">Bank of Sullivan</td> + <td headers="Closing Date">December 14, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="hometown.html">Hometown Community Bank</a></td> + <td headers="city">Braselton</td> + <td headers="state">GA</td> + <td headers="CERT #">57928</td> + <td headers="AI">CertusBank, National Association</td> + <td headers="Closing Date">November 16, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="cfnb.html">Citizens First National Bank</a></td> + <td headers="city">Princeton</td> + <td headers="state">IL</td> + <td headers="CERT #">3731</td> + <td headers="AI">Heartland Bank and Trust Company</td> + <td headers="Closing Date">November 2, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> + <tr> + <td><a href="heritage_fl.html">Heritage Bank of Florida</a></td> + <td headers="city">Lutz</td> + <td headers="state">FL</td> + <td headers="CERT #">35009</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">November 2, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="novabank.html">NOVA Bank</a></td> + <td headers="city">Berwyn</td> + <td headers="state">PA</td> + <td headers="CERT #">27148</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">October 26, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="excelbank.html">Excel Bank</a></td> + <td headers="city">Sedalia</td> + <td headers="state">MO</td> + <td headers="CERT #">19189</td> + <td headers="AI">Simmons First National Bank</td> + <td headers="Closing Date">October 19, 2012</td> + <td headers="Updated">January 24, 2013</td> 
+</tr> +<tr> + <td><a href="firsteastside.html">First East Side Savings Bank</a></td> + <td headers="city">Tamarac</td> + <td headers="state">FL</td> + <td headers="CERT #">28144</td> + <td headers="AI">Stearns Bank N.A.</td> + <td headers="Closing Date">October 19, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="gulfsouth.html">GulfSouth Private Bank</a></td> + <td headers="city">Destin</td> + <td headers="state">FL</td> + <td headers="CERT #">58073</td> + <td headers="AI">SmartBank</td> + <td headers="Closing Date">October 19, 2012</td> + <td headers="Updated">January 24, 2013</td> +</tr> +<tr> + <td><a href="firstunited.html">First United Bank</a></td> + <td headers="city">Crete</td> + <td headers="state">IL</td> + <td headers="CERT #">20685</td> + <td headers="AI">Old Plank Trail Community Bank, National Association</td> + <td headers="Closing Date">September 28, 2012</td> + <td headers="Updated">November 15, 2012</td> +</tr> +<tr> + <td><a href="truman.html">Truman Bank</a></td> + <td headers="city">St. 
Louis</td> + <td headers="state">MO</td> + <td headers="CERT #">27316</td> + <td headers="AI">Simmons First National Bank</td> + <td headers="Closing Date">September 14, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="firstcommbk_mn.html">First Commercial Bank</a></td> + <td headers="city">Bloomington</td> + <td headers="state">MN</td> + <td headers="CERT #">35246</td> + <td headers="AI">Republic Bank & Trust Company</td> + <td headers="Closing Date">September 7, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="waukegan.html">Waukegan Savings Bank</a></td> + <td headers="city">Waukegan</td> + <td headers="state">IL</td> + <td headers="CERT #">28243</td> + <td headers="AI"> First Midwest Bank</td> + <td headers="Closing Date">August 3, 2012</td> + <td headers="Updated">October 11, 2012</td> +</tr> +<tr> + <td><a href="jasper.html">Jasper Banking Company</a></td> + <td headers="city">Jasper</td> + <td headers="state">GA</td> + <td headers="CERT #">16240</td> + <td headers="AI">Stearns Bank N.A.</td> + <td headers="Closing Date">July 27, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">27986</td> + <td headers="AI">Hinsdale Bank & Trust Company</td> + <td headers="Closing Date">July 20, 2012</td> + <td headers="Updated">January 14, 2013</td> +</tr> +<tr> + <td><a href="heartland.html">Heartland Bank</a></td> + <td headers="city">Leawood</td> + <td headers="state">KS</td> + <td headers="CERT #">1361</td> + <td headers="AI">Metcalf Bank</td> + <td headers="Closing Date">July 20, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="cherokee.html">First Cherokee State Bank</a></td> + <td headers="city">Woodstock</td> + <td headers="state">GA</td> + <td headers="CERT 
#">32711</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">July 20, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="georgiatrust.html">Georgia Trust Bank</a></td> + <td headers="city">Buford</td> + <td headers="state">GA</td> + <td headers="CERT #">57847</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">July 20, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td> + <td headers="city">Naples</td> + <td headers="state">FL</td> + <td headers="CERT #">57096</td> + <td headers="AI">First National Bank of the Gulf Coast</td> + <td headers="Closing Date">July 20, 2012</td> + <td headers="Updated">January 7, 2013</td> +</tr> +<tr> + <td><a href="glasgow.html">Glasgow Savings Bank</a></td> + <td headers="city">Glasgow</td> + <td headers="state">MO</td> + <td headers="CERT #">1056</td> + <td headers="AI"> Regional Missouri Bank</td> + <td headers="Closing Date">July 13, 2012</td> + <td headers="Updated">October 11, 2012</td> +</tr> +<tr> + <td><a href="montgomery.html">Montgomery Bank & Trust</a></td> + <td headers="city">Ailey</td> + <td headers="state">GA</td> + <td headers="CERT #">19498</td> + <td headers="AI"> Ameris Bank</td> + <td headers="Closing Date">July 6, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td> + <td headers="city">Lynchburg</td> + <td headers="state">TN</td> + <td headers="CERT #">1690</td> + <td headers="AI">Clayton Bank and Trust</td> + <td headers="Closing Date">June 15, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="securityexchange.html">Security Exchange Bank</a></td> + <td headers="city">Marietta</td> + <td headers="state">GA</td> + <td headers="CERT #">35299</td> + <td headers="AI">Fidelity Bank</td> + <td headers="Closing 
Date">June 15, 2012</td> + <td headers="Updated">October 10, 2012</td> +</tr> +<tr> + <td><a href="putnam.html">Putnam State Bank</a></td> + <td headers="city">Palatka</td> + <td headers="state">FL</td> + <td headers="CERT #">27405</td> + <td headers="AI">Harbor Community Bank</td> + <td headers="Closing Date">June 15, 2012</td> + <td headers="Updated">October 10, 2012</td> +</tr> +<tr> + <td><a href="waccamaw.html">Waccamaw Bank</a></td> + <td headers="city">Whiteville</td> + <td headers="state">NC</td> + <td headers="CERT #">34515</td> + <td headers="AI">First Community Bank</td> + <td headers="Closing Date">June 8, 2012</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="ftsb.html">Farmers' and Traders' State Bank</a></td> + <td headers="city">Shabbona</td> + <td headers="state">IL</td> + <td headers="CERT #">9257</td> + <td headers="AI">First State Bank</td> + <td headers="Closing Date">June 8, 2012</td> + <td headers="Updated">October 10, 2012</td> +</tr> +<tr> + <td><a href="carolina.html">Carolina Federal Savings Bank</a></td> + <td headers="city">Charleston</td> + <td headers="state">SC</td> + <td headers="CERT #">35372</td> + <td headers="AI">Bank of North Carolina</td> + <td headers="Closing Date">June 8, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="firstcapital.html">First Capital Bank</a></td> + <td headers="city">Kingfisher</td> + <td headers="state">OK</td> + <td headers="CERT #">416</td> + <td headers="AI">F & M Bank</td> + <td headers="Closing Date">June 8, 2012</td> + <td headers="Updated">October 10, 2012</td> +</tr> +<tr> + <td><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td> + <td headers="city">Sylacauga</td> + <td headers="state">AL</td> + <td headers="CERT #">35224</td> + <td headers="AI">Southern States Bank</td> + <td headers="Closing Date">May 18, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a 
href="securitybank.html">Security Bank, National Association</a></td> + <td headers="city">North Lauderdale</td> + <td headers="state">FL</td> + <td headers="CERT #">23156</td> + <td headers="AI">Banesco USA</td> + <td headers="Closing Date">May 4, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="palmdesert.html">Palm Desert National Bank</a></td> + <td headers="city">Palm Desert</td> + <td headers="state">CA</td> + <td headers="CERT #">23632</td> + <td headers="AI">Pacific Premier Bank</td> + <td headers="Closing Date">April 27, 2012</td> + <td headers="Updated">August 31, 2012</td> +</tr> +<tr> + <td><a href="plantation.html">Plantation Federal Bank</a></td> + <td headers="city">Pawleys Island</td> + <td headers="state">SC</td> + <td headers="CERT #">32503</td> + <td headers="AI">First Federal Bank</td> + <td headers="Closing Date">April 27, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td> + <td headers="city">Maple Grove</td> + <td headers="state">MN</td> + <td headers="CERT #">31495</td> + <td headers="AI">Great Southern Bank</td> + <td headers="Closing Date">April 27, 2012</td> + <td headers="Updated">October 17, 2012</td> +</tr> +<tr> + <td><a href="harvest.html">HarVest Bank of Maryland</a></td> + <td headers="city">Gaithersburg</td> + <td headers="state">MD</td> + <td headers="CERT #">57766</td> + <td headers="AI">Sonabank</td> + <td headers="Closing Date">April 27, 2012</td> + <td headers="Updated">October 17, 2012</td> +</tr> +<tr> + <td><a href="easternshore.html">Bank of the Eastern Shore</a></td> + <td headers="city">Cambridge</td> + <td headers="state">MD</td> + <td headers="CERT #">26759</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">April 27, 2012</td> + <td headers="Updated">October 17, 2012</td> +</tr> +<tr> + <td><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td> + <td 
headers="city">Fort Lee</td> + <td headers="state">NJ</td> + <td headers="CERT #">35527</td> + <td headers="AI">Alma Bank</td> + <td headers="Closing Date">April 20, 2012</td> + <td headers="Updated">August 31, 2012</td> +</tr> +<tr> + <td><a href="fidelity.html">Fidelity Bank</a></td> + <td headers="city">Dearborn</td> + <td headers="state">MI</td> + <td headers="CERT #">33883</td> + <td headers="AI">The Huntington National Bank</td> + <td headers="Closing Date">March 30, 2012</td> + <td headers="Updated">August 9, 2012</td> +</tr> +<tr> + <td><a href="premier-il.html">Premier Bank</a></td> + <td headers="city">Wilmette</td> + <td headers="state">IL</td> + <td headers="CERT #">35419</td> + <td headers="AI">International Bank of Chicago</td> + <td headers="Closing Date">March 23, 2012</td> + <td headers="Updated">October 17, 2012</td> +</tr> +<tr> + <td><a href="covenant.html">Covenant Bank & Trust</a></td> + <td headers="city">Rock Spring</td> + <td headers="state">GA</td> + <td headers="CERT #">58068</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">March 23, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="newcity.html">New City Bank </a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">57597</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 9, 2012</td> + <td headers="Updated">October 29, 2012</td> +</tr> +<tr> + <td><a href="global.html">Global Commerce Bank</a></td> + <td headers="city">Doraville</td> + <td headers="state">GA</td> + <td headers="CERT #">34046</td> + <td headers="AI">Metro City Bank</td> + <td headers="Closing Date">March 2, 2012</td> + <td headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="homesvgs.html">Home Savings of America</a></td> + <td headers="city">Little Falls</td> + <td headers="state">MN</td> + <td headers="CERT #">29178</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing 
Date">February 24, 2012</td> + <td headers="Updated">December 17, 2012</td> +</tr> +<tr> + <td><a href="cbg.html">Central Bank of Georgia</a></td> + <td headers="city">Ellaville</td> + <td headers="state">GA</td> + <td headers="CERT #">5687</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">February 24, 2012</td> + <td headers="Updated">August 9, 2012</td> +</tr> +<tr> + <td><a href="scbbank.html">SCB Bank</a></td> + <td headers="city">Shelbyville</td> + <td headers="state">IN</td> + <td headers="CERT #">29761</td> + <td headers="AI">First Merchants Bank, National Association</td> + <td headers="Closing Date">February 10, 2012</td> + <td headers="Updated">March 25, 2013</td> +</tr> +<tr> + <td><a href="cnbt.html">Charter National Bank and Trust</a></td> + <td headers="city">Hoffman Estates</td> + <td headers="state">IL</td> + <td headers="CERT #">23187</td> + <td headers="AI">Barrington Bank & Trust +Company, National Association</td> + <td headers="Closing Date">February 10, 2012</td> + <td headers="Updated">March 25, 2013</td> +</tr> +<tr> + <td><a href="bankeast.html">BankEast</a></td> + <td headers="city">Knoxville</td> + <td headers="state">TN</td> + <td headers="CERT #">19869</td> + <td headers="AI">U.S.Bank National Association </td> + <td headers="Closing Date">January 27, 2012</td> + <td headers="Updated">March 8, 2013</td> +</tr> +<tr> + <td><a href="patriot-mn.html">Patriot Bank Minnesota</a></td> + <td headers="city">Forest Lake</td> + <td headers="state">MN</td> + <td headers="CERT #">34823</td> + <td headers="AI">First Resource Bank</td> + <td headers="Closing Date">January 27, 2012</td> + <td headers="Updated">September 12, 2012</td> +</tr> +<tr> + <td><a href="tcb.html">Tennessee Commerce Bank +</a></td> + <td headers="city">Franklin</td> + <td headers="state">TN</td> + <td headers="CERT #">35296</td> + <td headers="AI">Republic Bank & Trust Company</td> + <td headers="Closing Date">January 27, 2012</td> + <td 
headers="Updated">November 20, 2012</td> +</tr> +<tr> + <td><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td> + <td headers="city">Jacksonville</td> + <td headers="state">FL</td> + <td headers="CERT #">16579</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">January 27, 2012</td> + <td headers="Updated">September 12, 2012</td> +</tr> +<tr> + <td><a href="americaneagle.html">American Eagle Savings Bank</a></td> + <td headers="city">Boothwyn</td> + <td headers="state">PA</td> + <td headers="CERT #">31581</td> + <td headers="AI">Capital Bank, N.A.</td> + <td headers="Closing Date">January 20, 2012</td> + <td headers="Updated">January 25, 2013</td> +</tr> +<tr> + <td><a href="firststatebank-ga.html">The First State Bank</a></td> + <td headers="city">Stockbridge</td> + <td headers="state">GA</td> + <td headers="CERT #">19252</td> + <td headers="AI">Hamilton State Bank</td> + <td headers="Closing Date">January 20, 2012</td> + <td headers="Updated">January 25, 2013</td> +</tr> +<tr> + <td><a href="cfsb.html">Central Florida State Bank</a></td> + <td headers="city">Belleview</td> + <td headers="state">FL</td> + <td headers="CERT #">57186</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">January 20, 2012</td> + <td headers="Updated">January 25, 2013</td> +</tr> +<tr> + <td><a href="westernnatl.html">Western National Bank</a></td> + <td headers="city">Phoenix</td> + <td headers="state">AZ</td> + <td headers="CERT #">57917</td> + <td headers="AI"> Washington Federal</td> + <td headers="Closing Date">December 16, 2011</td> + <td headers="Updated">August 13, 2012</td> +</tr> +<tr> +<td><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td> + <td headers="city">Crestview</td> + <td headers="state">FL</td> + <td headers="CERT #">58343</td> + <td headers="AI"> Summit Bank</td> + <td headers="Closing Date">December 16, 2011</td> + <td 
headers="Updated">September 12, 2012</td> +</tr> +<tr> + <td><a href="centralprog.html">Central Progressive Bank</a></td> + <td headers="city">Lacombe</td> + <td headers="state">LA</td> + <td headers="CERT #">19657</td> + <td headers="AI"> First NBC Bank</td> + <td headers="Closing Date">November 18, 2011</td> + <td headers="Updated">August 13, 2012</td> +</tr> +<tr> + <td><a href="polkcounty.html">Polk County Bank</a></td> + <td headers="city">Johnston</td> + <td headers="state">IA</td> + <td headers="CERT #">14194</td> + <td headers="AI">Grinnell State Bank</td> + <td headers="Closing Date">November 18, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="rockmart.html">Community Bank of Rockmart</a></td> + <td headers="city">Rockmart</td> + <td headers="state">GA</td> + <td headers="CERT #">57860</td> + <td headers="AI">Century Bank of Georgia</td> + <td headers="Closing Date">November 10, 2011</td> + <td headers="Updated">August 13, 2012</td> +</tr> +<tr> + <td><a href="sunfirst.html">SunFirst Bank</a></td> + <td headers="city">Saint George</td> + <td headers="state">UT</td> + <td headers="CERT #">57087</td> + <td headers="AI">Cache Valley Bank</td> + <td headers="Closing Date">November 4, 2011</td> + <td headers="Updated">November 16, 2012</td> +</tr> +<tr> + <td><a href="midcity.html">Mid City Bank, Inc.</a></td> + <td headers="city">Omaha</td> + <td headers="state">NE</td> + <td headers="CERT #">19397</td> + <td headers="AI">Premier Bank</td> + <td headers="Closing Date">November 4, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="allamerican.html ">All American Bank</a></td> + <td headers="city">Des Plaines</td> + <td headers="state">IL</td> + <td headers="CERT #">57759</td> + <td headers="AI">International Bank of Chicago</td> + <td headers="Closing Date">October 28, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="commbanksco.html">Community Banks of 
Colorado</a></td> + <td headers="city">Greenwood Village</td> + <td headers="state">CO</td> + <td headers="CERT #">21132</td> + <td headers="AI">Bank Midwest, N.A.</td> + <td headers="Closing Date">October 21, 2011</td> + <td headers="Updated">January 2, 2013</td> +</tr> +<tr> + <td><a href="commcapbk.html">Community Capital Bank</a></td> + <td headers="city">Jonesboro</td> + <td headers="state">GA</td> + <td headers="CERT #">57036</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">October 21, 2011</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="decatur.html">Decatur First Bank</a></td> + <td headers="city">Decatur</td> + <td headers="state">GA</td> + <td headers="CERT #">34392</td> + <td headers="AI">Fidelity Bank</td> + <td headers="Closing Date">October 21, 2011</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="oldharbor.html">Old Harbor Bank</a></td> + <td headers="city">Clearwater</td> + <td headers="state">FL</td> + <td headers="CERT #">57537</td> + <td headers="AI">1st United Bank</td> + <td headers="Closing Date">October 21, 2011</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="countrybank.html">Country Bank</a></td> + <td headers="city">Aledo</td> + <td headers="state">IL</td> + <td headers="CERT #">35395</td> + <td headers="AI">Blackhawk Bank & Trust</td> + <td headers="Closing Date">October 14, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="firststatebank-nj.html">First State Bank</a></td> + <td headers="city">Cranford</td> + <td headers="state">NJ</td> + <td headers="CERT #">58046</td> + <td headers="AI">Northfield Bank</td> + <td headers="Closing Date">October 14, 2011</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td> + <td headers="city">Asheville</td> + <td headers="state">NC</td> + <td headers="CERT 
#">32347</td> + <td headers="AI">Bank of North Carolina</td> + <td headers="Closing Date">October 14, 2011</td> + <td headers="Updated">November 8, 2012</td> +</tr> +<tr> + <td><a href="piedmont-ga.html">Piedmont Community Bank</a></td> + <td headers="city">Gray</td> + <td headers="state">GA</td> + <td headers="CERT #">57256</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">October 14, 2011</td> + <td headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="sunsecurity.html">Sun Security Bank</a></td> + <td headers="city">Ellington</td> + <td headers="state">MO</td> + <td headers="CERT #">20115</td> + <td headers="AI"> Great Southern Bank </td> + <td headers="Closing Date">October 7, 2011</td> + <td headers="Updated">November 7, 2012</td> +</tr> +<tr> + <td><a href="riverbank.html">The RiverBank</a></td> + <td headers="city">Wyoming</td> + <td headers="state">MN</td> + <td headers="CERT #">10216</td> + <td headers="AI"> Central Bank </td> + <td headers="Closing Date">October 7, 2011</td> + <td headers="Updated">November 7, 2012</td> +</tr> +<tr> + <td><a href="firstintlbank.html">First International Bank</a></td> + <td headers="city">Plano</td> + <td headers="state">TX</td> + <td headers="CERT #">33513</td> + <td headers="AI"> American First National Bank </td> + <td headers="Closing Date">September 30, 2011</td> + <td headers="Updated">October 9, 2012</td> +</tr> +<tr> + <td><a href="cbnc.html">Citizens Bank of Northern California</a></td> + <td headers="city">Nevada City</td> + <td headers="state">CA</td> + <td headers="CERT #">33983</td> + <td headers="AI"> Tri Counties Bank</td> + <td headers="Closing Date">September 23, 2011</td> + <td headers="Updated">October 9, 2012</td> +</tr> +<tr> + <td><a href="boc-va.html">Bank of the Commonwealth</a></td> + <td headers="city">Norfolk</td> + <td headers="state">VA</td> + <td headers="CERT #">20408</td> + <td headers="AI">Southern Bank and Trust Company</td> + <td 
headers="Closing Date">September 23, 2011</td> + <td headers="Updated">October 9, 2012</td> +</tr> +<tr> + <td><a href="fnbf.html">The First National Bank of Florida</a></td> + <td headers="city">Milton</td> + <td headers="state">FL</td> + <td headers="CERT #">25155</td> + <td headers="AI">CharterBank</td> + <td headers="Closing Date">September 9, 2011</td> + <td headers="Updated">September 6, 2012</td> +</tr> +<tr> + <td><a href="creekside.html">CreekSide Bank</a></td> + <td headers="city">Woodstock</td> + <td headers="state">GA</td> + <td headers="CERT #">58226</td> + <td headers="AI">Georgia Commerce Bank</td> + <td headers="Closing Date">September 2, 2011</td> + <td headers="Updated">September 6, 2012</td> +</tr> +<tr> + <td><a href="patriot.html">Patriot Bank of Georgia</a></td> + <td headers="city">Cumming</td> + <td headers="state">GA</td> + <td headers="CERT #">58273</td> + <td headers="AI">Georgia Commerce Bank</td> + <td headers="Closing Date">September 2, 2011</td> + <td headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="firstchoice-il.html">First Choice Bank</a></td> + <td headers="city">Geneva</td> + <td headers="state">IL</td> + <td headers="CERT #">57212</td> + <td headers="AI">Inland Bank & Trust</td> + <td headers="Closing Date">August 19, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="firstsouthern-ga.html">First Southern National Bank</a></td> + <td headers="city">Statesboro</td> + <td headers="state">GA</td> + <td headers="CERT #">57239</td> + <td headers="AI">Heritage Bank of the South</td> + <td headers="Closing Date">August 19, 2011</td> + <td headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="lydian.html">Lydian Private Bank</a></td> + <td headers="city">Palm Beach</td> + <td headers="state">FL</td> + <td headers="CERT #">35356</td> + <td headers="AI">Sabadell United Bank, N.A.</td> + <td headers="Closing Date">August 19, 2011</td> + <td headers="Updated">November 2, 
2012</td> +</tr> +<tr> + <td><a href="publicsvgs.html">Public Savings Bank</a></td> + <td headers="city">Huntingdon Valley</td> + <td headers="state">PA</td> + <td headers="CERT #">34130</td> + <td headers="AI">Capital Bank, N.A.</td> + <td headers="Closing Date">August 18, 2011</td> + <td headers="Updated">August 15, 2012</td> +</tr> +<tr> + <td><a href="fnbo.html">The First National Bank of Olathe</a></td> + <td headers="city">Olathe</td> + <td headers="state">KS</td> + <td headers="CERT #">4744</td> + <td headers="AI">Enterprise Bank & Trust</td> + <td headers="Closing Date">August 12, 2011</td> + <td headers="Updated">August 23, 2012</td> +</tr> +<tr> + <td><a href="whitman.html">Bank of Whitman</a></td> + <td headers="city">Colfax</td> + <td headers="state">WA</td> + <td headers="CERT #">22528</td> + <td headers="AI">Columbia State Bank</td> + <td headers="Closing Date">August 5, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="shorewood.html">Bank of Shorewood</a></td> + <td headers="city">Shorewood</td> + <td headers="state">IL</td> + <td headers="CERT #">22637</td> + <td headers="AI">Heartland Bank and Trust Company</td> + <td headers="Closing Date">August 5, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="integra.html">Integra Bank National Association</a></td> + <td headers="city">Evansville</td> + <td headers="state">IN</td> + <td headers="CERT #">4392</td> + <td headers="AI">Old National Bank</td> + <td headers="Closing Date">July 29, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="bankmeridian.html">BankMeridian, N.A.</a></td> + <td headers="city">Columbia</td> + <td headers="state">SC</td> + <td headers="CERT #">58222</td> + <td headers="AI">SCBT National Association</td> + <td headers="Closing Date">July 29, 2011</td> + <td headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="vbb.html">Virginia Business Bank</a></td> + <td 
headers="city">Richmond</td> + <td headers="state">VA</td> + <td headers="CERT #">58283</td> + <td headers="AI">Xenith Bank</td> + <td headers="Closing Date">July 29, 2011</td> + <td headers="Updated">October 9, 2012</td> +</tr> +<tr> + <td><a href="bankofchoice.html">Bank of Choice</a></td> + <td headers="city">Greeley</td> + <td headers="state">CO</td> + <td headers="CERT #">2994</td> + <td headers="AI">Bank Midwest, N.A.</td> + <td headers="Closing Date">July 22, 2011</td> + <td headers="Updated">September 12, 2012</td> +</tr> +<tr> + <td><a href="landmark.html">LandMark Bank of Florida</a></td> + <td headers="city">Sarasota</td> + <td headers="state">FL</td> + <td headers="CERT #">35244</td> + <td headers="AI">American Momentum Bank</td> + <td headers="Closing Date">July 22, 2011</td> + <td headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="southshore.html">Southshore Community Bank</a></td> + <td headers="city">Apollo Beach</td> + <td headers="state">FL</td> + <td headers="CERT #">58056</td> + <td headers="AI">American Momentum Bank</td> + <td headers="Closing Date">July 22, 2011</td> + <td headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="summitbank.html">Summit Bank</a></td> + <td headers="city">Prescott </td> + <td headers="state">AZ</td> + <td headers="CERT #">57442 </td> + <td headers="AI">The Foothills Bank</td> + <td width="125" headers="Closing Date">July 15, 2011</td> + <td width="125" headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="firstpeoples.html">First Peoples Bank</a></td> + <td headers="city">Port St. 
Lucie </td> + <td headers="state">FL</td> + <td headers="CERT #">34870 </td> + <td headers="AI">Premier American Bank, N.A.</td> + <td width="125" headers="Closing Date">July 15, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="hightrust.html">High Trust Bank</a></td> + <td headers="city">Stockbridge </td> + <td headers="state">GA</td> + <td headers="CERT #">19554 </td> + <td headers="AI">Ameris Bank</td> + <td width="125" headers="Closing Date">July 15, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="onegeorgia.html">One Georgia Bank</a></td> + <td headers="city">Atlanta </td> + <td headers="state">GA</td> + <td headers="CERT #">58238 </td> + <td headers="AI">Ameris Bank</td> + <td width="125" headers="Closing Date">July 15, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="signaturebank.html">Signature Bank</a></td> + <td headers="city">Windsor </td> + <td headers="state">CO</td> + <td headers="CERT #">57835 </td> + <td headers="AI">Points West Community Bank</td> + <td width="125" headers="Closing Date">July 8, 2011</td> + <td width="125" headers="Updated">October 26, 2012</td> +</tr> +<tr> + <td><a href="coloradocapital.html">Colorado Capital Bank</a></td> + <td headers="city">Castle Rock </td> + <td headers="state">CO</td> + <td headers="CERT #">34522</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td width="125" headers="Closing Date">July 8, 2011</td> + <td width="125" headers="Updated">January 15, 2013</td> +</tr> +<tr> + <td><a href="firstchicago.html">First Chicago Bank & Trust</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">27935</td> + <td headers="AI">Northbrook Bank & Trust Company</td> + <td width="125" headers="Closing Date">July 8, 2011</td> + <td width="125" headers="Updated">September 9, 2012</td> +</tr> +<tr> + <td><a 
href="mountain.html">Mountain Heritage Bank</a></td> + <td headers="city">Clayton</td> + <td headers="state">GA</td> + <td headers="CERT #">57593</td> + <td headers="AI">First American Bank and Trust Company</td> + <td width="125" headers="Closing Date">June 24, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td> + <td headers="city">Tampa</td> + <td headers="state">FL</td> + <td headers="CERT #">27583</td> + <td headers="AI">Stonegate Bank</td> + <td width="125" headers="Closing Date">June 17, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="mcintoshstate.html">McIntosh State Bank</a></td> + <td headers="city">Jackson</td> + <td headers="state">GA</td> + <td headers="CERT #">19237</td> + <td headers="AI">Hamilton State Bank</td> + <td width="125" headers="Closing Date">June 17, 2011</td> + <td width="125" headers="Updated">November 2, 2012</td> +</tr> +<tr> + <td><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a> + </td> + <td headers="city">Charleston</td> + <td headers="state">SC</td> + <td headers="CERT #">58420</td> + <td headers="AI">First Citizens Bank and Trust Company, Inc.</td> + <td width="125" headers="Closing Date">June 3, 2011</td> + <td width="125" headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="firstheritage.html">First Heritage Bank</a></td> + <td headers="city">Snohomish</td> + <td headers="state">WA</td> + <td headers="CERT #">23626</td> + <td headers="AI">Columbia State Bank</td> + <td width="125" headers="Closing Date">May 27, 2011</td> + <td width="125" headers="Updated">January 28, 2013</td> +</tr> +<tr> + <td><a href="summit.html">Summit Bank</a></td> + <td headers="city">Burlington</td> + <td headers="state">WA</td> + <td headers="CERT #">513</td> + <td headers="AI">Columbia State Bank</td> + <td width="125" headers="Closing Date">May 20, 2011</td> + <td width="125" 
headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="fgbc.html">First Georgia Banking Company</a></td> + <td headers="city">Franklin</td> + <td headers="state">GA</td> + <td headers="CERT #">57647</td> + <td headers="AI">CertusBank, National Association</td> + <td width="125" headers="Closing Date">May 20, 2011</td> + <td width="125" headers="Updated">November 13, 2012</td> +</tr> +<tr> + <td><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td> + <td headers="city">Macon</td> + <td headers="state">GA</td> + <td headers="CERT #">57213</td> + <td headers="AI">CertusBank, National Association</td> + <td width="125" headers="Closing Date">May 20, 2011</td> + <td width="125" headers="Updated">October 31, 2012</td> +</tr> +<tr> + <td><a href="coastal_fl.html">Coastal Bank</a></td> + <td headers="city">Cocoa Beach</td> + <td headers="state">FL</td> + <td headers="CERT #">34898</td> + <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td> + <td width="125" headers="Closing Date">May 6, 2011</td> + <td width="125" headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="communitycentral.html">Community Central Bank</a></td> + <td headers="city">Mount Clemens</td> + <td headers="state">MI</td> + <td headers="CERT #">34234</td> + <td headers="AI">Talmer Bank & Trust</td> + <td headers="Closing Date">April 29, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="parkavenue_ga.html">The Park Avenue Bank</a></td> + <td headers="city">Valdosta</td> + <td headers="state">GA</td> + <td headers="CERT #">19797</td> + <td headers="AI">Bank of the Ozarks</td> + <td headers="Closing Date">April 29, 2011</td> + <td headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="firstchoice.html">First Choice Community Bank</a></td> + <td headers="city">Dallas</td> + <td headers="state">GA</td> + <td headers="CERT #">58539</td> + <td headers="AI">Bank of the Ozarks</td> + <td 
headers="Closing Date">April 29, 2011</td> + <td headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="cortez.html">Cortez Community Bank</a></td> + <td headers="city">Brooksville</td> + <td headers="state">FL</td> + <td headers="CERT #">57625</td> + <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A. + </td> + <td headers="Closing Date">April 29, 2011</td> + <td headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="fnbcf.html">First National Bank of Central Florida</a></td> + <td headers="city">Winter Park</td> + <td headers="state">FL</td> + <td headers="CERT #">26297</td> + <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td> + <td headers="Closing Date">April 29, 2011</td> + <td headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="heritage_ms.html">Heritage Banking Group</a></td> + <td headers="city">Carthage</td> + <td headers="state">MS</td> + <td headers="CERT #">14273</td> + <td headers="AI">Trustmark National Bank</td> + <td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="rosemount.html">Rosemount National Bank</a></td> + <td headers="city">Rosemount</td> + <td headers="state">MN</td> + <td headers="CERT #">24099</td> + <td headers="AI">Central Bank</td> + <td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="superior_al.html">Superior Bank</a></td> + <td headers="city">Birmingham</td> + <td headers="state">AL</td> + <td headers="CERT #">17750</td> + <td headers="AI">Superior Bank, National Association</td> + <td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">November 30, 2012</td> +</tr> +<tr> + <td><a href="nexity.html">Nexity Bank</a></td> + <td headers="city">Birmingham</td> + <td headers="state">AL</td> + <td headers="CERT #">19794</td> + <td headers="AI">AloStar Bank of Commerce</td> + 
<td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">September 4, 2012</td> +</tr> +<tr> + <td><a href="newhorizons.html">New Horizons Bank</a></td> + <td headers="city">East Ellijay</td> + <td headers="state">GA</td> + <td headers="CERT #">57705</td> + <td headers="AI">Citizens South Bank</td> + <td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">August 16, 2012</td> +</tr> +<tr> + <td><a href="bartow.html">Bartow County Bank</a></td> + <td headers="city">Cartersville</td> + <td headers="state">GA</td> + <td headers="CERT #">21495</td> + <td headers="AI">Hamilton State Bank</td> + <td headers="Closing Date">April 15, 2011</td> + <td headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="nevadacommerce.html">Nevada Commerce Bank</a></td> + <td headers="city">Las Vegas</td> + <td headers="state">NV</td> + <td headers="CERT #">35418</td> + <td headers="AI">City National Bank</td> + <td headers="Closing Date">April 8, 2011</td> + <td headers="Updated">September 9, 2012</td> +</tr> +<tr> + <td><a href="westernsprings.html">Western Springs National Bank and Trust</a></td> + <td headers="city">Western Springs</td> + <td headers="state">IL</td> + <td headers="CERT #">10086</td> + <td headers="AI">Heartland Bank and Trust Company</td> + <td headers="Closing Date">April 8, 2011</td> + <td headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="bankofcommerce.html">The Bank of Commerce</a></td> + <td headers="city">Wood Dale</td> + <td headers="state">IL</td> + <td headers="CERT #">34292</td> + <td headers="AI">Advantage National Bank Group</td> + <td headers="Closing Date">March 25, 2011</td> + <td headers="Updated">January 22, 2013</td> +</tr> +<tr> + <td><a href="legacy-wi.html">Legacy Bank</a></td> + <td headers="city">Milwaukee</td> + <td headers="state">WI</td> + <td headers="CERT #">34818</td> + <td headers="AI">Seaway Bank and Trust Company</td> + <td headers="Closing Date">March 11, 2011</td> + <td 
headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="firstnatldavis.html">First National Bank of Davis</a></td> + <td headers="city">Davis</td> + <td headers="state">OK</td> + <td headers="CERT #">4077</td> + <td headers="AI">The Pauls Valley National Bank</td> + <td headers="Closing Date">March 11, 2011</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="valleycomm.html">Valley Community Bank</a></td> + <td headers="city">St. Charles</td> + <td headers="state">IL</td> + <td headers="CERT #">34187</td> + <td headers="AI">First State Bank</td> + <td headers="Closing Date">February 25, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="sanluistrust.html">San Luis Trust Bank, FSB </a></td> + <td headers="city">San Luis Obispo</td> + <td headers="state">CA</td> + <td headers="CERT #">34783</td> + <td headers="AI">First California Bank</td> + <td headers="Closing Date">February 18, 2011</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + <tr> + <td><a href="charteroak.html">Charter Oak Bank</a></td> + <td headers="city">Napa</td> + <td headers="state">CA</td> + <td headers="CERT #">57855</td> + <td headers="AI">Bank of Marin</td> + <td headers="Closing Date">February 18, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td> + <td headers="city">Springfield</td> + <td headers="state">GA</td> + <td headers="CERT #">34601</td> + <td headers="AI">Heritage Bank of the South</td> + <td headers="Closing Date">February 18, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="habersham.html">Habersham Bank</a></td> + <td headers="city">Clarkesville</td> + <td headers="state">GA</td> + <td headers="CERT #">151</td> + <td headers="AI">SCBT National Association</td> + <td headers="Closing Date">February 18, 2011</td> + <td headers="Updated">November 2, 2012</td> + 
</tr> + + + <tr> + <td><a href="canyonstate.html">Canyon National Bank</a></td> + <td headers="city">Palm Springs</td> + <td headers="state">CA</td> + <td headers="CERT #">34692</td> + <td headers="AI">Pacific Premier Bank</td> + <td headers="Closing Date">February 11, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + + <tr> + <td><a href="badgerstate.html">Badger State Bank</a></td> + <td headers="city">Cassville</td> + <td headers="state">WI</td> + <td headers="CERT #">13272</td> + <td headers="AI">Royal Bank </td> + <td headers="Closing Date">February 11, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="peoplesstatebank.html">Peoples State Bank</a></td> + <td headers="city">Hamtramck</td> + <td headers="state">MI</td> + <td headers="CERT #">14939</td> + <td headers="AI">First Michigan Bank</td> + <td headers="Closing Date">February 11, 2011</td> + <td headers="Updated">January 22, 2013</td> + </tr> + + + <tr> + <td><a href="sunshinestate.html">Sunshine State Community Bank</a></td> + <td headers="city">Port Orange</td> + <td headers="state">FL</td> + <td headers="CERT #">35478</td> + <td headers="AI">Premier American Bank, N.A.</td> + <td headers="Closing Date">February 11, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="commfirst_il.html">Community First Bank Chicago</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">57948</td> + <td headers="AI">Northbrook Bank & Trust Company</td> + <td headers="Closing Date">February 4, 2011</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="northgabank.html">North Georgia Bank</a></td> + <td headers="city">Watkinsville</td> + <td headers="state">GA</td> + <td headers="CERT #">35242</td> + <td headers="AI">BankSouth</td> + <td headers="Closing Date">February 4, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + <tr> + <td><a 
href="americantrust.html">American Trust Bank</a></td> + <td headers="city">Roswell</td> + <td headers="state">GA</td> + <td headers="CERT #">57432</td> + <td headers="AI">Renasant Bank</td> + <td headers="Closing Date">February 4, 2011</td> + <td headers="Updated">October 31, 2012</td> + </tr> + <tr> + <td><a href="firstcomm_nm.html">First Community Bank</a></td> + <td headers="city">Taos</td> + <td headers="state">NM</td> + <td headers="CERT #">12261</td> + <td headers="AI">U.S. Bank, N.A.</td> + <td headers="Closing Date">January 28, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="firstier.html">FirsTier Bank</a></td> + <td headers="city">Louisville</td> + <td headers="state">CO</td> + <td headers="CERT #">57646</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">January 28, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="evergreenstatewi.html">Evergreen State Bank</a></td> + <td headers="city">Stoughton</td> + <td headers="state">WI</td> + <td headers="CERT #">5328</td> + <td headers="AI">McFarland State Bank</td> + <td headers="Closing Date">January 28, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="firststatebank_ok.html">The First State Bank</a></td> + <td headers="city">Camargo</td> + <td headers="state">OK</td> + <td headers="CERT #">2303</td> + <td headers="AI">Bank 7</td> + <td headers="Closing Date">January 28, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="unitedwestern.html">United Western Bank</a></td> + <td headers="city">Denver</td> + <td headers="state">CO</td> + <td headers="CERT #">31293</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td headers="Closing Date">January 21, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="bankofasheville.html">The Bank of Asheville</a></td> + <td 
headers="city">Asheville</td> + <td headers="state">NC</td> + <td headers="CERT #">34516</td> + <td headers="AI">First Bank</td> + <td headers="Closing Date">January 21, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + <tr> + <td><a href="commsouth.html">CommunitySouth Bank & Trust</a></td> + <td headers="city">Easley</td> + <td headers="state">SC</td> + <td headers="CERT #">57868</td> + <td headers="AI">CertusBank, National Association</td> + <td headers="Closing Date">January 21, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="enterprise.html">Enterprise Banking Company</a></td> + <td headers="city">McDonough</td> + <td headers="state">GA</td> + <td headers="CERT #">19758</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">January 21, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + <tr> + <td><a href="oglethorpe.html">Oglethorpe Bank</a></td> + <td headers="city">Brunswick</td> + <td headers="state">GA</td> + <td headers="CERT #">57440</td> + <td headers="AI">Bank of the Ozarks </td> + <td headers="Closing Date">January 14, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="legacybank.html">Legacy Bank</a></td> + <td headers="city">Scottsdale</td> + <td headers="state">AZ</td> + <td headers="CERT #">57820</td> + <td headers="AI">Enterprise Bank & Trust </td> + <td headers="Closing Date">January 7, 2011</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="firstcommercial.html">First Commercial Bank of Florida</a></td> + <td headers="city">Orlando</td> + <td headers="state">FL</td> + <td headers="CERT #">34965</td> + <td headers="AI">First Southern Bank</td> + <td headers="Closing Date">January 7, 2011</td> + <td headers="Updated">November 2, 2012</td> + </tr> + <tr> + <td><a href="communitynatl.html">Community National Bank</a></td> + <td headers="city">Lino Lakes</td> + <td headers="state">MN</td> 
+ <td headers="CERT #">23306</td> + <td headers="AI">Farmers & Merchants Savings Bank</td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + <tr> + <td><a href="firstsouthern.html">First Southern Bank </a></td> + <td headers="city">Batesville</td> + <td headers="state">AR</td> + <td headers="CERT #">58052</td> + <td headers="AI">Southern Bank</td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + <tr> + <td><a href="unitedamericas.html">United Americas Bank, N.A.</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">35065</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + <tr> + <td><a href="appalachianga.html">Appalachian Community Bank, FSB </a></td> + <td headers="city">McCaysville</td> + <td headers="state">GA</td> + <td headers="CERT #">58495</td> + <td headers="AI">Peoples Bank of East Tennessee</td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">October 31, 2012</td> + </tr> + + + <tr> + <td><a href="chestatee.html">Chestatee State Bank</a></td> + <td headers="city">Dawsonville</td> + <td headers="state">GA</td> + <td headers="CERT #">34578</td> + <td headers="AI">Bank of the Ozarks</td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + <tr> + <td><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td> + <td headers="city">Coral Gables</td> + <td headers="state">FL</td> + <td headers="CERT #">19040</td> + <td headers="AI">1st United Bank </td> + <td headers="Closing Date">December 17, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="earthstar.html">Earthstar Bank</a></td> + <td headers="city">Southampton</td> + <td headers="state">PA</td> + <td 
headers="CERT #">35561</td> + <td headers="AI">Polonia Bank</td> + <td headers="Closing Date">December 10, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="paramount.html">Paramount Bank</a></td> + <td headers="city">Farmington Hills</td> + <td headers="state">MI</td> + <td headers="CERT #">34673</td> + <td headers="AI">Level One Bank</td> + <td headers="Closing Date">December 10, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="firstbanking.html">First Banking Center</a></td> + <td headers="city">Burlington</td> + <td headers="state">WI</td> + <td headers="CERT #">5287</td> + <td headers="AI">First Michigan Bank</td> + <td headers="Closing Date">November 19, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + + <tr> + <td><a href="allegbank.html">Allegiance Bank of North America</a></td> + <td headers="city">Bala Cynwyd</td> + <td headers="state">PA</td> + <td headers="CERT #">35078</td> + <td headers="AI">VIST Bank</td> + <td headers="Closing Date">November 19, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + <tr> + <td><a href="gulfstate.html">Gulf State Community Bank</a></td> + <td headers="city">Carrabelle</td> + <td headers="state">FL</td> + <td headers="CERT #">20340</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">November 19, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + <tr> + <td><a href="copperstar.html">Copper Star Bank</a></td> + <td headers="city">Scottsdale</td> + <td headers="state">AZ</td> + <td headers="CERT #">35463</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">November 12, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="darbybank.html">Darby Bank & Trust Co.</a></td> + <td headers="city">Vidalia</td> + <td headers="state">GA</td> + <td headers="CERT #">14580</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing 
Date">November 12, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="tifton.html">Tifton Banking Company</a></td> + <td headers="city">Tifton</td> + <td headers="state">GA</td> + <td headers="CERT #">57831</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">November 12, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="firstvietnamese.html">First Vietnamese American Bank</a><br /> + <a href="firstvietnamese_viet.pdf">In Vietnamese</a></td> + <td headers="city">Westminster</td> + <td headers="state">CA</td> + <td headers="CERT #">57885</td> + <td headers="AI">Grandpoint Bank</td> + <td headers="Closing Date">November 5, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="piercecommercial.html">Pierce Commercial Bank</a></td> + <td headers="city">Tacoma</td> + <td headers="state">WA</td> + <td headers="CERT #">34411</td> + <td headers="AI">Heritage Bank</td> + <td headers="Closing Date">November 5, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="westerncommercial_ca.html">Western Commercial Bank</a></td> + <td headers="city">Woodland Hills</td> + <td headers="state">CA</td> + <td headers="CERT #">58087</td> + <td headers="AI">First California Bank</td> + <td headers="Closing Date">November 5, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="kbank.html">K Bank</a></td> + <td headers="city">Randallstown</td> + <td headers="state">MD</td> + <td headers="CERT #">31263</td> + <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td> + <td headers="Closing Date">November 5, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td> + <td headers="city">Scottsdale</td> + <td headers="state">AZ</td> + <td headers="CERT #">32582</td> + <td headers="AI">No Acquirer</td> + <td 
headers="Closing Date">October 22, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="hillcrest_ks.html">Hillcrest Bank</a></td> + <td headers="city">Overland Park</td> + <td headers="state">KS</td> + <td headers="CERT #">22173</td> + <td headers="AI">Hillcrest Bank, N.A.</td> + <td headers="Closing Date">October 22, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + <tr> + <td><a href="firstsuburban.html">First Suburban National Bank</a></td> + <td headers="city">Maywood</td> + <td headers="state">IL</td> + <td headers="CERT #">16089</td> + <td headers="AI">Seaway Bank and Trust Company</td> + <td headers="Closing Date">October 22, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + + + <tr> + <td><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td> + <td headers="city">Barnesville</td> + <td headers="state">GA</td> + <td headers="CERT #">2119</td> + <td headers="AI">United Bank</td> + <td headers="Closing Date">October 22, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + + <tr> + <td><a href="gordon.html">The Gordon Bank</a></td> + <td headers="city">Gordon</td> + <td headers="state">GA</td> + <td headers="CERT #">33904</td> + <td headers="AI">Morris Bank</td> + <td headers="Closing Date">October 22, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + + <tr> + <td><a href="progress_fl.html">Progress Bank of Florida</a></td> + <td headers="city">Tampa</td> + <td headers="state">FL</td> + <td headers="CERT #">32251</td> + <td headers="AI">Bay Cities Bank</td> + <td headers="Closing Date">October 22, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + + + <tr> + <td><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td> + <td headers="city">Jacksonville</td> + <td headers="state">FL</td> + <td headers="CERT #">27573</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">October 22, 
2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + + <tr> + <td><a href="premier_mo.html">Premier Bank</a></td> + <td headers="city">Jefferson City</td> + <td headers="state">MO</td> + <td headers="CERT #">34016</td> + <td headers="AI">Providence Bank</td> + <td headers="Closing Date">October 15, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="westbridge.html">WestBridge Bank and Trust Company</a></td> + <td headers="city">Chesterfield</td> + <td headers="state">MO</td> + <td headers="CERT #">58205</td> + <td headers="AI">Midland States Bank</td> + <td headers="Closing Date">October 15, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td> + <td headers="city">Olathe</td> + <td headers="state">KS</td> + <td headers="CERT #">30898</td> + <td headers="AI">Simmons First National Bank</td> + <td headers="Closing Date">October 15, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="shoreline.html">Shoreline Bank</a></td> + <td headers="city">Shoreline</td> + <td headers="state">WA</td> + <td headers="CERT #">35250</td> + <td headers="AI">GBC International Bank</td> + <td headers="Closing Date">October 1, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="wakulla.html">Wakulla Bank</a></td> + <td headers="city">Crawfordville</td> + <td headers="state"> FL </td> + <td headers="CERT #">21777</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">October 1, 2010</td> + <td headers="Updated">November 2, 2012</td> + </tr> + <tr> + <td><a href="northcounty.html">North County Bank</a></td> + <td headers="city">Arlington</td> + <td headers="state"> WA </td> + <td headers="CERT #">35053</td> + <td headers="AI">Whidbey Island Bank</td> + <td headers="Closing Date">September 24, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + 
<td><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td> + <td headers="city">Ponte Vedra Beach</td> + <td headers="state"> FL </td> + <td headers="CERT #">58308</td> + <td headers="AI">First Southern Bank</td> + <td headers="Closing Date">September 24, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + + <tr> + <td><a href="maritimesavings.html">Maritime Savings Bank</a></td> + <td headers="city">West Allis</td> + <td headers="state"> WI </td> + <td headers="CERT #">28612</td> + <td headers="AI">North Shore Bank, FSB</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + + <tr> + <td><a href="bramblesavings.html">Bramble Savings Bank</a></td> + <td headers="city">Milford</td> + <td headers="state"> OH </td> + <td headers="CERT #">27808</td> + <td headers="AI">Foundation Bank</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="peoplesbank_ga.html">The Peoples Bank</a></td> + <td headers="city">Winder</td> + <td headers="state"> GA </td> + <td headers="CERT #">182</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + + <tr> + <td><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td> + <td headers="city">Douglasville</td> + <td headers="state"> GA </td> + <td headers="CERT #">57448</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + + <tr> + <td><a href="ellijay.html">Bank of Ellijay</a></td> + <td headers="city"> Ellijay </td> + <td headers="state"> GA </td> + <td headers="CERT #">58197</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + + + <tr> + <td><a 
href="isnbank.html">ISN Bank</a></td> + <td headers="city">Cherry Hill </td> + <td headers="state"> NJ </td> + <td headers="CERT #">57107</td> + <td headers="AI">Customers Bank</td> + <td headers="Closing Date">September 17, 2010</td> + <td headers="Updated">August 22, 2012</td> + </tr> + + <tr> + <td><a href="horizonfl.html">Horizon Bank</a></td> + <td headers="city">Bradenton</td> + <td headers="state"> FL </td> + <td headers="CERT #">35061</td> + <td headers="AI">Bank of the Ozarks</td> + <td headers="Closing Date">September 10, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + + <tr> + <td><a href="sonoma.html">Sonoma Valley Bank</a></td> + <td headers="city">Sonoma</td> + <td headers="state"> CA </td> + <td headers="CERT #">27259</td> + <td headers="AI">Westamerica Bank</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="lospadres.html">Los Padres Bank</a></td> + <td headers="city">Solvang </td> + <td headers="state">CA</td> + <td headers="CERT #">32165</td> + <td headers="AI">Pacific Western Bank</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="butte.html">Butte Community Bank</a></td> + <td headers="city">Chico</td> + <td headers="state"> CA </td> + <td headers="CERT #">33219</td> + <td headers="AI">Rabobank, N.A.</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="pacificbk.html">Pacific State Bank</a></td> + <td headers="city">Stockton</td> + <td headers="state"> CA </td> + <td headers="CERT #">27090</td> + <td headers="AI">Rabobank, N.A.</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + <tr> + <td><a href="shorebank.html">ShoreBank</a></td> + <td headers="city">Chicago </td> + <td headers="state">IL</td> + <td headers="CERT 
#">15640</td> + <td headers="AI">Urban Partnership Bank</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + + + <tr> + <td><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td> + <td headers="city">Martinsville</td> + <td headers="state">VA</td> + <td headers="CERT #">31623</td> + <td headers="AI">River Community Bank, N.A.</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + + + <tr> + <td><a href="inatbank.html">Independent National Bank</a></td> + <td headers="city">Ocala</td> + <td headers="state">FL</td> + <td headers="CERT #">27344</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + + <tr> + <td><a href="cnbbartow.html">Community National Bank at Bartow</a></td> + <td headers="city">Bartow</td> + <td headers="state">FL</td> + <td headers="CERT #">25266</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">August 20, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + + + + <tr> + <td><a href="palosbank.html">Palos Bank and Trust Company</a></td> + <td headers="city">Palos Heights</td> + <td headers="state">IL</td> + <td headers="CERT #">17599</td> + <td headers="AI">First Midwest Bank</td> + <td headers="Closing Date">August 13, 2010</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="ravenswood.html">Ravenswood Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">34231</td> + <td headers="AI">Northbrook Bank & Trust Company</td> + <td headers="Closing Date">August 6, 2010</td> + <td headers="Updated">August 22, 2012</td> + </tr> + + + <tr> + <td><a href="libertyor.html">LibertyBank</a></td> + <td headers="city">Eugene</td> + <td headers="state">OR</td> + <td headers="CERT 
#">31964</td> + <td headers="AI">Home Federal Bank</td> + <td headers="Closing Date">July 30, 2010</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="cowlitz.html">The Cowlitz Bank</a></td> + <td headers="city">Longview</td> + <td headers="state">WA</td> + <td headers="CERT #">22643</td> + <td headers="AI">Heritage Bank</td> + <td headers="Closing Date">July 30, 2010</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="coastal.html">Coastal Community Bank</a></td> + <td headers="city">Panama City Beach</td> + <td headers="state">FL</td> + <td headers="CERT #">9619</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">July 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="bayside.html">Bayside Savings Bank</a></td> + <td headers="city">Port Saint Joe</td> + <td headers="state">FL</td> + <td headers="CERT #">57669</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">July 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="northwestga.html">Northwest Bank & Trust</a></td> + <td headers="city">Acworth</td> + <td headers="state">GA</td> + <td headers="CERT #">57658</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="homevalleyor.html">Home Valley Bank </a></td> + <td headers="city">Cave Junction</td> + <td headers="state">OR</td> + <td headers="CERT #">23181</td> + <td headers="AI">South Valley Bank & Trust</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="southwestusanv.html">SouthwestUSA Bank </a></td> + <td headers="city">Las Vegas</td> + <td headers="state">NV</td> + <td headers="CERT #">35434</td> + <td headers="AI">Plaza Bank</td> + <td headers="Closing Date">July 23, 2010</td> + <td 
headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="communitysecmn.html">Community Security Bank </a></td> + <td headers="city">New Prague</td> + <td headers="state">MN</td> + <td headers="CERT #">34486</td> + <td headers="AI">Roundbank</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">September 12, 2012</td> + </tr> + <tr> + <td><a href="thunderbankks.html">Thunder Bank </a></td> + <td headers="city">Sylvan Grove</td> + <td headers="state">KS</td> + <td headers="CERT #">10506</td> + <td headers="AI">The Bennington State Bank</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">September 13, 2012</td> + </tr> + <tr> + <td><a href="williamsburgsc.html">Williamsburg First National Bank </a></td> + <td headers="city">Kingstree</td> + <td headers="state">SC</td> + <td headers="CERT #">17837</td> + <td headers="AI">First Citizens Bank and Trust Company, Inc.</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="crescentga.html">Crescent Bank and Trust Company </a></td> + <td headers="city">Jasper</td> + <td headers="state">GA</td> + <td headers="CERT #">27559</td> + <td headers="AI">Renasant Bank</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="sterlingfl.html">Sterling Bank </a></td> + <td headers="city">Lantana</td> + <td headers="state">FL</td> + <td headers="CERT #">32536</td> + <td headers="AI">IBERIABANK</td> + <td headers="Closing Date">July 23, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td> + <td headers="city">Hastings</td> + <td headers="state">MI</td> + <td headers="CERT #">28136</td> + <td headers="AI">Commercial Bank</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">September 13, 2012</td> + </tr> + <tr> + <td><a 
href="oldecypress.html">Olde Cypress Community Bank</a></td> + <td headers="city">Clewiston</td> + <td headers="state">FL</td> + <td headers="CERT #">28864</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="turnberry.html">Turnberry Bank</a></td> + <td headers="city">Aventura</td> + <td headers="state">FL</td> + <td headers="CERT #">32280</td> + <td headers="AI">NAFH National Bank</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="metrobankfl.html">Metro Bank of Dade County</a></td> + <td headers="city">Miami</td> + <td headers="state">FL</td> + <td headers="CERT #">25172</td> + <td headers="AI">NAFH National Bank</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="firstnatlsc.html">First National Bank of the South</a></td> + <td headers="city">Spartanburg</td> + <td headers="state">SC</td> + <td headers="CERT #">35383</td> + <td headers="AI">NAFH National Bank</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td height="24"><a href="woodlands.html">Woodlands Bank</a></td> + <td headers="city">Bluffton</td> + <td headers="state">SC</td> + <td headers="CERT #">32571</td> + <td headers="AI">Bank of the Ozarks</td> + <td headers="Closing Date">July 16, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="homenatlok.html">Home National Bank</a></td> + <td headers="city">Blackwell</td> + <td headers="state">OK</td> + <td headers="CERT #">11636</td> + <td headers="AI">RCB Bank</td> + <td headers="Closing Date">July 9, 2010</td> + <td headers="Updated">December 10, 2012</td> + </tr> + <tr> + <td><a href="usabankny.html">USA Bank</a></td> + <td headers="city">Port Chester</td> + <td 
headers="state">NY</td> + <td headers="CERT #">58072</td> + <td headers="AI">New Century Bank</td> + <td headers="Closing Date">July 9, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td> + <td headers="city">Baltimore</td> + <td headers="state">MD</td> + <td headers="CERT #">32456</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">July 9, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="baynatlmd.html">Bay National Bank</a></td> + <td headers="city">Baltimore</td> + <td headers="state">MD</td> + <td headers="CERT #">35462</td> + <td headers="AI">Bay Bank, FSB</td> + <td headers="Closing Date">July 9, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="highdesertnm.html">High Desert State Bank</a></td> + <td headers="city">Albuquerque</td> + <td headers="state">NM</td> + <td headers="CERT #">35279</td> + <td headers="AI">First American Bank</td> + <td headers="Closing Date">June 25, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="firstnatga.html">First National Bank</a></td> + <td headers="city">Savannah</td> + <td headers="state">GA</td> + <td headers="CERT #">34152</td> + <td headers="AI">The Savannah Bank, N.A.</td> + <td headers="Closing Date">June 25, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="peninsulafl.html">Peninsula Bank</a></td> + <td headers="city">Englewood</td> + <td headers="state">FL</td> + <td headers="CERT #">26563</td> + <td headers="AI">Premier American Bank, N.A.</td> + <td headers="Closing Date">June 25, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="nevsecbank.html">Nevada Security Bank</a></td> + <td headers="city">Reno</td> + <td headers="state">NV</td> + <td headers="CERT #">57110</td> + <td headers="AI">Umpqua Bank</td> + <td 
headers="Closing Date">June 18, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="washfirstintl.html">Washington First International Bank</a></td> + <td headers="city">Seattle</td> + <td headers="state">WA</td> + <td headers="CERT #">32955</td> + <td headers="AI">East West Bank</td> + <td headers="Closing Date">June 11, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="tieronebankne.html">TierOne Bank</a></td> + <td headers="city">Lincoln</td> + <td headers="state">NE</td> + <td headers="CERT #">29341</td> + <td headers="AI">Great Western Bank</td> + <td headers="Closing Date">June 4, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="arcolail.html">Arcola Homestead Savings Bank</a></td> + <td headers="city">Arcola</td> + <td headers="state">IL</td> + <td headers="CERT #">31813</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">June 4, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="firstnatms.html">First National Bank</a></td> + <td headers="city">Rosedale </td> + <td headers="state">MS</td> + <td headers="CERT #">15814</td> + <td headers="AI">The Jefferson Bank</td> + <td headers="Closing Date">June 4, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="swbnevada.html">Sun West Bank</a></td> + <td headers="city">Las Vegas </td> + <td headers="state">NV</td> + <td headers="CERT #">34785</td> + <td headers="AI">City National Bank</td> + <td headers="Closing Date">May 28, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="graniteca.html">Granite Community Bank, NA</a></td> + <td headers="city">Granite Bay </td> + <td headers="state">CA</td> + <td headers="CERT #">57315</td> + <td headers="AI">Tri Counties Bank</td> + <td headers="Closing Date">May 28, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + 
<td><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td> + <td headers="city">Tampa</td> + <td headers="state">FL</td> + <td headers="CERT #">57814</td> + <td headers="AI">EverBank</td> + <td headers="Closing Date">May 28, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td> + <td headers="city">Naples </td> + <td headers="state">FL</td> + <td headers="CERT #">35106</td> + <td headers="AI">EverBank</td> + <td headers="Closing Date">May 28, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td> + <td headers="city">Fort Lauderdale </td> + <td headers="state">FL</td> + <td headers="CERT #">57360</td> + <td headers="AI">EverBank</td> + <td headers="Closing Date">May 28, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="pinehurstmn.html">Pinehurst Bank</a></td> + <td headers="city">Saint Paul </td> + <td headers="state">MN</td> + <td headers="CERT #">57735</td> + <td headers="AI">Coulee Bank</td> + <td headers="Closing Date">May 21, 2010</td> + <td headers="Updated">October 26, 2012</td> + </tr> + <tr> + <td><a href="midwestil.html">Midwest Bank and Trust Company</a></td> + <td headers="city">Elmwood Park </td> + <td headers="state">IL</td> + <td headers="CERT #">18117</td> + <td headers="AI">FirstMerit Bank, N.A.</td> + <td headers="Closing Date">May 14, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="swcmntymo.html">Southwest Community Bank</a></td> + <td headers="city">Springfield</td> + <td headers="state">MO</td> + <td headers="CERT #">34255</td> + <td headers="AI">Simmons First National Bank</td> + <td headers="Closing Date">May 14, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="newlibertymi.html">New Liberty Bank</a></td> + <td headers="city">Plymouth</td> + <td 
headers="state">MI</td> + <td headers="CERT #">35586</td> + <td headers="AI">Bank of Ann Arbor</td> + <td headers="Closing Date">May 14, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="satillacmntyga.html">Satilla Community Bank</a></td> + <td headers="city">Saint Marys</td> + <td headers="state">GA</td> + <td headers="CERT #">35114</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">May 14, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="1stpacific.html">1st Pacific Bank of California</a></td> + <td headers="city">San Diego</td> + <td headers="state">CA</td> + <td headers="CERT #">35517</td> + <td headers="AI">City National Bank</td> + <td headers="Closing Date">May 7, 2010</td> + <td headers="Updated">December 13, 2012</td> + </tr> + <tr> + <td><a href="townebank.html">Towne Bank of Arizona</a></td> + <td headers="city">Mesa</td> + <td headers="state">AZ</td> + <td headers="CERT #">57697</td> + <td headers="AI">Commerce Bank of Arizona</td> + <td headers="Closing Date">May 7, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="accessbank.html">Access Bank</a></td> + <td headers="city">Champlin</td> + <td headers="state">MN</td> + <td headers="CERT #">16476</td> + <td headers="AI">PrinsBank</td> + <td headers="Closing Date">May 7, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="bonifay.html">The Bank of Bonifay</a></td> + <td headers="city">Bonifay</td> + <td headers="state">FL</td> + <td headers="CERT #">14246</td> + <td headers="AI">First Federal Bank of Florida</td> + <td headers="Closing Date">May 7, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="frontier.html">Frontier Bank</a></td> + <td headers="city">Everett</td> + <td headers="state">WA</td> + <td headers="CERT #">22710</td> + <td headers="AI">Union Bank, N.A.</td> + <td headers="Closing Date">April 30, 
2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="bc-natl.html">BC National Banks</a></td> + <td headers="city">Butler</td> + <td headers="state">MO</td> + <td headers="CERT #">17792</td> + <td headers="AI">Community First Bank</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="champion.html">Champion Bank</a></td> + <td headers="city">Creve Coeur</td> + <td headers="state">MO</td> + <td headers="CERT #">58362</td> + <td headers="AI">BankLiberty</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="cfbancorp.html">CF Bancorp</a></td> + <td headers="city">Port Huron</td> + <td headers="state">MI</td> + <td headers="CERT #">30005</td> + <td headers="AI">First Michigan Bank</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td headers="Instituition"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br /> + <a href="westernbank-puertorico_spanish.html">En Espanol</a></td> + <td headers="city">Mayaguez</td> + <td headers="state">PR</td> + <td headers="CERT #">31027</td> + <td headers="AI">Banco Popular de Puerto Rico</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br /> + <a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td> + <td headers="city">Hato Rey</td> + <td headers="state">PR</td> + <td headers="CERT #">32185</td> + <td headers="AI">Scotiabank de Puerto Rico</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="eurobank-puertorico.html">Eurobank</a><br /> + <a href="eurobank-puertorico_spanish.html">En Espanol</a></td> + 
<td headers="city">San Juan</td> + <td headers="state">PR</td> + <td headers="CERT #">27150</td> + <td headers="AI">Oriental Bank and Trust</td> + <td headers="Closing Date">April 30, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="wheatland.html">Wheatland Bank</a></td> + <td headers="city">Naperville</td> + <td headers="state">IL</td> + <td headers="CERT #">58429</td> + <td headers="AI">Wheaton Bank & Trust</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="peotone.html">Peotone Bank and Trust Company</a></td> + <td headers="city">Peotone</td> + <td headers="state">IL</td> + <td headers="CERT #">10888</td> + <td headers="AI">First Midwest Bank</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">30600</td> + <td headers="AI">Northbrook Bank & Trust Company</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="new-century-il.html">New Century Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">34821</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">34658</td> + <td headers="AI">Republic Bank of Chicago</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="broadway.html">Broadway Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td 
headers="CERT #">22853</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="amcore.html">Amcore Bank, National Association</a></td> + <td headers="city">Rockford</td> + <td headers="state">IL</td> + <td headers="CERT #">3735</td> + <td headers="AI">Harris N.A.</td> + <td headers="Closing Date">April 23, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + + <tr> + <td><a href="citybank.html">City Bank</a></td> + <td headers="city">Lynnwood</td> + <td headers="state">WA</td> + <td headers="CERT #">21521</td> + <td headers="AI">Whidbey Island Bank</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="tamalpais.html">Tamalpais Bank</a></td> + <td headers="city">San Rafael</td> + <td headers="state">CA</td> + <td headers="CERT #">33493</td> + <td headers="AI">Union Bank, N.A.</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="innovative.html">Innovative Bank</a></td> + <td headers="city">Oakland</td> + <td headers="state">CA</td> + <td headers="CERT #">23876</td> + <td headers="AI">Center Bank</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="butlerbank.html">Butler Bank</a></td> + <td headers="city">Lowell</td> + <td headers="state">MA</td> + <td headers="CERT #">26619</td> + <td headers="AI">People's United Bank</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="riverside-natl.html">Riverside National Bank of Florida</a></td> + <td headers="city">Fort Pierce</td> + <td headers="state">FL</td> + <td headers="CERT #">24067</td> + <td headers="AI">TD Bank, N.A.</td> + <td headers="Closing Date">April 16, 2010</td> + <td 
headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="americanfirst.html">AmericanFirst Bank</a></td> + <td headers="city">Clermont</td> + <td headers="state">FL</td> + <td headers="CERT #">57724</td> + <td headers="AI">TD Bank, N.A.</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">October 31, 2012</td> + </tr> + <tr> + <td><a href="ffbnf.html">First Federal Bank of North Florida</a></td> + <td headers="city">Palatka</td> + <td headers="state">FL </td> + <td headers="CERT #">28886</td> + <td headers="AI">TD Bank, N.A.</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="lakeside-comm.html">Lakeside Community Bank</a></td> + <td headers="city">Sterling Heights</td> + <td headers="state">MI</td> + <td headers="CERT #">34878</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">April 16, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="beachfirst.html">Beach First National Bank</a></td> + <td headers="city">Myrtle Beach</td> + <td headers="state">SC</td> + <td headers="CERT #">34242</td> + <td headers="AI">Bank of North Carolina</td> + <td headers="Closing Date">April 9, 2010</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="deserthills.html">Desert Hills Bank</a></td> + <td headers="city">Phoenix</td> + <td headers="state">AZ</td> + <td headers="CERT #">57060</td> + <td headers="AI">New York Community Bank</td> + <td headers="Closing Date">March 26, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="unity-natl.html">Unity National Bank</a></td> + <td headers="city">Cartersville</td> + <td headers="state">GA</td> + <td headers="CERT #">34678</td> + <td headers="AI">Bank of the Ozarks</td> + <td headers="Closing Date">March 26, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="key-west.html">Key 
West Bank</a></td> + <td headers="city">Key West</td> + <td headers="state">FL</td> + <td headers="CERT #">34684</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">March 26, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="mcintosh.html">McIntosh Commercial Bank</a></td> + <td headers="city">Carrollton</td> + <td headers="state">GA</td> + <td headers="CERT #">57399</td> + <td headers="AI">CharterBank</td> + <td headers="Closing Date">March 26, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="state-aurora.html">State Bank of Aurora</a></td> + <td headers="city">Aurora</td> + <td headers="state">MN</td> + <td headers="CERT #">8221</td> + <td headers="AI">Northern State Bank</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="firstlowndes.html">First Lowndes Bank</a></td> + <td headers="city">Fort Deposit</td> + <td headers="state">AL</td> + <td headers="CERT #">24957</td> + <td headers="AI">First Citizens Bank</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="bankofhiawassee.html">Bank of Hiawassee</a></td> + <td headers="city">Hiawassee</td> + <td headers="state">GA</td> + <td headers="CERT #">10054</td> + <td headers="AI">Citizens South Bank</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="appalachian.html">Appalachian Community Bank</a></td> + <td headers="city">Ellijay</td> + <td headers="state">GA</td> + <td headers="CERT #">33989</td> + <td headers="AI">Community & Southern Bank</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">October 31, 2012</td> + </tr> + <tr> + <td><a href="advanta-ut.html">Advanta Bank Corp.</a></td> + <td headers="city">Draper</td> + <td headers="state">UT</td> + <td headers="CERT 
#">33535</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="cent-security.html">Century Security Bank</a></td> + <td headers="city">Duluth</td> + <td headers="state">GA</td> + <td headers="CERT #">58104</td> + <td headers="AI">Bank of Upson</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="amer-natl-oh.html">American National Bank</a></td> + <td headers="city">Parma</td> + <td headers="state">OH</td> + <td headers="CERT #">18806</td> + <td headers="AI">The National Bank and Trust Company</td> + <td headers="Closing Date">March 19, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="statewide.html">Statewide Bank</a></td> + <td headers="city">Covington</td> + <td headers="state">LA</td> + <td headers="CERT #">29561</td> + <td headers="AI">Home Bank</td> + <td headers="Closing Date">March 12, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> +<tr> + <td><a href="oldsouthern.html">Old Southern Bank</a></td> + <td headers="city">Orlando</td> + <td headers="state">FL</td> + <td headers="CERT #">58182</td> + <td headers="AI">Centennial Bank</td> + <td headers="Closing Date">March 12, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="parkavenue-ny.html">The Park Avenue Bank</a></td> + <td headers="city">New York</td> + <td headers="state">NY</td> + <td headers="CERT #">27096</td> + <td headers="AI">Valley National Bank</td> + <td headers="Closing Date">March 12, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="libertypointe.html">LibertyPointe Bank</a></td> + <td headers="city">New York</td> + <td headers="state">NY</td> + <td headers="CERT #">58071</td> + <td headers="AI">Valley National Bank</td> + <td headers="Closing Date">March 11, 2010</td> + <td 
headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="centennial-ut.html">Centennial Bank</a></td> + <td headers="city">Ogden</td> + <td headers="state">UT</td> + <td headers="CERT #">34430</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 5, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="waterfield.html">Waterfield Bank</a></td> + <td headers="city">Germantown</td> + <td headers="state">MD</td> + <td headers="CERT #">34976</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 5, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="bankofillinois.html">Bank of Illinois</a></td> + <td headers="city">Normal</td> + <td headers="state">IL</td> + <td headers="CERT #">9268</td> + <td headers="AI">Heartland Bank and Trust Company</td> + <td headers="Closing Date">March 5, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="sunamerican.html">Sun American Bank</a></td> + <td headers="city">Boca Raton</td> + <td headers="state">FL</td> + <td headers="CERT #">27126</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td headers="Closing Date">March 5, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="rainier.html">Rainier Pacific Bank</a></td> + <td headers="city">Tacoma</td> + <td headers="state">WA</td> + <td headers="CERT #">38129</td> + <td headers="AI">Umpqua Bank</td> + <td headers="Closing Date">February 26, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="carsonriver.html">Carson River Community Bank</a></td> + <td headers="city">Carson City</td> + <td headers="state">NV</td> + <td headers="CERT #">58352</td> + <td headers="AI">Heritage Bank of Nevada</td> + <td headers="Closing Date">February 26, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="lajolla.html">La Jolla Bank, 
FSB</a></td> + <td headers="city">La Jolla</td> + <td headers="state">CA</td> + <td headers="CERT #">32423</td> + <td headers="AI">OneWest Bank, FSB</td> + <td headers="Closing Date">February 19, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="georgewashington.html">George Washington Savings Bank</a></td> + <td headers="city">Orland Park</td> + <td headers="state">IL</td> + <td headers="CERT #">29952</td> + <td headers="AI">FirstMerit Bank, N.A.</td> + <td headers="Closing Date">February 19, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="lacoste.html">The La Coste National Bank</a></td> + <td headers="city">La Coste</td> + <td headers="state">TX</td> + <td headers="CERT #">3287</td> + <td headers="AI">Community National Bank</td> + <td headers="Closing Date">February 19, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="marco.html">Marco Community Bank</a></td> + <td headers="city">Marco Island</td> + <td headers="state">FL</td> + <td headers="CERT #">57586</td> + <td headers="AI">Mutual of Omaha Bank</td> + <td headers="Closing Date">February 19, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="1stamerican.html">1st American State Bank of Minnesota</a></td> + <td headers="city">Hancock</td> + <td headers="state">MN</td> + <td headers="CERT #">15448</td> + <td headers="AI">Community Development Bank, FSB</td> + <td headers="Closing Date">February 5, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="americanmarine.html">American Marine Bank</a></td> + <td headers="city">Bainbridge Island</td> + <td headers="state">WA</td> + <td headers="CERT #">16730</td> + <td headers="AI">Columbia State Bank</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="firstregional.html">First Regional Bank</a></td> + <td 
headers="city">Los Angeles</td> + <td headers="state">CA</td> + <td headers="CERT #">23011</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">August 24, 2012</td> + </tr> + <tr> + <td><a href="cbt-cornelia.html">Community Bank and Trust</a></td> + <td headers="city">Cornelia</td> + <td headers="state">GA</td> + <td headers="CERT #">5702</td> + <td headers="AI">SCBT National Association</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="marshall-mn.html">Marshall Bank, N.A.</a></td> + <td headers="city">Hallock</td> + <td headers="state">MN</td> + <td headers="CERT #">16133</td> + <td headers="AI">United Valley Bank</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="floridacommunity.html">Florida Community Bank</a></td> + <td headers="city">Immokalee</td> + <td headers="state">FL</td> + <td headers="CERT #">5672</td> + <td headers="AI">Premier American Bank, N.A.</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td> + <td headers="city">Carrollton</td> + <td headers="state">GA</td> + <td headers="CERT #">16480</td> + <td headers="AI">Community &amp; Southern Bank</td> + <td headers="Closing Date">January 29, 2010</td> + <td headers="Updated">December 13, 2012</td> + </tr> + <tr> + <td><a href="columbiariver.html">Columbia River Bank</a></td> + <td headers="city">The Dalles</td> + <td headers="state">OR</td> + <td headers="CERT #">22469</td> + <td headers="AI">Columbia State Bank</td> + <td headers="Closing Date">January 22, 2010</td> + <td headers="Updated">September 14, 2012</td> + </tr> + <tr> + <td><a href="evergreen-wa.html">Evergreen Bank</a></td> + <td 
headers="city">Seattle</td> + <td headers="state">WA</td> + <td headers="CERT #">20501</td> + <td headers="AI">Umpqua Bank</td> + <td headers="Closing Date">January 22, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="charter-nm.html">Charter Bank</a></td> + <td headers="city">Santa Fe</td> + <td headers="state">NM</td> + <td headers="CERT #">32498</td> + <td headers="AI">Charter Bank</td> + <td headers="Closing Date">January 22, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="leeton.html">Bank of Leeton</a></td> + <td headers="city">Leeton</td> + <td headers="state">MO</td> + <td headers="CERT #">8265</td> + <td headers="AI">Sunflower Bank, N.A.</td> + <td headers="Closing Date">January 22, 2010</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="premieramerican.html">Premier American Bank</a></td> + <td headers="city">Miami</td> + <td headers="state">FL</td> + <td headers="CERT #">57147</td> + <td headers="AI">Premier American Bank, N.A.</td> + <td headers="Closing Date">January 22, 2010</td> + <td headers="Updated">December 13, 2012</td> + </tr> + <tr> + <td><a href="barnes.html">Barnes Banking Company</a></td> + <td headers="city">Kaysville</td> + <td headers="state">UT</td> + <td headers="CERT #">1252</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">January 15, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="ststephen.html">St. Stephen State Bank</a></td> + <td headers="city">St. Stephen</td> + <td headers="state">MN</td> + <td headers="CERT #">17522</td> + <td headers="AI">First State Bank of St. 
Joseph</td> + <td headers="Closing Date">January 15, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="towncommunity.html">Town Community + Bank &amp; Trust</a></td> + <td headers="city">Antioch</td> + <td headers="state">IL</td> + <td headers="CERT #">34705</td> + <td headers="AI">First American Bank</td> + <td headers="Closing Date">January 15, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="horizon-wa.html">Horizon Bank</a></td> + <td headers="city">Bellingham</td> + <td headers="state">WA</td> + <td headers="CERT #">22977</td> + <td headers="AI">Washington Federal Savings and Loan Association</td> + <td headers="Closing Date">January 8, 2010</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td> + <td headers="city">Santa Monica</td> + <td headers="state">CA</td> + <td headers="CERT #">28536</td> + <td headers="AI">OneWest Bank, FSB</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="imperialcapital.html">Imperial Capital Bank</a></td> + <td headers="city">La Jolla</td> + <td headers="state">CA</td> + <td headers="CERT #">26348</td> + <td headers="AI">City National Bank</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> + <tr> + <td><a href="ibb.html">Independent Bankers' Bank</a></td> + <td headers="city">Springfield</td> + <td headers="state">IL</td> + <td headers="CERT #">26820</td> + <td headers="AI">The Independent BankersBank (TIB)</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="newsouth.html">New South Federal Savings Bank</a></td> + <td headers="city">Irondale</td> + <td headers="state">AL</td> + <td headers="CERT #">32276</td> + <td headers="AI">Beal Bank</td> + <td 
headers="Closing Date">December 18, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="citizensstate-mi.html">Citizens State Bank</a></td> + <td headers="city">New Baltimore</td> + <td headers="state">MI</td> + <td headers="CERT #">1006</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td> + <td headers="city">Panama City</td> + <td headers="state">FL</td> + <td headers="CERT #">32167</td> + <td headers="AI">Hancock Bank</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="rockbridge.html">RockBridge Commercial Bank</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">58315</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">December 18, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="solutions.html">SolutionsBank</a></td> + <td headers="city">Overland Park</td> + <td headers="state">KS</td> + <td headers="CERT #">4731</td> + <td headers="AI">Arvest Bank</td> + <td headers="Closing Date">December 11, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td> + <td headers="city">Mesa</td> + <td headers="state">AZ</td> + <td headers="CERT #">58399</td> + <td headers="AI">Enterprise Bank & Trust</td> + <td headers="Closing Date">December 11, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td> + <td headers="city">Miami</td> + <td headers="state">FL</td> + <td headers="CERT #">22846</td> + <td headers="AI">1st United Bank</td> + <td headers="Closing Date">December 11, 2009</td> + <td headers="Updated">November 5, 
2012</td> + </tr> + <tr> + <td><a href="atlantic-va.html">Greater Atlantic Bank</a></td> + <td headers="city">Reston</td> + <td headers="state">VA</td> + <td headers="CERT #">32583</td> + <td headers="AI">Sonabank</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="benchmark-il.html">Benchmark Bank</a></td> + <td headers="city">Aurora</td> + <td headers="state">IL</td> + <td headers="CERT #">10440</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">August 23, 2012</td> + </tr> + <tr> + <td><a href="amtrust.html">AmTrust Bank</a></td> + <td headers="city">Cleveland</td> + <td headers="state">OH</td> + <td headers="CERT #">29776</td> + <td headers="AI">New York Community Bank</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> +<tr> + <td><a href="tattnall.html">The Tattnall Bank</a></td> + <td headers="city">Reidsville</td> + <td headers="state">GA</td> + <td headers="CERT #">12080</td> + <td headers="AI">Heritage Bank of the South</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="firstsecurity.html">First Security National Bank</a></td> + <td headers="city">Norcross</td> + <td headers="state">GA</td> + <td headers="CERT #">26290</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> +<tr> + <td><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">34663</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">December 4, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="commercesw-fl.html">Commerce Bank 
of Southwest Florida</a></td> + <td headers="city">Fort Myers</td> + <td headers="state">FL</td> + <td headers="CERT #">58016</td> + <td headers="AI">Central Bank</td> + <td headers="Closing Date">November 20, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td> + <td headers="city">San Clemente</td> + <td headers="state">CA</td> + <td headers="CERT #">57914</td> + <td headers="AI">Sunwest Bank</td> + <td headers="Closing Date">November 13, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="orion-fl.html">Orion Bank</a></td> + <td headers="city">Naples</td> + <td headers="state">FL</td> + <td headers="CERT #">22427</td> + <td headers="AI">IBERIABANK</td> + <td headers="Closing Date">November 13, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> +<tr> + <td><a href="centuryfsb.html">Century Bank, + F.S.B.</a></td> + <td headers="city">Sarasota</td> + <td headers="state">FL</td> + <td headers="CERT #">32267</td> + <td headers="AI">IBERIABANK</td> + <td headers="Closing Date">November 13, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + + <tr> + <td><a href="ucb.html">United Commercial Bank</a></td> + <td headers="city">San Francisco</td> + <td headers="state">CA</td> + <td headers="CERT #">32469</td> + <td headers="AI">East West Bank</td> + <td headers="Closing Date">November 6, 2009</td> + <td headers="Updated">November 5, 2012</td> + </tr> + <tr> + <td><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td> + <td headers="city">St. 
Louis</td> + <td headers="state">MO</td> + <td headers="CERT #">19450</td> + <td headers="AI">Central Bank of Kansas City</td> + <td headers="Closing Date">November 6, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="prosperan.html">Prosperan Bank</a></td> + <td headers="city">Oakdale</td> + <td headers="state">MN</td> + <td headers="CERT #">35074</td> + <td headers="AI">Alerus Financial, N.A.</td> + <td headers="Closing Date">November 6, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="homefsb-mi.html">Home Federal Savings Bank</a></td> + <td headers="city">Detroit</td> + <td headers="state">MI</td> + <td headers="CERT #">30329</td> + <td headers="AI">Liberty Bank and Trust Company</td> + <td headers="Closing Date">November 6, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="unitedsecurity-ga.html">United Security Bank</a></td> + <td headers="city">Sparta</td> + <td headers="state">GA</td> + <td headers="CERT #">22286</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">November 6, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="northhouston-tx.html">North Houston Bank</a></td> + <td headers="city">Houston</td> + <td headers="state">TX</td> + <td headers="CERT #">18776</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="madisonville-tx.html">Madisonville State Bank</a></td> + <td headers="city">Madisonville</td> + <td headers="state">TX</td> + <td headers="CERT #">33782</td> + <td headers="AI">U.S. 
Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="citizens-teague.html">Citizens National Bank</a></td> + <td headers="city">Teague</td> + <td headers="state">TX</td> + <td headers="CERT #">25222</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="park-il.html">Park National Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">11677</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="pacificnational-ca.html">Pacific National Bank</a></td> + <td headers="city">San Francisco</td> + <td headers="state">CA</td> + <td headers="CERT #">30006</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="calnational.html">California National Bank</a></td> + <td headers="city">Los Angeles</td> + <td headers="state">CA</td> + <td headers="CERT #">34659</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> +<tr> + <td><a href="sandiegonational.html">San Diego National Bank</a></td> + <td headers="city">San Diego</td> + <td headers="state">CA</td> + <td headers="CERT #">23594</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> +<tr> + <td><a href="community-lemont.html">Community Bank of Lemont</a></td> + <td headers="city">Lemont</td> + <td headers="state">IL</td> + <td headers="CERT #">35291</td> + <td headers="AI">U.S. 
Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> +<tr> + <td><a href="bankusa-az.html">Bank USA, N.A</a>.</td> + <td headers="city">Phoenix</td> + <td headers="state">AZ</td> + <td headers="CERT #">32218</td> + <td headers="AI">U.S. Bank N.A.</td> + <td headers="Closing Date">October 30, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="firstdupage.html">First DuPage Bank</a></td> + <td headers="city">Westmont</td> + <td headers="state">IL</td> + <td headers="CERT #">35038</td> + <td headers="AI">First Midwest Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="riverview-mn.html">Riverview Community Bank</a></td> + <td headers="city">Otsego</td> + <td headers="state">MN</td> + <td headers="CERT #">57525</td> + <td headers="AI">Central Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="elmwood.html">Bank of Elmwood</a></td> + <td headers="city">Racine</td> + <td headers="state">WI</td> + <td headers="CERT #">18321</td> + <td headers="AI">Tri City National Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + + <tr> + <td><a href="flagship.html">Flagship National Bank</a></td> + <td headers="city">Bradenton</td> + <td headers="state">FL</td> + <td headers="CERT #">35044</td> + <td headers="AI">First Federal Bank of Florida</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td> + <td headers="city">Naples</td> + <td headers="state">FL</td> + <td headers="CERT #">58336</td> + <td headers="AI">Stonegate Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> 
+ <tr> + <td><a href="americanunited.html">American United Bank</a></td> + <td headers="city">Lawrenceville</td> + <td headers="state">GA</td> + <td headers="CERT #">57794</td> + <td headers="AI">Ameris Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> + <tr> + <td><a href="partners-fl.html">Partners Bank</a></td> + <td headers="city">Naples</td> + <td headers="state">FL</td> + <td headers="CERT #">57959</td> + <td headers="AI">Stonegate Bank</td> + <td headers="Closing Date">October 23, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="sanjoaquin.html">San Joaquin Bank</a></td> + <td headers="city">Bakersfield</td> + <td headers="state">CA</td> + <td headers="CERT #">23266</td> + <td headers="AI">Citizens Business Bank</td> + <td headers="Closing Date">October 16, 2009</td> + <td headers="Updated">August 22, 2012</td> + </tr> + <tr> + <td><a href="scnb-co.html">Southern Colorado National Bank</a></td> + <td headers="city">Pueblo</td> + <td headers="state">CO</td> + <td headers="CERT #">57263</td> + <td headers="AI">Legacy Bank</td> + <td headers="Closing Date">October 2, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> + <tr> + <td><a href="jennings-mn.html">Jennings State Bank</a></td> + <td headers="city">Spring Grove</td> + <td headers="state">MN</td> + <td headers="CERT #">11416</td> + <td headers="AI">Central Bank</td> + <td headers="Closing Date">October 2, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="warren-mi.html">Warren Bank</a></td> + <td headers="city">Warren</td> + <td headers="state">MI</td> + <td headers="CERT #">34824</td> + <td headers="AI">The Huntington National Bank</td> + <td headers="Closing Date">October 2, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="georgian.html">Georgian Bank</a></td> + <td headers="city">Atlanta</td> + <td 
headers="state">GA</td> + <td headers="CERT #">57151</td> + <td headers="AI">First Citizens Bank and Trust Company, Inc.</td> + <td headers="Closing Date">September 25, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td> + <td headers="city">Louisville</td> + <td headers="state">KY</td> + <td headers="CERT #">57068</td> + <td headers="AI">First Financial Bank, N.A.</td> + <td headers="Closing Date">September 18, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> + <tr> + <td><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td> + <td headers="city">Columbus</td> + <td headers="state">IN</td> + <td headers="CERT #">10100</td> + <td headers="AI">First Financial Bank, N.A.</td> + <td headers="Closing Date">September 18, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="venture-wa.html">Venture Bank</a></td> + <td headers="city">Lacey</td> + <td headers="state">WA</td> + <td headers="CERT #">22868</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td headers="Closing Date">September 11, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="brickwell-mn.html">Brickwell Community Bank</a></td> + <td headers="city">Woodbury</td> + <td headers="state">MN</td> + <td headers="CERT #">57736</td> + <td headers="AI">CorTrust Bank N.A.</td> + <td headers="Closing Date">September 11, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="corus.html">Corus Bank, N.A.</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">13693</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">September 11, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="firststate-az.html">First State Bank</a></td> + <td headers="city">Flagstaff</td> + <td headers="state">AZ</td> + <td 
headers="CERT #">34875</td> + <td headers="AI">Sunwest Bank</td> + <td headers="Closing Date">September 4, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="platinum-il.html">Platinum Community Bank</a></td> + <td headers="city">Rolling Meadows</td> + <td headers="state">IL</td> + <td headers="CERT #">35030</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">September 4, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="vantus.html">Vantus Bank</a></td> + <td headers="city">Sioux City</td> + <td headers="state">IA</td> + <td headers="CERT #">27732</td> + <td headers="AI">Great Southern Bank</td> + <td headers="Closing Date">September 4, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="inbank.html">InBank</a></td> + <td headers="city">Oak Forest</td> + <td headers="state">IL</td> + <td headers="CERT #">20203</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">September 4, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td> + <td headers="city">Kansas City</td> + <td headers="state">MO</td> + <td headers="CERT #">25231</td> + <td headers="AI">Great American Bank</td> + <td headers="Closing Date">September 4, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="affinity-ca.html">Affinity Bank</a></td> + <td headers="city">Ventura</td> + <td headers="state">CA</td> + <td headers="CERT #">27197</td> + <td headers="AI">Pacific Western Bank</td> + <td headers="Closing Date">August 28, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="mainstreet-mn.html">Mainstreet Bank</a></td> + <td headers="city">Forest Lake</td> + <td headers="state">MN</td> + <td headers="CERT #">1909</td> + <td headers="AI">Central Bank</td> + <td headers="Closing Date">August 28, 2009</td> + 
<td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="bradford-md.html">Bradford Bank</a></td> + <td headers="city">Baltimore</td> + <td headers="state">MD</td> + <td headers="CERT #">28312</td> + <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td> + <td headers="Closing Date">August 28, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="guaranty-tx.html">Guaranty Bank</a></td> + <td headers="city">Austin</td> + <td headers="state">TX</td> + <td headers="CERT #">32618</td> + <td headers="AI">BBVA Compass</td> + <td headers="Closing Date">August 21, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="capitalsouth.html">CapitalSouth Bank</a></td> + <td headers="city">Birmingham </td> + <td headers="state">AL</td> + <td headers="CERT #">22130</td> + <td headers="AI">IBERIABANK</td> + <td headers="Closing Date">August 21, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="coweta.html">First Coweta Bank</a> </td> + <td headers="city">Newnan</td> + <td headers="state">GA</td> + <td headers="CERT #">57702</td> + <td headers="AI">United Bank</td> + <td headers="Closing Date">August 21, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="ebank.html">ebank</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">34682</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">August 21, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="community-nv.html">Community Bank of Nevada</a></td> + <td headers="city">Las Vegas</td> + <td headers="state">NV</td> + <td headers="CERT #">34043</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">August 14, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="community-az.html">Community Bank of Arizona</a></td> + <td 
headers="city">Phoenix</td> + <td headers="state">AZ</td> + <td headers="CERT #">57645</td> + <td headers="AI">MidFirst Bank</td> + <td headers="Closing Date">August 14, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="union-az.html">Union Bank, National Association</a></td> + <td headers="city">Gilbert</td> + <td headers="state">AZ</td> + <td headers="CERT #">34485</td> + <td headers="AI">MidFirst Bank</td> + <td headers="Closing Date">August 14, 2009</td> + <td headers="Updated">August 21, 2012</td> + </tr> + <tr> + <td><a href="colonial-al.html">Colonial Bank</a></td> + <td headers="city">Montgomery</td> + <td headers="state">AL</td> + <td headers="CERT #">9609</td> + <td headers="AI">Branch Banking & Trust Company, (BB&T) </td> + <td headers="Closing Date">August 14, 2009</td> + <td headers="Updated">September 5, 2012</td> + </tr> + <tr> + <td><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td> + <td headers="city">Pittsburgh</td> + <td headers="state">PA</td> + <td headers="CERT #">31559</td> + <td headers="AI">PNC Bank, N.A.</td> + <td headers="Closing Date">August 14, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="community-prineville.html">Community First Bank</a></td> + <td headers="city">Prineville</td> + <td headers="state">OR</td> + <td headers="CERT #">23268</td> + <td headers="AI">Home Federal Bank</td> + <td headers="Closing Date">August 7, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="community-venice.html">Community National Bank of Sarasota County</a></td> + <td headers="city">Venice</td> + <td headers="state">FL</td> + <td headers="CERT #">27183</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">August 7, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="fsb-sarasota.html">First State Bank</a></td> + <td headers="city">Sarasota</td> + <td 
headers="state">FL</td> + <td headers="CERT #">27364</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">August 7, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="mutual-harvey.html">Mutual Bank</a></td> + <td headers="city">Harvey</td> + <td headers="state">IL</td> + <td headers="CERT #">18659</td> + <td headers="AI">United Central Bank</td> + <td headers="Closing Date">July 31, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="americano.html">First BankAmericano</a></td> + <td headers="city">Elizabeth</td> + <td headers="state">NJ</td> + <td headers="CERT #">34270</td> + <td headers="AI">Crown Bank</td> + <td headers="Closing Date">July 31, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td> + <td headers="city">West Chester</td> + <td headers="state">OH</td> + <td headers="CERT #">32288</td> + <td headers="AI">First Financial Bank, N.A.</td> + <td headers="Closing Date">July 31, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="integrity-fl.html">Integrity Bank</a></td> + <td headers="city">Jupiter</td> + <td headers="state">FL</td> + <td headers="CERT #">57604</td> + <td headers="AI">Stonegate Bank</td> + <td headers="Closing Date">July 31, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="fsb-altus.html">First State Bank of Altus</a></td> + <td headers="city">Altus</td> + <td headers="state">OK</td> + <td headers="CERT #">9873</td> + <td headers="AI">Herring Bank</td> + <td headers="Closing Date">July 31, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-jones.html">Security Bank of Jones County</a></td> + <td headers="city">Gray</td> + <td headers="state">GA</td> + <td headers="CERT #">8486</td> + <td headers="AI">State Bank and Trust Company</td> + <td 
headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-houston.html">Security Bank of Houston County</a></td> + <td headers="city">Perry</td> + <td headers="state">GA</td> + <td headers="CERT #">27048</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-bibb.html">Security Bank of Bibb County</a></td> + <td headers="city">Macon</td> + <td headers="state">GA</td> + <td headers="CERT #">27367</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-metro.html">Security Bank of North Metro</a></td> + <td headers="city">Woodstock</td> + <td headers="state">GA</td> + <td headers="CERT #">57105</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-fulton.html">Security Bank of North Fulton</a></td> + <td headers="city">Alpharetta</td> + <td headers="state">GA</td> + <td headers="CERT #">57430</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td> + <td headers="city">Suwanee</td> + <td headers="state">GA</td> + <td headers="CERT #">57346</td> + <td headers="AI">State Bank and Trust Company</td> + <td headers="Closing Date">July 24, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="waterford.html">Waterford Village Bank</a></td> + <td headers="city">Williamsville</td> + <td headers="state">NY</td> + <td headers="CERT #">58065</td> + <td headers="AI">Evans Bank, N.A.</td> + <td headers="Closing Date">July 24, 
2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="temecula.html">Temecula Valley Bank</a></td> + <td headers="city">Temecula</td> + <td headers="state">CA</td> + <td headers="CERT #">34341</td> + <td headers="AI">First-Citizens Bank & Trust Company</td> + <td headers="Closing Date">July 17, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="vineyard.html">Vineyard Bank</a></td> + <td headers="city">Rancho Cucamonga</td> + <td headers="state">CA</td> + <td headers="CERT #">23556</td> + <td headers="AI">California Bank & Trust</td> + <td headers="Closing Date">July 17, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="bankfirst.html">BankFirst</a></td> + <td headers="city">Sioux Falls</td> + <td headers="state">SD</td> + <td headers="CERT #">34103</td> + <td headers="AI">Alerus Financial, N.A.</td> + <td headers="Closing Date">July 17, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="piedmont.html">First Piedmont Bank</a></td> + <td headers="city">Winder</td> + <td headers="state">GA</td> + <td headers="CERT #">34594</td> + <td headers="AI">First American Bank and Trust Company</td> + <td headers="Closing Date">July 17, 2009</td> + <td headers="Updated">January 15, 2013</td> + </tr> + <tr> + <td><a href="wyoming.html">Bank of Wyoming</a></td> + <td headers="city">Thermopolis</td> + <td headers="state">WY</td> + <td headers="CERT #">22754</td> + <td headers="AI">Central Bank & Trust</td> + <td headers="Closing Date">July 10, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="founders.html">Founders Bank</a></td> + <td headers="city">Worth</td> + <td headers="state">IL</td> + <td headers="CERT #">18390</td> + <td headers="AI">The PrivateBank and Trust Company</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a 
href="millennium.html">Millennium State Bank of Texas</a></td> + <td headers="city">Dallas</td> + <td headers="state">TX</td> + <td headers="CERT #">57667</td> + <td headers="AI">State Bank of Texas</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">October 26, 2012</td> + </tr> + <tr> + <td><a href="danville.html">First National Bank of Danville</a></td> + <td headers="city">Danville</td> + <td headers="state">IL</td> + <td headers="CERT #">3644</td> + <td headers="AI">First Financial Bank, N.A.</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="elizabeth.html">Elizabeth State Bank</a></td> + <td headers="city">Elizabeth</td> + <td headers="state">IL</td> + <td headers="CERT #">9262</td> + <td headers="AI">Galena State Bank and Trust Company</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="rockriver.html">Rock River Bank</a></td> + <td headers="city">Oregon</td> + <td headers="state">IL</td> + <td headers="CERT #">15302</td> + <td headers="AI">The Harvard State Bank</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="winchester.html">First State Bank of Winchester</a></td> + <td headers="city">Winchester</td> + <td headers="state">IL</td> + <td headers="CERT #">11710</td> + <td headers="AI">The First National Bank of Beardstown</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="warner.html">John Warner Bank</a></td> + <td headers="city">Clinton</td> + <td headers="state">IL</td> + <td headers="CERT #">12093</td> + <td headers="AI">State Bank of Lincoln</td> + <td headers="Closing Date">July 2, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="mirae.html">Mirae Bank</a></td> + <td headers="city">Los 
Angeles</td> + <td headers="state">CA</td> + <td headers="CERT #">57332</td> + <td headers="AI">Wilshire State Bank</td> + <td headers="Closing Date">June 26, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="metropacific.html">MetroPacific Bank</a></td> + <td headers="city">Irvine</td> + <td headers="state">CA</td> + <td headers="CERT #">57893</td> + <td headers="AI">Sunwest Bank</td> + <td headers="Closing Date">June 26, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="horizon.html">Horizon Bank</a></td> + <td headers="city">Pine City</td> + <td headers="state">MN</td> + <td headers="CERT #">9744</td> + <td headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">June 26, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="neighbor.html">Neighborhood Community Bank</a></td> + <td headers="city">Newnan</td> + <td headers="state">GA</td> + <td headers="CERT #">35285</td> + <td headers="AI">CharterBank</td> + <td headers="Closing Date">June 26, 2009</td> + <td headers="Updated">August 20, 2012</td> + </tr> + <tr> + <td><a href="communityga.html">Community Bank of West Georgia</a></td> + <td headers="city">Villa Rica</td> + <td headers="state">GA</td> + <td headers="CERT #">57436</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">June 26, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="anthony.html">First National Bank of Anthony</a></td> + <td headers="city">Anthony</td> + <td headers="state">KS</td> + <td headers="CERT #">4614</td> + <td headers="AI">Bank of Kansas</td> + <td headers="Closing Date">June 19, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="cooperative.html">Cooperative Bank</a></td> + <td headers="city">Wilmington</td> + <td headers="state">NC</td> + <td headers="CERT #">27837</td> + <td headers="AI">First Bank</td> + <td headers="Closing 
Date">June 19, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="scb.html">Southern Community Bank</a></td> + <td headers="city">Fayetteville</td> + <td headers="state">GA</td> + <td headers="CERT #">35251</td> + <td headers="AI">United Community Bank</td> + <td headers="Closing Date">June 19, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="lincolnwood.html">Bank of Lincolnwood</a></td> + <td headers="city">Lincolnwood</td> + <td headers="state">IL</td> + <td headers="CERT #">17309</td> + <td headers="AI">Republic Bank of Chicago</td> + <td headers="Closing Date">June 5, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="citizensnational.html">Citizens National Bank</a></td> + <td headers="city">Macomb</td> + <td headers="state">IL</td> + <td headers="CERT #">5757</td> + <td headers="AI">Morton Community Bank</td> + <td headers="Closing Date">May 22, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="strategiccapital.html">Strategic Capital Bank</a></td> + <td headers="city">Champaign</td> + <td headers="state">IL</td> + <td headers="CERT #">35175</td> + <td headers="AI">Midland States Bank</td> + <td headers="Closing Date">May 22, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="bankunited.html">BankUnited, FSB</a></td> + <td headers="city">Coral Gables</td> + <td headers="state">FL</td> + <td headers="CERT #">32247</td> + <td headers="AI">BankUnited</td> + <td headers="Closing Date">May 21, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="westsound.html">Westsound Bank</a></td> + <td headers="city">Bremerton</td> + <td headers="state">WA</td> + <td headers="CERT #">34843</td> + <td headers="AI">Kitsap Bank</td> + <td headers="Closing Date">May 8, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a 
href="americawest.html">America West Bank</a></td> + <td headers="city">Layton</td> + <td headers="state">UT</td> + <td headers="CERT #">35461</td> + <td headers="AI">Cache Valley Bank</td> + <td headers="Closing Date">May 1, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="citizens.html">Citizens Community Bank</a></td> + <td headers="city">Ridgewood</td> + <td headers="state">NJ</td> + <td headers="CERT #">57563</td> + <td headers="AI">North Jersey Community Bank</td> + <td headers="Closing Date">May 1, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="silverton.html">Silverton Bank, NA</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">26535</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">May 1, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="firstbankidaho.html">First Bank of Idaho</a></td> + <td headers="city">Ketchum</td> + <td headers="state">ID</td> + <td headers="CERT #">34396</td> + <td headers="AI">U.S. 
Bank, N.A.</td> + <td headers="Closing Date">April 24, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="beverlyhills.html">First Bank of Beverly Hills</a></td> + <td headers="city">Calabasas</td> + <td headers="state">CA</td> + <td headers="CERT #">32069</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">April 24, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="michiganheritage.html">Michigan Heritage Bank</a></td> + <td headers="city">Farmington Hills</td> + <td headers="state">MI</td> + <td headers="CERT #">34369</td> + <td headers="AI">Level One Bank</td> + <td headers="Closing Date">April 24, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="amsouthern.html">American Southern Bank</a></td> + <td headers="city">Kennesaw</td> + <td headers="state">GA</td> + <td headers="CERT #">57943</td> + <td headers="AI">Bank of North Georgia</td> + <td headers="Closing Date">April 24, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="greatbasin.html">Great Basin Bank of Nevada</a></td> + <td headers="city">Elko</td> + <td headers="state">NV</td> + <td headers="CERT #">33824</td> + <td headers="AI">Nevada State Bank</td> + <td headers="Closing Date">April 17, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="amsterling.html">American Sterling Bank</a></td> + <td headers="city">Sugar Creek</td> + <td headers="state">MO</td> + <td headers="CERT #">8266</td> + <td headers="AI">Metcalf Bank</td> + <td headers="Closing Date">April 17, 2009</td> + <td headers="Updated">August 31, 2012</td> + </tr> + <tr> + <td><a href="newfrontier.html">New Frontier Bank</a></td> + <td headers="city">Greeley</td> + <td headers="state">CO</td> + <td headers="CERT #">34881</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">April 10, 2009</td> + <td headers="Updated">September 4, 
2012</td> + </tr> + <tr> + <td><a href="capefear.html">Cape Fear Bank</a></td> + <td headers="city">Wilmington</td> + <td headers="state">NC</td> + <td headers="CERT #">34639</td> + <td headers="AI">First Federal Savings and Loan Association</td> + <td headers="Closing Date">April 10, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="omni.html">Omni National Bank</a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">22238</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 27, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="teambank.html">TeamBank, NA</a></td> + <td headers="city">Paola</td> + <td headers="state">KS</td> + <td headers="CERT #">4754</td> + <td headers="AI">Great Southern Bank</td> + <td headers="Closing Date">March 20, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="coloradonational.html">Colorado National Bank</a></td> + <td headers="city">Colorado Springs</td> + <td headers="state">CO</td> + <td headers="CERT #">18896</td> + <td headers="AI">Herring Bank</td> + <td headers="Closing Date">March 20, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="firstcity.html">FirstCity Bank</a></td> + <td headers="city">Stockbridge</td> + <td headers="state">GA</td> + <td headers="CERT #">18243</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 20, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="freedomga.html">Freedom Bank of Georgia</a></td> + <td headers="city">Commerce</td> + <td headers="state">GA</td> + <td headers="CERT #">57558</td> + <td headers="AI">Northeast Georgia Bank</td> + <td headers="Closing Date">March 6, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="securitysavings.html">Security Savings Bank</a></td> + <td headers="city">Henderson</td> 
+ <td headers="state">NV</td> + <td headers="CERT #">34820</td> + <td headers="AI">Bank of Nevada</td> + <td headers="Closing Date">February 27, 2009</td> + <td headers="Updated">September 7, 2012</td> + </tr> + <tr> + <td><a href="heritagebank.html">Heritage Community Bank</a></td> + <td headers="city">Glenwood</td> + <td headers="state">IL</td> + <td headers="CERT #">20078</td> + <td headers="AI">MB Financial Bank, N.A.</td> + <td headers="Closing Date">February 27, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="silverfalls.html">Silver Falls Bank</a></td> + <td headers="city">Silverton</td> + <td headers="state">OR</td> + <td headers="CERT #">35399</td> + <td headers="AI">Citizens Bank</td> + <td headers="Closing Date">February 20, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td> + <td headers="city">Beaverton</td> + <td headers="state">OR</td> + <td headers="CERT #">57342</td> + <td headers="AI">Washington Trust Bank of Spokane</td> + <td headers="Closing Date">February 13, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td> + <td headers="city">Pittsfield</td> + <td headers="state">IL</td> + <td headers="CERT #">16500</td> + <td headers="AI">The Carlinville National Bank</td> + <td headers="Closing Date">February 13, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td> + <td headers="city">Cape Coral</td> + <td headers="state">FL</td> + <td headers="CERT #">34563</td> + <td headers="AI">TIB Bank</td> + <td headers="Closing Date">February 13, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="sherman.html">Sherman County Bank</a></td> + <td headers="city">Loup City</td> + <td headers="state">NE</td> + <td headers="CERT #">5431</td> + <td 
headers="AI">Heritage Bank</td> + <td headers="Closing Date">February 13, 2009</td> + <td headers="Updated">August 17, 2012</td> + </tr> + <tr> + <td><a href="county.html">County Bank</a></td> + <td headers="city">Merced</td> + <td headers="state">CA</td> + <td headers="CERT #">22574</td> + <td headers="AI">Westamerica Bank</td> + <td headers="Closing Date">February 6, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="alliance.html">Alliance Bank</a></td> + <td headers="city">Culver City</td> + <td headers="state">CA</td> + <td headers="CERT #"> 23124</td> + <td headers="AI">California Bank & Trust</td> + <td headers="Closing Date">February 6, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="firstbank.html">FirstBank Financial Services</a></td> + <td headers="city">McDonough</td> + <td headers="state">GA</td> + <td headers="CERT #">57017</td> + <td headers="AI">Regions Bank</td> + <td headers="Closing Date">February 6, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="ocala.html">Ocala National Bank</a></td> + <td headers="city">Ocala</td> + <td headers="state">FL</td> + <td headers="CERT #">26538</td> + <td headers="AI">CenterState Bank of Florida, N.A.</td> + <td headers="Closing Date">January 30, 2009</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="suburban.html">Suburban FSB</a></td> + <td headers="city">Crofton</td> + <td headers="state">MD</td> + <td headers="CERT #">30763</td> + <td headers="AI">Bank of Essex</td> + <td headers="Closing Date">January 30, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="magnet.html">MagnetBank</a></td> + <td headers="city">Salt Lake City</td> + <td headers="state">UT</td> + <td headers="CERT #">58001</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">January 30, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + 
<td><a href="centennial.html">1st Centennial Bank</a></td> + <td headers="city">Redlands</td> + <td headers="state">CA</td> + <td headers="CERT #">33025</td> + <td headers="AI">First California Bank</td> + <td headers="Closing Date">January 23, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="clark.html">Bank of Clark County</a></td> + <td headers="city">Vancouver</td> + <td headers="state">WA</td> + <td headers="CERT #">34959</td> + <td headers="AI">Umpqua Bank</td> + <td headers="Closing Date">January 16, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="commerce.html">National Bank of Commerce</a></td> + <td headers="city">Berkeley</td> + <td headers="state">IL</td> + <td headers="CERT #">19733</td> + <td headers="AI">Republic Bank of Chicago</td> + <td headers="Closing Date">January 16, 2009</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="sanderson.html">Sanderson State Bank</a><br /> + <a href="sanderson_spanish.html">En Espanol</a></td> + <td headers="city">Sanderson</td> + <td headers="state">TX</td> + <td headers="CERT #">11568</td> + <td headers="AI">The Pecos County State Bank</td> + <td headers="Closing Date">December 12, 2008</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="haventrust.html">Haven Trust Bank</a></td> + <td headers="city">Duluth</td> + <td headers="state">GA</td> + <td headers="CERT #">35379</td> + <td headers="AI">Branch Banking & Trust Company, (BB&T) </td> + <td headers="Closing Date">December 12, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="firstga.html">First Georgia Community Bank</a></td> + <td headers="city">Jackson</td> + <td headers="state">GA</td> + <td headers="CERT #">34301</td> + <td headers="AI">United Bank</td> + <td headers="Closing Date">December 5, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="pff.html">PFF 
Bank & Trust </a></td> + <td headers="city">Pomona</td> + <td headers="state">CA</td> + <td headers="CERT #">28344</td> + <td headers="AI">U.S. Bank, N.A.</td> + <td headers="Closing Date">November 21, 2008</td> + <td headers="Updated">January 4, 2013</td> + </tr> + <tr> + <td><a href="downey.html">Downey Savings & Loan</a></td> + <td headers="city">Newport Beach</td> + <td headers="state">CA</td> + <td headers="CERT #">30968</td> + <td headers="AI">U.S. Bank, N.A.</td> + <td headers="Closing Date">November 21, 2008</td> + <td headers="Updated">January 4, 2013</td> + </tr> + <tr> + <td><a href="community.html">Community Bank</a></td> + <td headers="city">Loganville</td> + <td headers="state">GA</td> + <td headers="CERT #">16490</td> + <td headers="AI">Bank of Essex</td> + <td headers="Closing Date">November 21, 2008</td> + <td headers="Updated">September 4, 2012</td> + </tr> + <tr> + <td><a href="securitypacific.html">Security Pacific Bank</a></td> + <td headers="city">Los Angeles</td> + <td headers="state">CA</td> + <td headers="CERT #">23595</td> + <td headers="AI">Pacific Western Bank</td> + <td headers="Closing Date">November 7, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="franklinbank.html">Franklin Bank, SSB</a></td> + <td headers="city">Houston</td> + <td headers="state">TX</td> + <td headers="CERT #">26870</td> + <td headers="AI">Prosperity Bank</td> + <td headers="Closing Date">November 7, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="freedom.html">Freedom Bank</a></td> + <td headers="city">Bradenton</td> + <td headers="state">FL</td> + <td headers="CERT #">57930</td> + <td headers="AI">Fifth Third Bank</td> + <td headers="Closing Date">October 31, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="alpha.html">Alpha Bank & Trust</a></td> + <td headers="city">Alpharetta</td> + <td headers="state">GA</td> + <td headers="CERT #">58241</td> + <td 
headers="AI">Stearns Bank, N.A.</td> + <td headers="Closing Date">October 24, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="meridian.html">Meridian Bank</a></td> + <td headers="city">Eldred</td> + <td headers="state">IL</td> + <td headers="CERT #">13789</td> + <td headers="AI">National Bank</td> + <td headers="Closing Date">October 10, 2008</td> + <td headers="Updated">May 31, 2012</td> + </tr> + <tr> + <td><a href="mainstreet.html">Main Street Bank</a></td> + <td headers="city">Northville</td> + <td headers="state">MI</td> + <td headers="CERT #">57654</td> + <td headers="AI">Monroe Bank & Trust</td> + <td headers="Closing Date">October 10, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="wamu.html">Washington Mutual Bank <br /> + (Including its subsidiary Washington Mutual Bank FSB)</a></td> + <td headers="city">Henderson</td> + <td headers="state">NV</td> + <td headers="CERT #">32633</td> + <td headers="AI">JP Morgan Chase Bank</td> + <td headers="Closing Date">September 25, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <!-- <tr> + <td width="210"><a href="wamu.html">Washington Mutual Bank FSB</a></td> + <td headers="city" width="126">Park City</td> + <td headers="state" width="44">UT</td> + <td headers="CERT #" width="61">33891</td> + <td headers="Closing Date" width="117">September 25, 2008</td> + <td headers="Updated" width="129">November 23, 2009</td> + </tr> --> + <tr> + <td><a href="ameribank.html">Ameribank</a></td> + <td headers="city">Northfork</td> + <td headers="state">WV</td> + <td headers="CERT #">6782</td> + <td headers="AI">The Citizens Savings Bank<br /><br />Pioneer Community Bank, Inc.</td> + <td headers="Closing Date">September 19, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="silverstate.html">Silver State Bank</a><br /> + <a href="silverstatesp.html">En Espanol </a></td> + <td headers="city">Henderson</td> + <td 
headers="state">NV</td> + <td headers="CERT #">34194</td> + <td headers="AI">Nevada State Bank</td> + <td headers="Closing Date">September 5, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="integrity.html">Integrity Bank</a></td> + <td headers="city">Alpharetta</td> + <td headers="state">GA</td> + <td headers="CERT #">35469</td> + <td headers="AI">Regions Bank</td> + <td headers="Closing Date">August 29, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="columbian.html">Columbian Bank & Trust</a></td> + <td headers="city">Topeka</td> + <td headers="state">KS</td> + <td headers="CERT #">22728</td> + <td headers="AI">Citizens Bank & Trust</td> + <td headers="Closing Date">August 22, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="firstprioritybank.html">First Priority Bank</a></td> + <td headers="city">Bradenton</td> + <td headers="state">FL</td> + <td headers="CERT #">57523</td> + <td headers="AI">SunTrust Bank</td> + <td headers="Closing Date">August 1, 2008</td> + <td headers="Updated">August 16, 2012</td> + </tr> + <tr> + <td><a href="heritage.html">First Heritage Bank, NA</a></td> + <td headers="city">Newport Beach</td> + <td headers="state">CA</td> + <td headers="CERT #">57961</td> + <td headers="AI">Mutual of Omaha Bank</td> + <td headers="Closing Date">July 25, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="fnbnv.html">First National Bank of Nevada</a></td> + <td headers="city">Reno</td> + <td headers="state">NV</td> + <td headers="CERT #">27011</td> + <td headers="AI">Mutual of Omaha Bank</td> + <td headers="Closing Date">July 25, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="IndyMac.html">IndyMac Bank</a></td> + <td headers="city">Pasadena</td> + <td headers="state">CA</td> + <td headers="CERT #">29730</td> + <td headers="AI">OneWest Bank, FSB</td> + <td headers="Closing 
Date">July 11, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td> + <td headers="city">Staples</td> + <td headers="state">MN</td> + <td headers="CERT #">12736</td> + <td headers="AI">First International Bank and Trust</td> + <td headers="Closing Date">May 30, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr><tr> + <td><a href="anb.html">ANB Financial, NA</a></td> + <td headers="city">Bentonville</td> + <td headers="state">AR</td> + <td headers="CERT #">33901</td> + <td headers="AI">Pulaski Bank and Trust Company</td> + <td headers="Closing Date">May 9, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr><tr> + <td><a href="Hume.html">Hume Bank</a></td> + <td headers="city">Hume</td> + <td headers="state">MO</td> + <td headers="CERT #">1971</td> + <td headers="AI">Security Bank</td> + <td headers="Closing Date">March 7, 2008</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="Douglass.html">Douglass National Bank</a></td> + <td headers="city">Kansas City</td> + <td headers="state">MO</td> + <td headers="CERT #">24660</td> + <td headers="AI">Liberty Bank and Trust Company</td> + <td headers="Closing Date">January 25, 2008</td> + <td headers="Updated">October 26, 2012</td> + </tr> + <tr> + <td><a href="MiamiValley.html">Miami Valley Bank</a></td> + <td headers="city">Lakeview</td> + <td headers="state">OH</td> + <td headers="CERT #">16848</td> + <td headers="AI">The Citizens Banking Company</td> + <td headers="Closing Date">October 4, 2007</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a href="NetBank.html">NetBank</a></td> + <td headers="city">Alpharetta</td> + <td headers="state">GA</td> + <td headers="CERT #">32575</td> + <td headers="AI">ING DIRECT</td> + <td headers="Closing Date">September 28, 2007</td> + <td headers="Updated">August 28, 2012</td> + </tr> + <tr> + <td><a 
href="MetropolitanSB.html">Metropolitan Savings Bank</a></td> + <td headers="city">Pittsburgh</td> + <td headers="state">PA</td> + <td headers="CERT #">35353</td> + <td headers="AI">Allegheny Valley Bank of Pittsburgh</td> + <td headers="Closing Date">February 2, 2007</td> + <td headers="Updated">October 27, 2010</td> + </tr> + <tr> + <td><a href="ephraim.html">Bank of Ephraim</a></td> + <td headers="city">Ephraim</td> + <td headers="state">UT</td> + <td headers="CERT #">1249</td> + <td headers="AI">Far West Bank</td> + <td headers="Closing Date">June 25, 2004</td> + <td headers="Updated">April 9, 2008</td> + </tr> + <tr> + <td><a href="reliance.html">Reliance Bank</a></td> + <td headers="city">White Plains</td> + <td headers="state">NY</td> + <td headers="CERT #">26778</td> + <td headers="AI">Union State Bank</td> + <td headers="Closing Date">March 19, 2004</td> + <td headers="Updated">April 9, 2008</td> + </tr> + <tr> + <td><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td> + <td headers="city">Tallahassee</td> + <td headers="state">FL</td> + <td headers="CERT #">26838</td> + <td headers="AI">Hancock Bank of Florida</td> + <td headers="Closing Date">March 12, 2004</td> + <td headers="Updated">June 5, 2012</td> + </tr> + <tr> + <td><a href="dollar.html">Dollar Savings Bank</a></td> + <td headers="city">Newark</td> + <td headers="state">NJ</td> + <td headers="CERT #">31330</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">February 14, 2004</td> + <td headers="Updated">April 9, 2008</td> + </tr> + <tr> + <td><a href="pulaski.html">Pulaski Savings Bank</a></td> + <td headers="city">Philadelphia</td> + <td headers="state">PA</td> + <td headers="CERT #">27203</td> + <td headers="AI">Earthstar Bank</td> + <td headers="Closing Date">November 14, 2003</td> + <td headers="Updated">July 22, 2005</td> + </tr> + <tr> + <td><a href="blanchardville.html">First National Bank of Blanchardville</a></td> + <td headers="city">Blanchardville</td> + 
<td headers="state">WI</td> + <td headers="CERT #">11639</td> + <td headers="AI">The Park Bank</td> + <td headers="Closing Date">May 9, 2003</td> + <td headers="Updated">June 5, 2012</td> + </tr> + <tr> + <td><a href="spbank.html">Southern Pacific Bank</a></td> + <td headers="city">Torrance</td> + <td headers="state">CA</td> + <td headers="CERT #">27094</td> + <td headers="AI">Beal Bank</td> + <td headers="Closing Date">February 7, 2003</td> + <td headers="Updated">October 20, 2008</td> + </tr> + <tr> + <td><a href="farmers.html">Farmers Bank of Cheneyville</a></td> + <td headers="city">Cheneyville</td> + <td headers="state">LA</td> + <td headers="CERT #">16445</td> + <td headers="AI">Sabine State Bank & Trust</td> + <td headers="Closing Date">December 17, 2002</td> + <td headers="Updated">October 20, 2004</td> + </tr> + <tr> + <td><a href="bankofalamo.html">Bank of Alamo</a></td> + <td headers="city">Alamo</td> + <td headers="state">TN</td> + <td headers="CERT #">9961</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">November 8, 2002</td> + <td headers="Updated">March 18, 2005</td> + </tr> + <tr> + <td><a href="amtrade.html">AmTrade International Bank</a><br /><a href="amtrade-spanish.html">En Espanol </a></td> + <td headers="city">Atlanta</td> + <td headers="state">GA</td> + <td headers="CERT #">33784</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">September 30, 2002</td> + <td headers="Updated">September 11, 2006</td> + </tr> + <tr> + <td headers="Instituition"><a href="universal.html">Universal Federal Savings Bank</a></td> + <td headers="city">Chicago</td> + <td headers="state">IL</td> + <td headers="CERT #">29355</td> + <td headers="AI">Chicago Community Bank</td> + <td headers="Closing Date">June 27, 2002</td> + <td headers="Updated">April 9, 2008</td> + </tr> + <tr> + <td headers="Instituition"><a href="cbc.html">Connecticut Bank of Commerce</a></td> + <td headers="city">Stamford</td> + <td headers="state">CT</td> + 
<td headers="CERT #">19183</td> + <td headers="AI">Hudson United Bank</td> + <td headers="Closing Date">June 26, 2002</td> + <td headers="Updated">February 14, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="newcentury.html">New Century Bank</a></td> + <td headers="city">Shelby Township</td> + <td headers="state">MI</td> + <td headers="CERT #">34979</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">March 28, 2002</td> + <td headers="Updated">March 18, 2005</td> + </tr> + <tr> + <td headers="Instituition"><a href="netfirst.html">Net 1st National Bank</a></td> + <td headers="city">Boca Raton</td> + <td headers="state">FL</td> + <td headers="CERT #">26652</td> + <td headers="AI">Bank Leumi USA</td> + <td headers="Closing Date">March 1, 2002</td> + <td headers="Updated">April 9, 2008</td> + </tr> + <tr> + <td headers="Instituition"><a href="nextbank.html">NextBank, NA</a></td> + <td headers="city">Phoenix</td> + <td headers="state">AZ</td> + <td headers="CERT #">22314</td> + <td headers="AI">No Acquirer</td> + <td headers="Closing Date">February 7, 2002</td> + <td headers="Updated">August 27, 2010</td> + </tr> + <tr> + <td headers="Instituition"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td> + <td headers="city">Oakwood</td> + <td headers="state">OH</td> + <td headers="CERT #">8966</td> + <td headers="AI">The State Bank & Trust Company</td> + <td headers="Closing Date">February 1, 2002</td> + <td headers="Updated">October 25, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td> + <td headers="city">Sierra Blanca</td> + <td headers="state">TX</td> + <td headers="CERT #">22002</td> + <td headers="AI">The Security State Bank of Pecos</td> + <td headers="Closing Date">January 18, 2002</td> + <td headers="Updated">November 6, 2003</td> + </tr> + <tr> + <td headers="Instituition"><a href="hamilton.html">Hamilton Bank, NA</a><br /> + <a href="hamilton-spanish.html">En 
Espanol</a></td> + <td headers="city">Miami</td> + <td headers="state">FL</td> + <td headers="CERT #">24382</td> + <td headers="AI">Israel Discount Bank of New York</td> + <td headers="Closing Date">January 11, 2002</td> + <td headers="Updated">June 5, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="sinclair.html">Sinclair National Bank</a></td> + <td headers="city">Gravette</td> + <td headers="state">AR</td> + <td headers="CERT #">34248</td> + <td headers="AI">Delta Trust & Bank</td> + <td headers="Closing Date">September 7, 2001</td> + <td headers="Updated">February 10, 2004</td> + </tr> + <tr> + <td headers="Instituition"><a href="superior.html">Superior Bank, FSB</a></td> + <td headers="city">Hinsdale</td> + <td headers="state">IL</td> + <td headers="CERT #">32646</td> + <td headers="AI">Superior Federal, FSB</td> + <td headers="Closing Date">July 27, 2001</td> + <td headers="Updated">June 5, 2012</td> + </tr> + <tr> + <td headers="Instituition"><a href="Malta.html">Malta National Bank</a></td> + <td headers="city">Malta</td> + <td headers="state">OH</td> + <td headers="CERT #">6629</td> + <td headers="AI">North Valley Bank</td> + <td headers="Closing Date">May 3, 2001</td> + <td headers="Updated">November 18, 2002</td> + </tr> + <tr> + <td headers="Instituition"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td> + <td headers="city">Manchester</td> + <td headers="state">NH</td> + <td headers="CERT #">34264</td> + <td headers="AI">Southern New Hampshire Bank & Trust</td> + <td headers="Closing Date">February 2, 2001</td> + <td headers="Updated">February 18, 2003</td> + </tr> + <tr> + <td headers="Instituition"><a href="nsb.html">National State Bank of Metropolis</a></td> + <td headers="city">Metropolis</td> + <td headers="state">IL</td> + <td headers="CERT #">3815</td> + <td headers="AI">Banterra Bank of Marion</td> + <td headers="Closing Date">December 14, 2000</td> + <td headers="Updated">March 17, 2005</td> + </tr> + <tr> + 
<td headers="Instituition"><a href="boh.html">Bank of Honolulu</a></td> + <td headers="city">Honolulu</td> + <td headers="state">HI</td> + <td headers="CERT #">21029</td> + <td headers="AI">Bank of the Orient</td> + <td headers="Closing Date">October 13, 2000</td> + <td headers="Updated">March 17, 2005</td> + </tr> + </tbody> +</table> + <!-- +<script language="javascript"> + +document.writeln("<div id=\"controls\">"); +document.writeln("<div id=\"perpage\">"); +document.writeln("<select onchange=\"sorter.size(this.value)\">"); +document.writeln("<option value=\"5\">5</option>"); +document.writeln("<option value=\"10\" >10</option>"); +document.writeln("<option value=\"20\"selected=\"selected\">20</option>"); +document.writeln("<option value=\"50\">50</option>"); +document.writeln("<option value=\"100\">100</option>"); +document.writeln("<option value=\"150\">150</option>"); +document.writeln("</select>"); +document.writeln("&nbsp;Entries Per Page"); +document.writeln("</div>"); +document.writeln("<div id=\"navigation\">"); +document.writeln("<img src=\"images/first.gif\" width=\"16\" height=\"16\" alt=\"First Page\" onclick=\"sorter.move(-1,true)\" />"); +document.writeln("<img src=\"images/previous.gif\" width=\"16\" height=\"16\" alt=\"Previous Page\" onclick=\"sorter.move(-1)\" />"); +document.writeln("<img src=\"images/next.gif\" width=\"16\" height=\"16\" alt=\"Next Page\" onclick=\"sorter.move(1)\" />"); +document.writeln("<img src=\"images/last.gif\" width=\"16\" height=\"16\" alt=\"Last Page\" onclick=\"sorter.move(1,true)\" />"); +document.writeln("</div>"); +document.writeln("<div id=\"text\">Displaying Page&nbsp;"); +document.writeln("<span id=\"currentpage\">"); +document.writeln("</span>"); +document.writeln("&nbsp;of&nbsp;"); +document.writeln("<span id=\"pagelimit\">"); +document.writeln("</span>"); +document.writeln("</div>"); +document.writeln("</div>"); + + </script> +--> + +<script type="text/javascript"> +var TINY={}; + +function T$(i){return 
document.getElementById(i)} +function T$$(e,p){return p.getElementsByTagName(e)} + +TINY.table=function(){ + function sorter(n){this.n=n; this.pagesize=20; this.paginate=0} + sorter.prototype.init=function(e,f){ + var t=ge(e), i=0; this.e=e; this.l=t.r.length; t.a=[]; + t.h=T$$('thead',T$(e))[0].rows[0]; t.w=t.h.cells.length; + for(i;i<t.w;i++){ + var c=t.h.cells[i]; + if(c.className!='nosort'){ + c.className=this.head; c.onclick=new Function(this.n+'.wk(this.cellIndex)') + } + } + for(i=0;i<this.l;i++){t.a[i]={}} + if(f!=null){var a=new Function(this.n+'.wk('+f+')'); a()} + if(this.paginate){this.g=1; this.pages()} + }; + sorter.prototype.wk=function(y){ + var t=ge(this.e), x=t.h.cells[y], i=0; + for(i;i<this.l;i++){ + t.a[i].o=i; var v=t.r[i].cells[y]; t.r[i].style.display=''; + while(v.hasChildNodes()){v=v.firstChild} + t.a[i].v=v.nodeValue?v.nodeValue:'' + } + for(i=0;i<t.w;i++){var c=t.h.cells[i]; if(c.className!='nosort'){c.className=this.head}} + + + if(t.p==y) + { + t.a.reverse(); + x.className=t.d?this.asc:this.desc; + t.d=t.d?0:1 + } + + else + { + t.p = y; + t.a.sort(cp); + t.d = 0; + x.className = this.asc; + } + + + + + var n=document.createElement('tbody'); + for(i=0;i<this.l;i++){ + var r=t.r[t.a[i].o].cloneNode(true); n.appendChild(r); + r.className=i%2==0?this.even:this.odd; var cells=T$$('td',r); + for(var z=0;z<t.w;z++){cells[z].className=y==z?i%2==0?this.evensel:this.oddsel:''} + } + t.replaceChild(n,t.b); if(this.paginate){this.size(this.pagesize)} + }; + sorter.prototype.page=function(s){ + var t=ge(this.e), i=0, l=s+parseInt(this.pagesize); + if(this.currentid&&this.limitid){T$(this.currentid).innerHTML=this.g} + for(i;i<this.l;i++){t.r[i].style.display=i>=s&&i<l?'':'none'} + }; + sorter.prototype.move=function(d,m){ + var s=d==1?(m?this.d:this.g+1):(m?1:this.g-1); + if(s<=this.d&&s>0){this.g=s; this.page((s-1)*this.pagesize)} + }; + sorter.prototype.size=function(s){ + this.pagesize=s; this.g=1; this.pages(); this.page(0); + 
if(this.currentid&&this.limitid){T$(this.limitid).innerHTML=this.d} + }; + sorter.prototype.pages=function(){this.d=Math.ceil(this.l/this.pagesize)}; + function ge(e){var t=T$(e); t.b=T$$('tbody',t)[0]; t.r=t.b.rows; return t}; + function cp(f,c){ + var g,h; f=g=f.v.toLowerCase(), c=h=c.v.toLowerCase(); + var i=parseFloat(f.replace(/(\$|\,)/g,'')), n=parseFloat(c.replace(/(\$|\,)/g,'')); + if(!isNaN(i)&&!isNaN(n)){g=i,h=n} + i=Date.parse(f); n=Date.parse(c); + if(!isNaN(i)&&!isNaN(n)) + { + g=i; + h=n; + + } + + /**** This string returns the sort by ASCENDING Order *****/ + //return g>h?1:(g<h?-1:0) + + + /**** This string returns the sort by DESCENDING Order *****/ + return g<h?1:(g>h?-1:0) + + }; + return{sorter:sorter} +}(); + + </script> +<script type="text/javascript"> + var sorter = new TINY.table.sorter("sorter"); + sorter.head = "head"; + sorter.asc = "asc"; + sorter.desc = "desc"; + sorter.even = "evenrow"; + sorter.odd = "oddrow"; + sorter.evensel = "evenselected"; + sorter.oddsel = "oddselected"; + sorter.paginate = false; + sorter.currentid = "currentpage"; + sorter.limitid = "pagelimit"; + sorter.init("table",5); + </script> +</td></tr> +</table> + +<!-- DRR END Product Title & Body--> + <br /> + <br /> + + + + + </td> + </tr> + + <!-- begin: last updated date and contact information --> + <tr> + <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td> + <td> + + <!-- Instruction: change "mm/dd/yyyy" to the date the document was created or last modfied --> + + <font face="arial, helvetica, sans-serif" size="1" color="#000066">Last Updated + 04/23/2013</font></td> + <td align="right"><font face="arial, helvetica, sans-serif" size="1" color="#000066"> + +<!-- Instruction: change the link text and href value of "Insert_Content_Email_Address@fdic.gov" to the fdic.gov e-mail address of the document's point of contact --> + +<a 
HREF="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></font></td> + </tr> + <!-- end: last updated date and contact information --> +</table> +<!-- BEGIN FOOTER INCLUDE --> +<!-- Instruction: The following statement is the footer include statement. Do not revise this code. --> +<br /> +</font><!-- Ends Opening Font Tag --> +<!-- begin footer --> +<!-- Last Updated Date: 1-18-2011 Time: 2:24PM Version: 1.4 --> +</div><!-- ends body tag --> +<!-- begin footer --> + <div id="footer-container"> + <div> + <ul id="footer-top"> + <li><a href="/" title="Home">Home</a> </li> + <li>|</li> + <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li> + <li>|</li> + <li><a href="/search/" title="Search">Search</a></li> + <li>|</li> + <li><a href="/help/" title="Help">Help</a></li> + <li>|</li> + <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li> + <li>|</li> + <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li> + <li>|</li> + <li><a href="/quicklinks/spanish.html" title="En Espa&ntilde;ol">En Espa&ntilde;ol</a></li> + + </ul> + </div> + + <div> + <ul id="footer-middle"> + <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li> + <li>|</li> + <li><a href="/about/privacy/policy/index.html" title="Privacy Policy">Privacy Policy</a></li> + <li>|</li> + <li><a href="/plainlanguage/index.html" title="Privacy Policy">Plain Writing Act of 2010 </a></li> + <li>|</li> + <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li> + <li>|</li> + <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li> + </ul> + </div> + + <div> + <ul id="footer-bottom"> + <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li> + <li>|</li> + <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a><a href="/about/diversity/nofear/" title="No 
FEAR Act Data"></a></li> <li>|</li> + <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a><a href="/about/diversity/nofear/" title="No FEAR Act Data"></a></li> + </ul> + </div> + </div><!-- end of footer container --> +<!-- end footer --> +</div><!-- ends site-container --> + +<script language="JavaScript" type="text/javascript"> +/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/ +var s_code=s.t();if(s_code)document.write(s_code)</script> +<script language="JavaScript" type="text/javascript"> +if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-') +</script> +<noscript> +<a href="http://www.omniture.com" title="Web Analytics"> +<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a> +</noscript> + +<!--/DO NOT REMOVE/--> +<!-- End SiteCatalyst code version: H.21. --> +<!-- end footer --> +<!-- END FOOTER INCLUDE --> + + +</body> +</html> diff --git a/pandas/io/tests/data/spam.html b/pandas/io/tests/data/spam.html new file mode 100644 index 0000000000000..9f6ac2d74e0c9 --- /dev/null +++ b/pandas/io/tests/data/spam.html @@ -0,0 +1,797 @@ + +<!DOCTYPE html> +<!--[if lt IE 7 ]> <html lang="en" class="no-js ie6"> <![endif]--> +<!--[if IE 7 ]> <html lang="en" class="no-js ie7"> <![endif]--> +<!--[if IE 8 ]> <html lang="en" class="no-js ie8"> <![endif]--> +<!--[if IE 9 ]> <html lang="en" class="no-js ie9"> <![endif]--> +<!--[if (gt IE 9)|!(IE)]><!--> <html lang="en" class="no-js"><!--<![endif]--> + + +<html> + <head> + + <title>Show Foods</title> + <link rel="shortcut icon" href="/ndb/static/images/favicon.ico" type="image/x-icon" /> + + + + + + + <link rel='stylesheet' type='text/css' href='/ndb/plugins/richui-0.8/css/autocomplete.css' /> +<script type='text/javascript' src='/ndb/plugins/richui-0.8/js/yui/yahoo-dom-event/yahoo-dom-event.js'></script> +<script type='text/javascript' 
src='/ndb/plugins/richui-0.8/js/yui/connection/connection-min.js'></script> +<script type='text/javascript' src='/ndb/plugins/richui-0.8/js/yui/datasource/datasource-min.js'></script> +<script type='text/javascript' src='/ndb/plugins/richui-0.8/js/yui/animation/animation-min.js'></script> +<script type='text/javascript' src='/ndb/plugins/richui-0.8/js/yui/autocomplete/autocomplete-min.js'></script> + +<link rel="stylesheet" href="/ndb/static/css/main.css" /> + + <script type="text/JavaScript"> + var _gaq = _gaq || []; + // NAL + _gaq.push(['_setAccount', 'UA-28627214-1']); + _gaq.push(['_setDomainName', 'nal.usda.gov']); + _gaq.push(['_setAllowLinker', true]); + _gaq.push(['_trackPageview']); + // + // _gaq.push(['_setAccount', 'UA-3876418-1']); + // _gaq.push(['_trackPageview']); + // for NDB + _gaq.push(['_setAccount', 'UA-36442725-1']); + _gaq.push(['_trackPageview']); + // USDA servers + _gaq.push(['_setAccount', 'UA-466807-3']); + _gaq.push(['_setDomainName', 'usda.gov']); + _gaq.push(['_setAllowLinker', true]); + _gaq.push(['_trackPageview']); + // + _gaq.push(['a._setAccount', 'UA-27627304-18']); + _gaq.push(['a._setDomainName', 'usda.gov']); + _gaq.push(['a._setAllowLinker', true]); + _gaq.push(['a._trackPageview']); + // + _gaq.push(['b._setAccount', 'UA-27627304-1']); + _gaq.push(['b._setDomainName', 'usda.gov']); + _gaq.push(['b._setAllowLinker', true]); + _gaq.push(['b._trackPageview']); + + (function() { + var ga = document.createElement('script'); ga.type = + 'text/javascript'; ga.async = true; + ga.src = ('https:' == document.location.protocol ? 
'https://ssl' : + 'http://www') + '.google-analytics.com/ga.js'; + var s = document.getElementsByTagName('script')[0]; + s.parentNode.insertBefore(ga, s); + })(); + </script> + + + + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> + <meta name="layout" content="main"/> + + + + + + + + <script src="/ndb/static/plugins/yui-2.8.2.1/js/yui/yahoo-dom-event/yahoo-dom-event.js" type="text/javascript" ></script> +<script src="/ndb/static/plugins/yui-2.8.2.1/js/yui/element/element-min.js" type="text/javascript" ></script> +<script src="/ndb/static/plugins/yui-2.8.2.1/js/yui/animation/animation-min.js" type="text/javascript" ></script> +<script src="/ndb/static/plugins/yui-2.8.2.1/js/yui/connection/connection-min.js" type="text/javascript" ></script> +<script src="/ndb/static/plugins/yui-2.8.2.1/js/yui/dragdrop/dragdrop-min.js" type="text/javascript" ></script> +<script src="/ndb/static/bundle-bundle_yui-container_head.js" type="text/javascript" ></script> +<link href="/ndb/static/bundle-bundle_yui-container_head.css" type="text/css" rel="stylesheet" media="screen, projection" /> +<link href="/ndb/static/plugins/yui-2.8.2.1/js/yui/fonts/fonts-min.css" type="text/css" rel="stylesheet" media="screen, projection" /> +<script src="/ndb/static/plugins/grails-ui-1.2.3/js/grailsui/grailsui.js" type="text/javascript" ></script> +<link href="/ndb/static/plugins/jquery-ui-1.8.24/jquery-ui/themes/ui-lightness/jquery-ui-1.8.24.custom.css" type="text/css" rel="stylesheet" media="screen, projection" /> +<script src="/ndb/static/plugins/jquery-1.8.0/js/jquery/jquery-1.8.0.min.js" type="text/javascript" ></script> +<script src="/ndb/static/plugins/jquery-ui-1.8.24/jquery-ui/js/jquery-ui-1.8.24.custom.min.js" type="text/javascript" ></script> +<script src="/ndb/static/bundle-bundle_yui-menu_head.js" type="text/javascript" ></script> +<link href="/ndb/static/bundle-bundle_yui-menu_head.css" type="text/css" rel="stylesheet" media="screen, projection" /> +<script 
src="/ndb/static/bundle-bundle_yui-button_head.js" type="text/javascript" ></script> +<link href="/ndb/static/bundle-bundle_yui-button_head.css" type="text/css" rel="stylesheet" media="screen, projection" /> + + </head> + <body class="yui-skin-sam"> + <div class="section clearfix" > + <div id="name-and-slogan" style="padding-left:15px;" > + <a href="http://www.ars.usda.gov"><img id="masthead-map" usemap="#masthead-map" src="/ndb/static/images/masthead.jpg" alt="National Nutrient Database" border="0" /></a> + <map id="masthead-map" name="masthead-map"> +<area shape="rect" coords="4,2,54,52" href="http://www.usda.gov" alt="" title="USDA Website" /> +<area shape="rect" coords="66,1,128,49" href="http://www.ars.usda.gov" alt="" title="Agricultural Research Service Website" /> +<area shape="rect" coords="127,0,336,50" href="http://www.nal.usda.gov" alt="" title="National Agricultural Library Website" /> +<area shape="rect" coords="470,2,679,52" href="http://www.ars.usda.gov/main/site_main.htm?modecode=12-35-45-00" alt="" title="Nutrient Data Laboratory Website" /> +<area shape="rect" coords="702,6,742,47" href="http://fnic.nal.usda.gov" alt="" title="Food and Nutrition Information Center Website" /> +</map> + + + </div> + + + + </div> + <div id='site-slogan' align='left'> + National Nutrient Database for Standard Reference<br>Release 25 + </div> + <div class="bodywrapper"> + + <div class="nav"> + <span class="menuButton"> <a href="http://www.ars.usda.gov/main/site_main.htm?modecode=12-35-45-00" class="home" title="Go the NDL home page">NDL Home</a></span> + <span class="menuButton"><a href="/ndb/search/list" class="list" name="menu-advanced" title="Browse the foods list">Foods List</a></span> + <span class="menuButton"><a href="/ndb/beef/show" class="calc" title="Use the ground beef calculator">Ground Beef Calculator</a></span> + <span class="menuButton"><a href="http://www.ars.usda.gov/SP2UserFiles/Place/12354500/Data/SR25/sr25_doc.pdf" class="docs" title="View and 
download release documentation" target="_help">SR25 Documentation</a></span> + <span class="menuButton"><a href="/ndb/help/index" class="help" target="_help" title="Read help on how to use the website">Help</a></span> + </div> + + + + + + <div id="view-name">Basic Report</div> + + <div class="body"> + <h1>Nutrient data for 07908, Luncheon meat, pork with ham, minced, canned, includes SPAM (Hormel) + + + </h1> + <div class="menuButton" > + <a href="/ndb/search/list?fg=&amp;man=&amp;lfacet=&amp;count=&amp;max=25&amp;sort=&amp;qlookup=spam&amp;offset=&amp;format=Abridged&amp;new=" name="search" class="previous" title="Return to results list">Return to Search Results</a><script type='text/javascript'> var myTooltip = new YAHOO.widget.Tooltip("myTooltip", { context:"null" } );</script> + + + + <a href="/ndb/foods/show/1732?fg=&amp;man=&amp;lfacet=&amp;count=&amp;max=25&amp;sort=&amp;qlookup=spam&amp;offset=&amp;format=Full&amp;new=" name="full" title="View Full Report">Full Report (All Nutrients)</a><script type='text/javascript'> var myTooltip = new YAHOO.widget.Tooltip("myTooltip", { context:"null" } );</script> + + + <a href="/ndb/foods/show/1732?fg=&amp;man=&amp;lfacet=&amp;count=&amp;max=25&amp;sort=&amp;qlookup=spam&amp;offset=&amp;format=Stats&amp;new=" name="stats" title="View Statistics Report">Statistics Report</a><script type='text/javascript'> var myTooltip = new YAHOO.widget.Tooltip("myTooltip", { context:"null" } );</script> + + </div> + + + <div class="dialog"> + + + <div class="null"> + <div id="measuresHelpDialog"> + <div class="hd">Modifying household measures</div> + <div class="bd"> + + <div id="helpDiv"></div> + + </div> + </div> + </div> + <script> + function init_dlg_measuresHelpDialog() { + // Instantiate the Dialog + GRAILSUI.measuresHelpDialog = new YAHOO.widget.Dialog("measuresHelpDialog", + { 'width': '600px', +'class': 'helpDialog', +'draggable': true, +'modal': false, +'fixedcenter': true, +'visible': false, +'params': [], 
+'constraintoviewport': true, +'buttons': [{'text': 'OK', +handler: function() {this.cancel();}, +'isDefault': true}] }); + GRAILSUI.measuresHelpDialog.render(document.body); + + + } + YAHOO.util.Event.onDOMReady(init_dlg_measuresHelpDialog); + </script> + + + <!-- NUTRIENT DATA TABLE --> + <form action="/ndb/foods/show/1732" method="get" > + <input type="hidden" name="fg" value="" id="fg" /> + <input type="hidden" name="man" value="" id="man" /> + <input type="hidden" name="lfacet" value="" id="lfacet" /> + <input type="hidden" name="count" value="" id="count" /> + <input type="hidden" name="max" value="25" id="max" /> + <input type="hidden" name="qlookup" value="spam" id="qlookup" /> + <input type="hidden" name="offset" value="" id="offset" /> + <input type="hidden" name="sort" value="" id="sort" /> + <input type="hidden" name="format" value="Abridged" id="format" /> + + + <div class="nutlist"> + + + <p style="font-style:italic;font-size:.8em">Nutrient values and weights are for edible portion</p> + + + <table> + <thead> + + <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr> + <th style="vertical-align:middle">Nutrient</th> + <th style="vertical-align:middle" >Unit</th> + <th style="vertical-align:middle"><input type="text" 
name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th> + + + + + <th style="width:130px;line-height:1.2em;text-align:center"> + <input type="text" name="Q3483" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="2.0" id="Q3483" /> + <br> + + oz 1 NLEA serving + <br>56g + <!-- + --> + </th> + + </thead> + <tbody> + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Proximates</td> + </tr> + + + <tr class="odd"> + <td >Water + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">51.70</td> + + + <td style="text-align:right;">28.95</td> + + + </tr> + + + <tr class="even"> + <td >Energy + + + </td> + + <td style="text-align:center;">kcal</td> + <td style="text-align:right;">315</td> + + + <td style="text-align:right;">176</td> + + + </tr> + + + <tr class="odd"> + <td >Protein + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">13.40</td> + + + <td style="text-align:right;">7.50</td> + + + </tr> + + + <tr class="even"> + <td >Total lipid (fat) + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">26.60</td> + + + <td style="text-align:right;">14.90</td> + + + </tr> + + + <tr class="odd"> + <td >Carbohydrate, by difference + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">4.60</td> + + + <td style="text-align:right;">2.58</td> + + + </tr> + + + <tr class="even"> + <td >Fiber, total dietary + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + <tr class="odd"> + <td >Sugars, total + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">0.00</td> + + + <td style="text-align:right;">0.00</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" 
colspan="6" bgcolor="#dddddd" >Minerals</td> + </tr> + + + <tr class="odd"> + <td >Calcium, Ca + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr class="even"> + <td >Iron, Fe + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.64</td> + + + <td style="text-align:right;">0.36</td> + + + </tr> + + + <tr class="odd"> + <td >Magnesium, Mg + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">14</td> + + + <td style="text-align:right;">8</td> + + + </tr> + + + <tr class="even"> + <td >Phosphorus, P + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">151</td> + + + <td style="text-align:right;">85</td> + + + </tr> + + + <tr class="odd"> + <td >Potassium, K + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">409</td> + + + <td style="text-align:right;">229</td> + + + </tr> + + + <tr class="even"> + <td >Sodium, Na + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">1411</td> + + + <td style="text-align:right;">790</td> + + + </tr> + + + <tr class="odd"> + <td >Zinc, Zn + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">1.59</td> + + + <td style="text-align:right;">0.89</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Vitamins</td> + </tr> + + + <tr class="odd"> + <td >Vitamin C, total ascorbic acid + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + <tr class="even"> + <td >Thiamin + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.317</td> + + + <td style="text-align:right;">0.178</td> + + + </tr> + + + <tr class="odd"> + <td >Riboflavin + + + </td> + + <td 
style="text-align:center;">mg</td> + <td style="text-align:right;">0.176</td> + + + <td style="text-align:right;">0.099</td> + + + </tr> + + + <tr class="even"> + <td >Niacin + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">3.530</td> + + + <td style="text-align:right;">1.977</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin B-6 + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.218</td> + + + <td style="text-align:right;">0.122</td> + + + </tr> + + + <tr class="even"> + <td >Folate, DFE + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">3</td> + + + <td style="text-align:right;">2</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin B-12 + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0.45</td> + + + <td style="text-align:right;">0.25</td> + + + </tr> + + + <tr class="even"> + <td >Vitamin A, RAE + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin A, IU + + + </td> + + <td style="text-align:center;">IU</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + <tr class="even"> + <td >Vitamin E (alpha-tocopherol) + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0.42</td> + + + <td style="text-align:right;">0.24</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin D (D2 + D3) + + + </td> + + <td style="text-align:center;">µg</td> + <td style="text-align:right;">0.6</td> + + + <td style="text-align:right;">0.3</td> + + + </tr> + + + <tr class="even"> + <td >Vitamin D + + + </td> + + <td style="text-align:center;">IU</td> + <td style="text-align:right;">26</td> + + + <td style="text-align:right;">15</td> + + + </tr> + + + <tr class="odd"> + <td >Vitamin K (phylloquinone) + + + </td> + + <td 
style="text-align:center;">µg</td> + <td style="text-align:right;">0.0</td> + + + <td style="text-align:right;">0.0</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Lipids</td> + </tr> + + + <tr class="odd"> + <td >Fatty acids, total saturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">9.987</td> + + + <td style="text-align:right;">5.593</td> + + + </tr> + + + <tr class="even"> + <td >Fatty acids, total monounsaturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">13.505</td> + + + <td style="text-align:right;">7.563</td> + + + </tr> + + + <tr class="odd"> + <td >Fatty acids, total polyunsaturated + + + </td> + + <td style="text-align:center;">g</td> + <td style="text-align:right;">2.019</td> + + + <td style="text-align:right;">1.131</td> + + + </tr> + + + <tr class="even"> + <td >Cholesterol + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">71</td> + + + <td style="text-align:right;">40</td> + + + </tr> + + + + <tr class="even" > + <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Other</td> + </tr> + + + <tr class="odd"> + <td >Caffeine + + + </td> + + <td style="text-align:center;">mg</td> + <td style="text-align:right;">0</td> + + + <td style="text-align:right;">0</td> + + + </tr> + + + + </tbody> + </table> + + </div> + </form> + + + + + + </div> + + </div> + + <script src="/ndb/static/js/application.js" type="text/javascript" ></script> + + + </div> + <div class="footer"> + National Nutrient Database for Standard Reference<br>Release 25 + &nbsp;&nbsp;Software v.1.2.2 + </div> + </body> +</html> \ No newline at end of file diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py new file mode 100644 index 0000000000000..d0468026caef3 --- /dev/null +++ b/pandas/io/tests/test_html.py @@ -0,0 +1,324 @@ +import os +import re +from cStringIO import StringIO +from 
unittest import TestCase + +import nose + +import numpy as np +from numpy.testing.decorators import slow + +from pandas.io.html import read_html, import_module +from pandas import DataFrame, MultiIndex +from pandas.util.testing import assert_frame_equal, network + + +def _skip_if_no_parser(): + try: + import_module('lxml') + except ImportError: + try: + import_module('bs4') + except ImportError: + raise nose.SkipTest + + +DATA_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data') + + +def _run_read_html(*args, **kwargs): + _skip_if_no_parser() + return read_html(*args, **kwargs) + + +def isframe(x): + return isinstance(x, DataFrame) + + +def assert_framelist_equal(list1, list2): + assert len(list1) == len(list2), ('lists are not of equal size ' + 'len(list1) == {0}, ' + 'len(list2) == {1}'.format(len(list1), + len(list2))) + assert all(map(lambda x, y: isframe(x) and isframe(y), list1, list2)), \ + 'not all list elements are DataFrames' + for frame_i, frame_j in zip(list1, list2): + assert_frame_equal(frame_i, frame_j) + assert not frame_i.empty, 'frames are both empty' + + +class TestLxmlReadHtml(TestCase): + def setUp(self): + self.spam_data = os.path.join(DATA_PATH, 'spam.html') + self.banklist_data = os.path.join(DATA_PATH, 'failed_banklist.html') + + def run_read_html(self, *args, **kwargs): + kwargs['flavor'] = 'lxml' + return _run_read_html(*args, **kwargs) + + @network + def test_banklist_url(self): + url = 'http://www.fdic.gov/bank/individual/failed/banklist.html' + df1 = self.run_read_html(url, 'First Federal Bank of Florida', + attrs={"id": 'table'}) + df2 = self.run_read_html(url, 'Metcalf Bank', attrs={'id': 'table'}) + + assert_framelist_equal(df1, df2) + + @network + def test_spam_url(self): + url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&' + 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam') + df1 = self.run_read_html(url, '.*Water.*') + df2 = self.run_read_html(url, 'Unit') + + assert_framelist_equal(df1, 
df2) + + @slow + def test_banklist(self): + df1 = self.run_read_html(self.banklist_data, '.*Florida.*', + attrs={'id': 'table'}) + df2 = self.run_read_html(self.banklist_data, 'Metcalf Bank', + attrs={'id': 'table'}) + + assert_framelist_equal(df1, df2) + + @slow + def test_banklist_header(self): + df = self.run_read_html(self.banklist_data, 'Metcalf', + attrs={'id': 'table'}, header=0, skiprows=1)[0] + self.assertFalse(df.empty) + cols = ['Bank Name', 'City', 'State', 'CERT #', + 'Acquiring Institution', 'Closing Date', 'Updated Date'] + self.assertListEqual(df.columns.values.tolist(), cols) + self.assertEqual(df.shape[0], 499) + + def test_spam(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', + infer_types=False) + df2 = self.run_read_html(self.spam_data, 'Unit', infer_types=False) + + assert_framelist_equal(df1, df2) + + self.assertEqual(df1[0].ix[0, 0], 'Nutrient') + + def test_spam_no_match(self): + dfs = self.run_read_html(self.spam_data) + for df in dfs: + self.assertIsInstance(df, DataFrame) + + def test_banklist_no_match(self): + dfs = self.run_read_html(self.banklist_data, attrs={'id': 'table'}) + for df in dfs: + self.assertIsInstance(df, DataFrame) + + def test_spam_header(self): + df = self.run_read_html(self.spam_data, '.*Water.*', header=0)[0] + self.assertEqual(df.columns[0], 'Nutrient') + self.assertFalse(df.empty) + + def test_skiprows_int(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=1) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_xrange(self): + df1 = [self.run_read_html(self.spam_data, '.*Water.*').pop()[2:]] + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=xrange(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_list(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=[1, 2]) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=[2, 1]) + + assert_framelist_equal(df1, df2) + + 
def test_skiprows_set(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', + skiprows=set([1, 2])) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=set([2, 1])) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=1) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_short(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', + skiprows=slice(2)) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=slice(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_long(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', + skiprows=slice(2, 5)) + df2 = self.run_read_html(self.spam_data, 'Unit', + skiprows=slice(4, 1, -1)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_ndarray(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', + skiprows=np.arange(2)) + df2 = self.run_read_html(self.spam_data, 'Unit', skiprows=np.arange(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_invalid(self): + self.assertRaises(ValueError, self.run_read_html, self.spam_data, + '.*Water.*', skiprows='asdf') + + def test_index(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', index_col=0) + df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0) + assert_framelist_equal(df1, df2) + + def test_header(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0) + df2 = self.run_read_html(self.spam_data, 'Unit', header=0) + assert_framelist_equal(df1, df2) + self.assertEqual(df1[0].columns[0], 'Nutrient') + + def test_header_and_index(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0, + index_col=0) + df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0) + assert_framelist_equal(df1, df2) + + def test_infer_types(self): + df1 = self.run_read_html(self.spam_data, '.*Water.*', 
header=0, + index_col=0, infer_types=False) + df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0, + infer_types=False) + assert_framelist_equal(df1, df2) + + df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0, + infer_types=True) + + self.assertRaises(AssertionError, assert_framelist_equal, df1, df2) + + def test_string_io(self): + with open(self.spam_data) as f: + data1 = StringIO(f.read()) + + with open(self.spam_data) as f: + data2 = StringIO(f.read()) + + df1 = self.run_read_html(data1, '.*Water.*', infer_types=False) + df2 = self.run_read_html(data2, 'Unit', infer_types=False) + assert_framelist_equal(df1, df2) + + def test_string(self): + with open(self.spam_data) as f: + data = f.read() + + df1 = self.run_read_html(data, '.*Water.*', infer_types=False) + df2 = self.run_read_html(data, 'Unit', infer_types=False) + + assert_framelist_equal(df1, df2) + + def test_file_like(self): + with open(self.spam_data) as f: + df1 = self.run_read_html(f, '.*Water.*', infer_types=False) + + with open(self.spam_data) as f: + df2 = self.run_read_html(f, 'Unit', infer_types=False) + + assert_framelist_equal(df1, df2) + + def test_bad_url_protocol(self): + self.assertRaises(ValueError, self.run_read_html, 'git://github.com', + '.*Water.*') + + @slow + def test_file_url(self): + url = self.banklist_data + dfs = self.run_read_html('file://' + url, 'First', + attrs={'id': 'table'}) + self.assertIsInstance(dfs, list) + for df in dfs: + self.assertIsInstance(df, DataFrame) + + @slow + def test_invalid_table_attrs(self): + url = self.banklist_data + self.assertRaises(AssertionError, self.run_read_html, url, + 'First Federal Bank of Florida', + attrs={'id': 'tasdfable'}) + + def _bank_data(self, *args, **kwargs): + return self.run_read_html(self.banklist_data, 'Metcalf', + attrs={'id': 'table'}, *args, **kwargs) + + @slow + def test_multiindex_header(self): + df = self._bank_data(header=[0, 1])[0] + self.assertIsInstance(df.columns, MultiIndex) 
+ + @slow + def test_multiindex_index(self): + df = self._bank_data(index_col=[0, 1])[0] + self.assertIsInstance(df.index, MultiIndex) + + @slow + def test_multiindex_header_index(self): + df = self._bank_data(header=[0, 1], index_col=[0, 1])[0] + self.assertIsInstance(df.columns, MultiIndex) + self.assertIsInstance(df.index, MultiIndex) + + @slow + def test_multiindex_header_skiprows(self): + df = self._bank_data(header=[0, 1], skiprows=1)[0] + self.assertIsInstance(df.columns, MultiIndex) + + @slow + def test_multiindex_header_index_skiprows(self): + df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0] + self.assertIsInstance(df.index, MultiIndex) + + @slow + def test_regex_idempotency(self): + url = self.banklist_data + dfs = self.run_read_html('file://' + url, + match=re.compile(re.compile('Florida')), + attrs={'id': 'table'}) + self.assertIsInstance(dfs, list) + for df in dfs: + self.assertIsInstance(df, DataFrame) + + def test_negative_skiprows_spam(self): + url = self.spam_data + self.assertRaises(AssertionError, self.run_read_html, url, 'Water', + skiprows=-1) + + def test_negative_skiprows_banklist(self): + url = self.banklist_data + self.assertRaises(AssertionError, self.run_read_html, url, 'Florida', + skiprows=-1) + + @slow + def test_multiple_matches(self): + url = self.banklist_data + dfs = self.run_read_html(url, match=r'Florida') + self.assertIsInstance(dfs, list) + self.assertGreater(len(dfs), 1) + for df in dfs: + self.assertIsInstance(df, DataFrame) + + +def test_invalid_flavor(): + url = 'google.com' + nose.tools.assert_raises(AssertionError, _run_read_html, url, 'google', + flavor='not a* valid**++ flaver') + + +class TestBs4ReadHtml(TestLxmlReadHtml): + def run_read_html(self, *args, **kwargs): + kwargs['flavor'] = 'bs4' + return _run_read_html(*args, **kwargs) diff --git a/setup.py b/setup.py index 24b07b1d274fc..3e56144e25378 100755 --- a/setup.py +++ b/setup.py @@ -509,7 +509,8 @@ def pxd(name): 'tests/data/*.txt', 
'tests/data/*.xls', 'tests/data/*.xlsx', - 'tests/data/*.table'], + 'tests/data/*.table', + 'tests/data/*.html'], 'pandas.tools': ['tests/*.csv'], 'pandas.tests': ['data/*.pickle', 'data/*.csv'],
This PR adds new functionality for reading HTML tables from a URI, string, or file-like object into a DataFrame. #3369
https://api.github.com/repos/pandas-dev/pandas/pulls/3477
2013-04-28T18:01:06Z
2013-05-03T16:54:13Z
2013-05-03T16:54:13Z
2014-06-16T01:32:21Z
ENH: Scatterplot Method added
diff --git a/doc/source/release.rst b/doc/source/release.rst index 34cc4e499a0d5..c85e86caa8114 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -210,6 +210,7 @@ API Changes - Default export for ``to_clipboard`` is now csv with a sep of `\t` for compat (:issue:`3368`) - ``at`` now will enlarge the object inplace (and return the same) (:issue:`2578`) + - new class added to allow scatterplotting using ``df.plot(kind="scatter")``(:issue:`2215`) - ``HDFStore`` diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index bdeb4ca3d0212..be18f0bd5cf89 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -449,7 +449,7 @@ def test_plot_xy(self): # columns.inferred_type == 'mixed' # TODO add MultiIndex test - + @slow def test_xcompat(self): import pandas as pd @@ -534,6 +534,21 @@ def test_subplots(self): [self.assert_(label.get_visible()) for label in ax.get_yticklabels()] + @slow + def test_plot_scatter(self): + from matplotlib.pylab import close + df = DataFrame(randn(6, 4), + index=list(string.ascii_letters[:6]), + columns=['x', 'y', 'z', 'four']) + + _check_plot_works(df.plot, x='x', y='y', kind='scatter') + _check_plot_works(df.plot, x=1, y=2, kind='scatter') + + with tm.assertRaises(ValueError): + df.plot(x='x', kind='scatter') + with tm.assertRaises(ValueError): + df.plot(y='y', kind='scatter') + @slow def test_plot_bar(self): from matplotlib.pylab import close diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index d6c0482d86be4..7de5840384974 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -322,7 +322,6 @@ def _gcf(): import matplotlib.pyplot as plt return plt.gcf() - def _get_marker_compat(marker): import matplotlib.lines as mlines import matplotlib as mpl @@ -1201,7 +1200,32 @@ def _post_plot_logic(self): for ax in self.axes: ax.legend(loc='best') - +class ScatterPlot(MPLPlot): + def __init__(self, data, x, y, **kwargs): + MPLPlot.__init__(self, data, 
**kwargs) + self.kwds.setdefault('c', self.plt.rcParams['patch.facecolor']) + if x is None or y is None: + raise ValueError( 'scatter requires and x and y column') + if com.is_integer(x) and not self.data.columns.holds_integer(): + x = self.data.columns[x] + if com.is_integer(y) and not self.data.columns.holds_integer(): + y = self.data.columns[y] + self.x = x + self.y = y + + + def _make_plot(self): + x, y, data = self.x, self.y, self.data + ax = self.axes[0] + ax.scatter(data[x].values, data[y].values, **self.kwds) + + def _post_plot_logic(self): + ax = self.axes[0] + x, y = self.x, self.y + ax.set_ylabel(com.pprint_thing(y)) + ax.set_xlabel(com.pprint_thing(x)) + + class LinePlot(MPLPlot): def __init__(self, data, **kwargs): @@ -1562,7 +1586,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, secondary_y=False, **kwds): """ - Make line or bar plot of DataFrame's series with the index on the x-axis + Make line, bar, or scatter plots of DataFrame series with the index on the x-axis using matplotlib / pylab. 
Parameters @@ -1593,10 +1617,11 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, ax : matplotlib axis object, default None style : list or dict matplotlib line style per column - kind : {'line', 'bar', 'barh', 'kde', 'density'} + kind : {'line', 'bar', 'barh', 'kde', 'density', 'scatter'} bar : vertical bar plot barh : horizontal bar plot kde/density : Kernel Density Estimation plot + scatter: scatter plot logx : boolean, default False For line plots, use log scaling on x axis logy : boolean, default False @@ -1632,36 +1657,50 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, klass = BarPlot elif kind == 'kde': klass = KdePlot + elif kind == 'scatter': + klass = ScatterPlot else: raise ValueError('Invalid chart type given %s' % kind) - if x is not None: - if com.is_integer(x) and not frame.columns.holds_integer(): - x = frame.columns[x] - frame = frame.set_index(x) - - if y is not None: - if com.is_integer(y) and not frame.columns.holds_integer(): - y = frame.columns[y] - label = x if x is not None else frame.index.name - label = kwds.pop('label', label) - ser = frame[y] - ser.index.name = label - return plot_series(ser, label=label, kind=kind, - use_index=use_index, - rot=rot, xticks=xticks, yticks=yticks, - xlim=xlim, ylim=ylim, ax=ax, style=style, - grid=grid, logx=logx, logy=logy, - secondary_y=secondary_y, title=title, - figsize=figsize, fontsize=fontsize, **kwds) - - plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot, - legend=legend, ax=ax, style=style, fontsize=fontsize, - use_index=use_index, sharex=sharex, sharey=sharey, - xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, - title=title, grid=grid, figsize=figsize, logx=logx, - logy=logy, sort_columns=sort_columns, - secondary_y=secondary_y, **kwds) + if kind == 'scatter': + plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots, + rot=rot,legend=legend, ax=ax, style=style, + fontsize=fontsize, use_index=use_index, sharex=sharex, + 
sharey=sharey, xticks=xticks, yticks=yticks, + xlim=xlim, ylim=ylim, title=title, grid=grid, + figsize=figsize, logx=logx, logy=logy, + sort_columns=sort_columns, secondary_y=secondary_y, + **kwds) + else: + if x is not None: + if com.is_integer(x) and not frame.columns.holds_integer(): + x = frame.columns[x] + frame = frame.set_index(x) + + if y is not None: + if com.is_integer(y) and not frame.columns.holds_integer(): + y = frame.columns[y] + label = x if x is not None else frame.index.name + label = kwds.pop('label', label) + ser = frame[y] + ser.index.name = label + return plot_series(ser, label=label, kind=kind, + use_index=use_index, + rot=rot, xticks=xticks, yticks=yticks, + xlim=xlim, ylim=ylim, ax=ax, style=style, + grid=grid, logx=logx, logy=logy, + secondary_y=secondary_y, title=title, + figsize=figsize, fontsize=fontsize, **kwds) + + else: + plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot, + legend=legend, ax=ax, style=style, fontsize=fontsize, + use_index=use_index, sharex=sharex, sharey=sharey, + xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, + title=title, grid=grid, figsize=figsize, logx=logx, + logy=logy, sort_columns=sort_columns, + secondary_y=secondary_y, **kwds) + plot_obj.generate() plot_obj.draw() if subplots:
closes #2215 This is a simple pull request that creates a shortcut for scatter plots from a df. This is in reference to #2215 (and #1527) and follows the same pattern as df.hist(). ``` #where you would previously have had to do: from pandas.tools.plotting import scatter_plot scatter_plot(df, x = "x", y = 'y') #you can now do df.scatter(x="x",y="y") #example import pandas as pd import numpy as np df = pd.DataFrame( np.random.randn(100,4)) df.scatter(x=1,y=2) ``` I followed the same pattern as df.hist() even though it looks like you are intended make a class object for scatterplots that can be passed to the plot_frame() function as is the case for BarPlot and LinePlot. In any case, this works and may be useful. best, zach cp
https://api.github.com/repos/pandas-dev/pandas/pulls/3473
2013-04-27T21:06:11Z
2013-10-17T12:50:56Z
2013-10-17T12:50:56Z
2014-06-12T20:43:38Z
ENH: Bring Series.dot up to par with DataFrame.dot
diff --git a/pandas/core/series.py b/pandas/core/series.py index 7d9303fa75acd..7c0c12c11e177 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1944,6 +1944,47 @@ def clip_lower(self, threshold): """ return pa.where(self < threshold, threshold, self) + def dot(self, other): + """ + Matrix multiplication with DataFrame or inner-product with Series objects + + Parameters + ---------- + other : Series or DataFrame + + Returns + ------- + dot_product : scalar or Series + """ + from pandas.core.frame import DataFrame + if isinstance(other, (Series, DataFrame)): + common = self.index.union(other.index) + if (len(common) > len(self.index) or + len(common) > len(other.index)): + raise ValueError('matrices are not aligned') + + left = self.reindex(index=common, copy=False) + right = other.reindex(index=common, copy=False) + lvals = left.values + rvals = right.values + else: + left = self + lvals = self.values + rvals = np.asarray(other) + if lvals.shape[0] != rvals.shape[0]: + raise Exception('Dot product shape mismatch, %s vs %s' % + (lvals.shape, rvals.shape)) + + if isinstance(other, DataFrame): + return self._constructor(np.dot(lvals, rvals), + index=other.columns) + elif isinstance(other, Series): + return np.dot(lvals, rvals) + elif isinstance(rvals, np.ndarray): + return np.dot(lvals, rvals) + else: # pragma: no cover + raise TypeError('unsupported type: %s' % type(other)) + #------------------------------------------------------------------------------ # Combination diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 20ff6e95b436c..4845ae5258892 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2486,6 +2486,33 @@ def test_count(self): self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum()) + def test_dot(self): + a = Series(np.random.randn(4), index=['p', 'q', 'r', 's']) + b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'], + columns=['p', 'q', 'r', 's']).T + + result = a.dot(b) + 
expected = Series(np.dot(a.values, b.values), + index=['1', '2', '3']) + assert_series_equal(result, expected) + + #Check index alignment + b2 = b.reindex(index=reversed(b.index)) + result = a.dot(b) + assert_series_equal(result, expected) + + # Check ndarray argument + result = a.dot(b.values) + self.assertTrue(np.all(result == expected.values)) + self.assertEquals(a.dot(b['2'].values), expected['2']) + + #Check series argument + self.assertEquals(a.dot(b['1']), expected['1']) + self.assertEquals(a.dot(b2['1']), expected['1']) + + self.assertRaises(Exception, a.dot, a.values[:3]) + self.assertRaises(ValueError, a.dot, b.T) + def test_value_counts_nunique(self): s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a']) hist = s.value_counts()
If second argument is Series or DataFrame, ensures alignment of indices. If second argument is some type of ndarray, just fall back to np.dot on the values. This mirrors the behavior in DataFrame.dot Includes unittest
https://api.github.com/repos/pandas-dev/pandas/pulls/3470
2013-04-27T03:19:17Z
2013-04-29T17:43:37Z
2013-04-29T17:43:37Z
2014-07-16T08:06:39Z
ENH: Adding '.' as an na_value for FRED.
diff --git a/RELEASE.rst b/RELEASE.rst index f584a5de924c1..49d576aacaff9 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -57,6 +57,7 @@ pandas 0.12.0 - Unordered time series selection was misbehaving when using label slicing (GH3448_) - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) + - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 diff --git a/pandas/io/data.py b/pandas/io/data.py index 8edb319a565a1..43178fdcfddf1 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -367,9 +367,17 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1), url = fred_URL + '%s' % name + \ '/downloaddata/%s' % name + '.csv' data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True, - header=None, skiprows=1, names=["DATE", name]) - return data.truncate(start, end) - + header=None, skiprows=1, names=["DATE", name], + na_values='.') + try: + return data.truncate(start, end) + except KeyError: + if data.ix[3].name[7:12] == 'Error': + raise Exception("Failed to get the data. 
" + "Check that {} is valid FRED " + "series.".format(name)) + else: + raise def get_data_famafrench(name, start=None, end=None): start, end = _sanitize_dates(start, end) diff --git a/pandas/io/tests/test_fred.py b/pandas/io/tests/test_fred.py new file mode 100644 index 0000000000000..3e951e5443bc3 --- /dev/null +++ b/pandas/io/tests/test_fred.py @@ -0,0 +1,85 @@ +import unittest +import nose +from datetime import datetime + +from pandas.util.py3compat import StringIO, BytesIO + +import pandas as pd +import pandas.io.data as web +from pandas.util.testing import (network, assert_frame_equal, + assert_series_equal, + assert_almost_equal) +from numpy.testing.decorators import slow + +import urllib2 + + +class TestFred(unittest.TestCase): + + @slow + @network + def test_fred(self): + """ + Throws an exception when DataReader can't get a 200 response from + FRED. + """ + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + + try: + self.assertEquals( + web.DataReader("GDP", "fred", start, end)['GDP'].tail(1), + 16010.2) + + self.assertRaises( + Exception, + lambda: web.DataReader("NON EXISTENT SERIES", 'fred', + start, end)) + except urllib2.URLError: + try: + urllib2.urlopen('http://google.com') + except urllib2.URLError: + raise nose.SkipTest + else: + raise + + @slow + @network + def test_fred_nan(self): + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + df = web.DataReader("DFII5", "fred", start, end) + assert pd.isnull(df.ix['2010-01-01']) + + @slow + @network + def test_fred_parts(self): + import numpy as np + + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + df = web.get_data_fred("CPIAUCSL", start, end) + assert df.ix['2010-05-01'] == 217.23 + + t = np.array(df.CPIAUCSL.tolist()) + assert np.issubdtype(t.dtype, np.floating) + assert t.shape == (37,) + + # Test some older ones: + expected = [[576.7], + [962.9], + [684.7], + [848.3], + [933.3]] + result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5] + assert 
(result.values == expected).all() + + @slow + @network + def test_invalid_series(self): + name = "NOT A REAL SERIES" + self.assertRaises(Exception, web.get_data_fred, name) + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False)
The St. Louis Fed's [FRED](http://research.stlouisfed.org/fred2/) uses '.' as an NaN marker. I don't think the user has any way to specify the na_value via DataReader, so this just hard codes '.' as an na_value in the fred data-fetching function. Before: ``` python In [4]: from pandas.io.data import DataReader In [5]: df = DataReader('DFII5', data_source="fred") In [6]: df.head() Out[6]: DFII5 DATE 2010-01-01 . 2010-01-04 0.52 2010-01-05 0.44 2010-01-06 0.44 2010-01-07 0.43 ``` After ``` python In [2]: from pandas.io.data import DataReader In [3]: df = DataReader('DFII5', data_source="fred") In [4]: df.head() Out[4]: DFII5 DATE 2010-01-01 NaN 2010-01-04 0.52 2010-01-05 0.44 2010-01-06 0.44 2010-01-07 0.43 In [5]: pd.__version__ Out[5]: '0.11.0rc1' ``` Hopefully I've done everything correctly with the pull request. Let me know if anything needs fixing.
https://api.github.com/repos/pandas-dev/pandas/pulls/3469
2013-04-27T00:53:30Z
2013-04-27T20:14:38Z
2013-04-27T20:14:38Z
2016-11-03T12:37:23Z
BUG: adjust to_latex column format when no index
diff --git a/pandas/core/format.py b/pandas/core/format.py index 7226bd14e5576..5b68b26a41b77 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -356,7 +356,10 @@ def get_col_type(dtype): if column_format is None: dtypes = self.frame.dtypes.values - column_format = 'l%s' % ''.join(map(get_col_type, dtypes)) + if self.index: + column_format = 'l%s' % ''.join(map(get_col_type, dtypes)) + else: + column_format = '%s' % ''.join(map(get_col_type, dtypes)) elif not isinstance(column_format, basestring): raise AssertionError(('column_format must be str or unicode, not %s' % type(column_format))) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index c5ee51d9a7408..37f08dd177eae 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1355,6 +1355,31 @@ def test_to_latex(self): # it works! self.frame.to_latex() + df = DataFrame({'a': [1, 2], + 'b': ['b1', 'b2']}) + withindex_result = df.to_latex() + withindex_expected = r"""\begin{tabular}{lrl} +\toprule +{} & a & b \\ +\midrule +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\bottomrule +\end{tabular} +""" + self.assertEqual(withindex_result, withindex_expected) + + withoutindex_result = df.to_latex(index=False) + withoutindex_expected = r"""\begin{tabular}{rl} +\toprule + a & b \\ +\midrule + 1 & b1 \\ + 2 & b2 \\ +\bottomrule +\end{tabular} +""" + self.assertEqual(withoutindex_result, withoutindex_expected) class TestSeriesFormatting(unittest.TestCase): _multiprocess_can_split_ = True
`to_latex` was adding an extra alignment to `column_format` when the `index` argument was False
https://api.github.com/repos/pandas-dev/pandas/pulls/3467
2013-04-26T21:14:43Z
2013-04-28T00:55:20Z
2013-04-28T00:55:20Z
2014-07-16T08:06:35Z
Fix pprint of index, summarizes according to display.max_seq_items
diff --git a/RELEASE.rst b/RELEASE.rst index aac34c6cf8a5e..9920f89d50501 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -42,6 +42,8 @@ pandas 0.12.0 - When removing an object from a store, **store.remove(key)**, raises **KeyError** if **key** is not a valid store object. + - The repr() for (Multi)Index now obeys display.max_seq_items rather + then numpy threshold print options. (GH3426_, GH3466_) **Bug Fixes** @@ -60,6 +62,8 @@ pandas 0.12.0 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3454: https://github.com/pydata/pandas/issues/3454 .. _GH3457: https://github.com/pydata/pandas/issues/3457 +.. _GH3426: https://github.com/pydata/pandas/issues/3426 +.. _GH3466: https://github.com/pydata/pandas/issues/3466 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 .. _GH3455: https://github.com/pydata/pandas/issues/3455 diff --git a/pandas/core/index.py b/pandas/core/index.py index 5ffd211c86d27..34edd26a49617 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -170,12 +170,7 @@ def __unicode__(self): Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. 
""" - if len(self) > 6 and len(self) > np.get_printoptions()['threshold']: - data = self[:3].format() + ["..."] + self[-3:].format() - else: - data = self.format() - - prepr = com.pprint_thing(data, escape_chars=('\t', '\r', '\n'),quote_strings=True) + prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),quote_strings=True) return '%s(%s, dtype=%s)' % (type(self).__name__, prepr, self.dtype) def __repr__(self): @@ -1504,19 +1499,9 @@ def __unicode__(self): """ output = 'MultiIndex\n%s' - options = np.get_printoptions() - np.set_printoptions(threshold=50) - - if len(self) > 100: - values = self[:50].format() + ["..."] + self[-50:].format() - else: - values = self.format() - - summary = com.pprint_thing(values, escape_chars=('\t', '\r', '\n'), + summary = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'), quote_strings=True) - np.set_printoptions(threshold=options['threshold']) - return output % summary def __repr__(self): diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 65a3d3b1c8a20..993fc690ad04e 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -17,6 +17,7 @@ import pandas.core.common as com import pandas.util.testing as tm +import pandas.core.config as cf from pandas.tseries.index import _to_m8 import pandas.tseries.offsets as offsets @@ -895,9 +896,10 @@ def test_print_unicode_columns(self): repr(df.columns) # should not raise UnicodeDecodeError def test_repr_summary(self): - r = repr(pd.Index(np.arange(10000))) - self.assertTrue(len(r) < 100) - self.assertTrue("..." in r) + with cf.option_context('display.max_seq_items',10): + r = repr(pd.Index(np.arange(1000))) + self.assertTrue(len(r) < 100) + self.assertTrue("..." in r) def test_unicode_string_with_unicode(self): idx = Index(range(1000))
#3465 makes #3391 a very good idea.
https://api.github.com/repos/pandas-dev/pandas/pulls/3466
2013-04-26T18:51:30Z
2013-04-26T18:52:46Z
2013-04-26T18:52:46Z
2014-06-22T23:23:22Z
BUG: GH3461 Fix sorting in a frame with a list of columns which contains datetime64
diff --git a/RELEASE.rst b/RELEASE.rst index aac34c6cf8a5e..a3becfa2de3c7 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -54,6 +54,7 @@ pandas 0.12.0 - ``.loc`` was not raising when passed an integer list (GH3449_) - Unordered time series selection was misbehaving when using label slicing (GH3448_) - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) + - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 @@ -64,6 +65,7 @@ pandas 0.12.0 .. _GH3437: https://github.com/pydata/pandas/issues/3437 .. _GH3455: https://github.com/pydata/pandas/issues/3455 .. _GH3457: https://github.com/pydata/pandas/issues/3457 +.. _GH3461: https://github.com/pydata/pandas/issues/3461 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 diff --git a/pandas/core/common.py b/pandas/core/common.py index 3edd7abcfc100..aa5205b674df3 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1477,6 +1477,8 @@ def is_timedelta64_dtype(arr_or_dtype): tipo = arr_or_dtype.dtype.type return issubclass(tipo, np.timedelta64) +def needs_i8_conversion(arr_or_dtype): + return is_datetime64_dtype(arr_or_dtype) or is_timedelta64_dtype(arr_or_dtype) def is_float_dtype(arr_or_dtype): if isinstance(arr_or_dtype, np.dtype): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 69e7e4178ecfd..977dc9e2b56ff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3144,7 +3144,12 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False): % str(x)) keys.append(k) - keys = [self[x].values for x in by] + def trans(v): + if com.needs_i8_conversion(v): + return v.view('i8') + return v + + keys = [trans(self[x].values) for x in by] indexer = _lexsort_indexer(keys, orders=ascending) indexer = com._ensure_platform_int(indexer) else: 
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 530128a100d0b..7bafed216b9b9 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7737,6 +7737,25 @@ def test_sort_index_duplicates(self): except Exception, e: self.assertTrue('duplicate' in str(e)) + def test_sort_datetimes(self): + + # GH 3461, argsort / lexsort differences for a datetime column + df = DataFrame(['a','a','a','b','c','d','e','f','g'], + columns=['A'], + index=date_range('20130101',periods=9)) + dts = [ Timestamp(x) for x in ['2004-02-11','2004-01-21','2004-01-26','2005-09-20','2010-10-04','2009-05-12','2008-11-12','2010-09-28','2010-09-28'] ] + df['B'] = dts[::2] + dts[1::2] + df['C'] = 2. + df['A1'] = 3. + + df1 = df.sort(columns='A') + df2 = df.sort(columns=['A']) + assert_frame_equal(df1,df2) + + df1 = df.sort(columns='B') + df2 = df.sort(columns=['B']) + assert_frame_equal(df1,df2) + def test_frame_column_inplace_sort_exception(self): s = self.frame['A'] self.assertRaises(Exception, s.sort)
closes #3461
https://api.github.com/repos/pandas-dev/pandas/pulls/3464
2013-04-25T21:27:25Z
2013-04-25T21:56:33Z
2013-04-25T21:56:33Z
2014-07-03T18:23:38Z
CLN: series to now inherit from NDFrame
diff --git a/RELEASE.rst b/RELEASE.rst index 6f55b7cd4490f..7c11bea47c61c 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -37,11 +37,67 @@ pandas 0.12.0 - Fixed various issues with internal pprinting code, the repr() for various objects including TimeStamp and *Index now produces valid python code strings and can be used to recreate the object, (GH3038_), (GH3379_), (GH3251_) + - Added modulo operator to Series **API Changes** - When removing an object from a store, **store.remove(key)**, raises **KeyError** if **key** is not a valid store object. + - Refactor of PandasObject to become new generic Pandas base class + + - moved methods + - __str__,__bytes__,__repr__,save,load + All NDFrame hierarchy, Index hierarchy, Period (Timestamp not included) + + - Refactor of series.py/frame.py/panel.py to move common code to generic.py + - added _setup_axes to created generic NDFrame structures + - moved methods + + - from_axes,_wrap_array,axes,ix,shape,empty,swapaxes,transpose,pop + - __iter__,keys,__contains__,__len__,__neg__,__invert__ + - convert_objects,as_blocks,as_matrix,values + - __getstate__,__setstate__ (though compat remains in frame/panel) + - __getattr__,__setattr__ + - _indexed_same,reindex_like,reindex,align,where,mask + - filter (also added axis argument to selectively filter on a different axis) + - reindex,reindex_axis (which was the biggest change to make generic) + - truncate (moved to become part of NDFrame) + + These are API changes which make Panel more consistent with DataFrame + - swapaxes on a Panel with the same axes specified now return a copy + - support attribute access for setting + - filter supports same api as original DataFrame filter + + - Reindex called with no arguments will now return a copy of the input object + + - Series now inherits from ``NDFrame`` rather than directly from ``ndarray``. + There are several minor changes that affect the API. 
+ + - numpy functions that do not support the array interface will now + return ``ndarrays`` rather than series, e.g. ``np.diff`` and ``np.where`` + - ``Series(0.5)`` would previously return the scalar ``0.5``, this is not + longer supported + - several methods from frame/series have moved to ``NDFrame`` + (convert_objects,where,mask) + - ``TimeSeries`` is now an alias for ``Series``. the property ``is_time_series`` + can be used to distinguish (if desired) + + - Refactor of Sparse objects to use BlockManager + + - Created a new block type in internals, SparseBlock, which can hold multi-dtypes + and is non-consolidatable. SparseSeries and SparseDataFrame now inherit + more methods from there hierarchy (Series/DataFrame), and no longer inherit + from SparseArray (which instead is the object of the SparseBlock) + - Sparse suite now supports integration with non-sparse data. Non-float sparse + data is supportable (partially implemented) + - Operations on sparse structures within DataFrames should preserve sparseness, + merging type operations will convert to dense (and back to sparse), so might + be somewhat inefficient + - enable setitem on SparseSeries for boolean/integer/slices + - SparsePanels implementation is unchanged (e.g. not using BlockManager, needs work) + + - added ``ftypes`` method to Series/DataFame, similar to ``dtypes``, but indicates + if the underlying is sparse/dense (as well as the dtype) **Bug Fixes** @@ -64,7 +120,6 @@ pandas 0.12.0 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. 
_GH3449: https://github.com/pydata/pandas/issues/3449 - pandas 0.11.0 ============= diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py new file mode 100644 index 0000000000000..a264a793f7ee5 --- /dev/null +++ b/pandas/compat/pickle_compat.py @@ -0,0 +1,62 @@ +""" support pre 0.12 series pickle compatibility """ + +import sys +import pickle +import numpy as np +import pandas +from pandas.util import py3compat +from pandas.core.series import Series +from pandas.sparse.series import SparseSeries + +def load_reduce(self): + stack = self.stack + args = stack.pop() + func = stack[-1] + if type(args[0]) is type: + n = args[0].__name__ + if n == 'DeprecatedSeries': + stack[-1] = object.__new__(Series) + return + elif n == 'DeprecatedSparseSeries': + stack[-1] = object.__new__(SparseSeries) + return + + try: + value = func(*args) + except: + print(sys.exc_info()) + print(func, args) + raise + + stack[-1] = value + +if py3compat.PY3: + class Unpickler(pickle._Unpickler): + pass +else: + class Unpickler(pickle.Unpickler): + pass + +Unpickler.dispatch[pickle.REDUCE[0]] = load_reduce + +def load(file): + # try to load a compatibility pickle + # fake the old class hierarchy + # if it works, then return the new type objects + + try: + pandas.core.series.Series = DeprecatedSeries + pandas.sparse.series.SparseSeries = DeprecatedSparseSeries + with open(file,'rb') as fh: + return Unpickler(fh).load() + except: + raise + finally: + pandas.core.series.Series = Series + pandas.sparse.series.SparseSeries = SparseSeries + +class DeprecatedSeries(Series, np.ndarray): + pass + +class DeprecatedSparseSeries(DeprecatedSeries): + pass diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 4bb990a57cb4d..498b18363ca91 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -185,7 +185,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False): values = com._ensure_object(values) keys, counts = 
htable.value_count_object(values, mask) - result = Series(counts, index=keys) + result = Series(counts, index=com._values_from_object(keys)) if sort: result.sort() diff --git a/pandas/core/base.py b/pandas/core/base.py new file mode 100644 index 0000000000000..e767a4074a323 --- /dev/null +++ b/pandas/core/base.py @@ -0,0 +1,53 @@ +# pylint: disable=W0231,E1101 + +from pandas.core import common as com +from pandas.util import py3compat + +class PandasObject(object): + """ The base class for pandas objects """ + + #---------------------------------------------------------------------- + # Reconstruction + + def save(self, path): + com.save(self, path) + + @classmethod + def load(cls, path): + return com.load(path) + + #---------------------------------------------------------------------- + # Formatting + + def __unicode__(self): + raise NotImplementedError + + def __str__(self): + """ + Return a string representation for a particular Object + + Invoked by str(df) in both py2/py3. + Yields Bytestring in Py2, Unicode String in py3. + """ + + if py3compat.PY3: + return self.__unicode__() + return self.__bytes__() + + def __bytes__(self): + """ + Return a string representation for a particular Object + + Invoked by bytes(df) in py3 only. + Yields a bytestring in both py2/py3. + """ + encoding = com.get_option("display.encoding") + return self.__unicode__().encode(encoding, 'replace') + + def __repr__(self): + """ + Return a string representation for a particular Object + + Yields Bytestring in Py2, Unicode String in py3. 
+ """ + return str(self) diff --git a/pandas/core/common.py b/pandas/core/common.py index 3edd7abcfc100..ea743c69f5e90 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -33,16 +33,27 @@ except Exception: # pragma: no cover pass - class PandasError(Exception): pass - class AmbiguousIndexError(PandasError, KeyError): pass - _POSSIBLY_CAST_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]','O','int8','uint8','int16','uint16','int32','uint32','int64','uint64'] ]) +_TIMELIKE_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]'] ]) + +def is_series(obj): + return getattr(obj,'_typ',None) == 'series' +def is_sparse_series(obj): + return getattr(obj,'_subtyp',None) in ('sparse_series','sparse_time_series') +def is_sparse_array_like(obj): + return getattr(obj,'_subtyp',None) in ['sparse_array','sparse_series','sparse_array'] +def is_dataframe(obj): + return getattr(obj,'_typ',None) == 'dataframe' +def is_panel(obj): + return getattr(obj,'_typ',None) == 'panel' +def is_generic(obj): + return getattr(obj,'_data',None) is not None def isnull(obj): ''' @@ -63,14 +74,12 @@ def _isnull_new(obj): if lib.isscalar(obj): return lib.checknull(obj) - from pandas.core.generic import PandasObject - if isinstance(obj, np.ndarray): + if is_series(obj) or isinstance(obj, np.ndarray): return _isnull_ndarraylike(obj) - elif isinstance(obj, PandasObject): - # TODO: optimize for DataFrame, etc. + elif is_generic(obj): return obj.apply(isnull) elif isinstance(obj, list) or hasattr(obj, '__array__'): - return _isnull_ndarraylike(obj) + return _isnull_ndarraylike(np.asarray(obj)) else: return obj is None @@ -90,14 +99,12 @@ def _isnull_old(obj): if lib.isscalar(obj): return lib.checknull_old(obj) - from pandas.core.generic import PandasObject - if isinstance(obj, np.ndarray): + if is_series(obj) or isinstance(obj, np.ndarray): return _isnull_ndarraylike_old(obj) - elif isinstance(obj, PandasObject): - # TODO: optimize for DataFrame, etc. 
+ elif is_generic(obj): return obj.apply(_isnull_old) elif isinstance(obj, list) or hasattr(obj, '__array__'): - return _isnull_ndarraylike_old(obj) + return _isnull_ndarraylike_old(np.asarray(obj)) else: return obj is None @@ -130,39 +137,41 @@ def _use_inf_as_null(key): def _isnull_ndarraylike(obj): - from pandas import Series - values = np.asarray(obj) - if values.dtype.kind in ('O', 'S', 'U'): + values = obj + dtype = values.dtype + + if dtype.kind in ('O', 'S', 'U'): # Working around NumPy ticket 1542 shape = values.shape - if values.dtype.kind in ('S', 'U'): + if dtype.kind in ('S', 'U'): result = np.zeros(values.shape, dtype=bool) else: result = np.empty(shape, dtype=bool) vec = lib.isnullobj(values.ravel()) result[:] = vec.reshape(shape) - - if isinstance(obj, Series): - result = Series(result, index=obj.index, copy=False) - elif values.dtype == np.dtype('M8[ns]'): - # this is the NaT pattern - result = values.view('i8') == tslib.iNaT - elif values.dtype == np.dtype('m8[ns]'): + + elif dtype in _TIMELIKE_DTYPES: # this is the NaT pattern - result = values.view('i8') == tslib.iNaT + v = getattr(values,'asi8',None) + if v is None: + v = values.view('i8') + result = v == tslib.iNaT else: - # -np.isfinite(obj) result = np.isnan(obj) - return result + if is_series(obj): + from pandas import Series + result = Series(result, index=obj.index, copy=False) + + return result def _isnull_ndarraylike_old(obj): - from pandas import Series - values = np.asarray(obj) + values = obj + dtype = values.dtype - if values.dtype.kind in ('O', 'S', 'U'): + if dtype.kind in ('O', 'S', 'U'): # Working around NumPy ticket 1542 shape = values.shape @@ -173,15 +182,20 @@ def _isnull_ndarraylike_old(obj): vec = lib.isnullobj_old(values.ravel()) result[:] = vec.reshape(shape) - if isinstance(obj, Series): - result = Series(result, index=obj.index, copy=False) - elif values.dtype == np.dtype('M8[ns]'): + elif dtype in _TIMELIKE_DTYPES: # this is the NaT pattern - result = 
values.view('i8') == tslib.iNaT + v = getattr(values,'asi8',None) + if v is None: + v = values.view('i8') + result = v == tslib.iNaT else: result = -np.isfinite(obj) - return result + if is_series(obj): + from pandas import Series + result = Series(result, index=obj.index, copy=False) + + return result def notnull(obj): ''' @@ -876,7 +890,7 @@ def _possibly_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ - if not isinstance(result, np.ndarray): + if np.isscalar(result): return result try: @@ -997,6 +1011,34 @@ def backfill_2d(values, limit=None, mask=None): # for test coverage pass +def interpolate_2d(values, method='pad', axis=0, limit=None, missing=None): + """ perform an actual interpolation of values, values will be make 2-d if needed + fills inplace, returns the result """ + + transf = (lambda x: x) if axis == 0 else (lambda x: x.T) + + # reshape a 1 dim if needed + ndim = values.ndim + if values.ndim == 1: + if axis != 0: + raise Exception("cannot interpolate on a ndim == 1 with axis != 0") + values = values.reshape(tuple((1,) + values.shape)) + + if missing is None: + mask = None + else: # todo create faster fill func without masking + mask = mask_missing(transf(values), missing) + + if method == 'pad': + pad_2d(transf(values), limit=limit, mask=mask) + else: + backfill_2d(transf(values), limit=limit, mask=mask) + + # reshape back + if ndim == 1: + values = values[0] + + return values def _consensus_name_attr(objs): name = objs[0].name @@ -1008,10 +1050,28 @@ def _consensus_name_attr(objs): #---------------------------------------------------------------------- # Lots of little utilities +def _maybe_box(indexer, values, obj, key): + + # if we have multiples coming back, box em + if isinstance(values, np.ndarray): + return obj[indexer.get_loc(key)] + + # return the value + return values + +def _values_from_object(o): + """ return my values or the object if we 
are say an ndarray """ + return o.get_values() if hasattr(o,'get_values') else o def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True): """ if we have an object dtype, try to coerce dates and/or numers """ + # if we have passed in a list or scalar + if isinstance(values, (list,tuple)): + values = np.array(values,dtype=np.object_) + if not hasattr(values,'dtype'): + values = np.array([values],dtype=np.object_) + # convert dates if convert_dates and values.dtype == np.object_: @@ -1049,6 +1109,8 @@ def _possibly_convert_platform(values): if isinstance(values, (list,tuple)): values = lib.list_to_object_array(values) if getattr(values,'dtype',None) == np.object_: + if hasattr(values,'values'): + values = values.values values = lib.maybe_convert_objects(values) return values @@ -1059,14 +1121,14 @@ def _possibly_cast_to_timedelta(value, coerce=True): don't force the conversion unless coerce is True """ # deal with numpy not being able to handle certain timedelta operations - if isinstance(value,np.ndarray) and value.dtype.kind == 'm': + if (isinstance(value,np.ndarray) or is_series(value)) and value.dtype.kind == 'm': if value.dtype != 'timedelta64[ns]': value = value.astype('timedelta64[ns]') return value # we don't have a timedelta, but we want to try to convert to one (but don't force it) if coerce: - new_value = tslib.array_to_timedelta64(value.astype(object), coerce=False) + new_value = tslib.array_to_timedelta64(_values_from_object(value).astype(object), coerce=False) if new_value.dtype == 'i8': value = np.array(new_value,dtype='timedelta64[ns]') @@ -1131,26 +1193,27 @@ def _possibly_cast_to_datetime(value, dtype, coerce = False): def _is_bool_indexer(key): - if isinstance(key, np.ndarray) and key.dtype == np.object_: - key = np.asarray(key) - - if not lib.is_bool_array(key): - if isnull(key).any(): - raise ValueError('cannot index with vector containing ' - 'NA / NaN values') - return False - return True - elif isinstance(key, 
np.ndarray) and key.dtype == np.bool_: - return True + if isinstance(key, np.ndarray) or is_series(key): + if key.dtype == np.object_: + key = np.asarray(_values_from_object(key)) + + if len(key) and not lib.is_bool_array(key): + if isnull(key).any(): + raise ValueError('cannot index with vector containing ' + 'NA / NaN values') + return False + return True + elif key.dtype == np.bool_: + return True elif isinstance(key, list): try: - return np.asarray(key).dtype == np.bool_ + arr = np.asarray(key) + return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False return False - def _default_index(n): from pandas.core.index import Int64Index values = np.arange(n, dtype=np.int64) @@ -1583,6 +1646,13 @@ def load(path): try: with open(path,'rb') as fh: return pickle.load(fh) + + except (TypeError), detail: + + # try to process a deprecated compatibility + from pandas.compat.pickle_compat import load + return load(vf) + except: if not py3compat.PY3: raise @@ -1748,14 +1818,14 @@ def _to_pydatetime(x): def _where_compat(mask, arr1, arr2): if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE: - new_vals = np.where(mask, arr1.view(np.int64), arr2.view(np.int64)) + new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8')) return new_vals.view(_NS_DTYPE) import pandas.tslib as tslib if arr1.dtype == _NS_DTYPE: - arr1 = tslib.ints_to_pydatetime(arr1.view(np.int64)) + arr1 = tslib.ints_to_pydatetime(arr1.view('i8')) if arr2.dtype == _NS_DTYPE: - arr2 = tslib.ints_to_pydatetime(arr2.view(np.int64)) + arr2 = tslib.ints_to_pydatetime(arr2.view('i8')) return np.where(mask, arr1, arr2) diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py index de93394872e12..c53afc281583e 100644 --- a/pandas/core/expressions.py +++ b/pandas/core/expressions.py @@ -6,6 +6,8 @@ """ import numpy as np +from pandas.core import common as com +from pandas.core.common import _values_from_object try: import numexpr as ne @@ -84,14 +86,11 @@ def 
_evaluate_numexpr(op, op_str, a, b, raise_on_error = False): if _can_use_numexpr(op, op_str, a, b, 'evaluate'): try: - a_value, b_value = a, b - if hasattr(a_value,'values'): - a_value = a_value.values - if hasattr(b_value,'values'): - b_value = b_value.values - result = ne.evaluate('a_value %s b_value' % op_str, - local_dict={ 'a_value' : a_value, - 'b_value' : b_value }, + a = _values_from_object(a) + b = _values_from_object(b) + result = ne.evaluate('a %s b' % op_str, + local_dict={ 'a' : a, + 'b' : b }, casting='safe') except (ValueError), detail: if 'unknown type object' in str(detail): @@ -106,6 +105,9 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False): return result def _where_standard(cond, a, b, raise_on_error=True): + cond = _values_from_object(cond) + a = _values_from_object(a) + b = _values_from_object(b) return np.where(cond, a, b) def _where_numexpr(cond, a, b, raise_on_error = False): @@ -113,18 +115,15 @@ def _where_numexpr(cond, a, b, raise_on_error = False): if _can_use_numexpr(None, 'where', a, b, 'where'): + cond = _values_from_object(cond) + a = _values_from_object(a) + b = _values_from_object(b) + try: - cond_value, a_value, b_value = cond, a, b - if hasattr(cond_value,'values'): - cond_value = cond_value.values - if hasattr(a_value,'values'): - a_value = a_value.values - if hasattr(b_value,'values'): - b_value = b_value.values - result = ne.evaluate('where(cond_value,a_value,b_value)', - local_dict={ 'cond_value' : cond_value, - 'a_value' : a_value, - 'b_value' : b_value }, + result = ne.evaluate('where(cond,a,b)', + local_dict={ 'c' : cond, + 'a' : a, + 'b' : b }, casting='safe') except (ValueError), detail: if 'unknown type object' in str(detail): diff --git a/pandas/core/format.py b/pandas/core/format.py index 22a1f99c6e2d9..f4ffa64d71cd5 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -379,7 +379,7 @@ def get_col_type(dtype): def _format_col(self, i): formatter = self._get_formatter(i) - return 
format_array(self.frame.icol(i).values, formatter, + return format_array(self.frame.icol(i).get_values(), formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 69e7e4178ecfd..d7a6fc3b0a002 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -24,7 +24,7 @@ from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast, _is_sequence, - _infer_dtype_from_scalar) + _infer_dtype_from_scalar, _values_from_object) from pandas.core.generic import NDFrame from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_NDFrameIndexer, _maybe_droplevels, @@ -35,6 +35,7 @@ create_block_manager_from_blocks) from pandas.core.series import Series, _radd_compat import pandas.core.expressions as expressions +from pandas.sparse.array import SparseArray from pandas.compat.scipy import scoreatpercentile as _quantile from pandas.util.compat import OrderedDict from pandas.util import py3compat @@ -196,7 +197,7 @@ def na_op(x, y): except TypeError: xrav = x.ravel() result = np.empty(x.size, dtype=x.dtype) - if isinstance(y, np.ndarray): + if isinstance(y, (np.ndarray, Series)): yrav = y.ravel() mask = notnull(xrav) & notnull(yrav) result[mask] = op(xrav[mask], yrav[mask]) @@ -251,7 +252,7 @@ def na_op(x, y): except TypeError: xrav = x.ravel() result = np.empty(x.size, dtype=x.dtype) - if isinstance(y, np.ndarray): + if isinstance(y, (np.ndarray, Series)): yrav = y.ravel() mask = notnull(xrav) & notnull(yrav) result[mask] = op(np.array(list(xrav[mask])), @@ -369,16 +370,13 @@ class DataFrame(NDFrame): read_csv / read_table / read_clipboard """ _auto_consolidate = True - _het_axis = 1 - _info_axis = 'columns' - _col_klass = Series + _verbose_info = True - _AXIS_NUMBERS = { - 'index': 0, - 'columns': 1 - } + @property + def _constructor(self): + return DataFrame - _AXIS_NAMES = dict((v, k) for k, v in 
_AXIS_NUMBERS.iteritems()) + _constructor_sliced = Series def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): @@ -389,7 +387,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data = data._data if isinstance(data, BlockManager): - mgr = self._init_mgr(data, index, columns, dtype=dtype, copy=copy) + mgr = self._init_mgr(data, axes = dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): @@ -401,7 +399,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data = data.copy() mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) - elif isinstance(data, np.ndarray): + elif isinstance(data, (np.ndarray,Series)): if data.dtype.names: data_columns, data = _rec_to_dict(data) if columns is None: @@ -449,30 +447,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, else: raise PandasError('DataFrame constructor not properly called!') - NDFrame.__init__(self, mgr) - - @classmethod - def _from_axes(cls, data, axes): - # for construction from BlockManager - if isinstance(data, BlockManager): - return cls(data) - else: - columns, index = axes - return cls(data, index=index, columns=columns, copy=False) - - def _init_mgr(self, mgr, index, columns, dtype=None, copy=False): - if columns is not None: - mgr = mgr.reindex_axis(columns, axis=0, copy=False) - if index is not None: - mgr = mgr.reindex_axis(index, axis=1, copy=False) - # do not copy BlockManager unless explicitly done - if copy and dtype is None: - mgr = mgr.copy() - elif dtype is not None: - # avoid copy if we can - if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: - mgr = mgr.astype(dtype) - return mgr + NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): """ @@ -534,6 +509,10 @@ def _init_ndarray(self, values, index, columns, 
dtype=None, else: values = values.reindex(index) + # zero len case (GH #2234) + if not len(values) and len(columns): + values = np.empty((0,1), dtype=object) + values = _prep_ndarray(values, copy=copy) if dtype is not None: @@ -557,15 +536,11 @@ def _init_ndarray(self, values, index, columns, dtype=None, return create_block_manager_from_blocks([ values.T ], [ columns, index ]) - def _wrap_array(self, arr, axes, copy=False): - index, columns = axes - return self._constructor(arr, index=index, columns=columns, copy=copy) - @property def _verbose_info(self): import warnings warnings.warn('The _verbose_info property will be removed in version ' - '0.12. please use "max_info_rows"', FutureWarning) + '0.12', FutureWarning) return get_option('display.max_info_rows') is None @_verbose_info.setter @@ -581,21 +556,12 @@ def _verbose_info(self, value): def axes(self): return [self.index, self.columns] - @property - def _constructor(self): - return self.__class__ - @property def shape(self): return (len(self.index), len(self.columns)) - #---------------------------------------------------------------------- # Class behavior - @property - def empty(self): - return not (len(self.columns) > 0 and len(self.index) > 0) - def __nonzero__(self): raise ValueError("Cannot call bool() on DataFrame.") @@ -653,28 +619,6 @@ def _repr_fits_horizontal_(self): repr_width = max([len(l) for l in value.split('\n')]) return repr_width <= width - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. 
- """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -714,13 +658,9 @@ def __unicode__(self): return value - def __repr__(self): - """ - Return a string representation for a particular DataFrame - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) + def _need_wide_repr(self): + return (get_option("display.expand_frame_repr") + and com.in_interactive_session()) def _repr_html_(self): """ @@ -752,15 +692,6 @@ def _repr_html_(self): else: return None - def __iter__(self): - """ - Iterate over columns of the frame. - """ - return iter(self.columns) - - def keys(self): - return self.columns - def iteritems(self): """Iterator over (column, series) pairs""" if self.columns.is_unique and hasattr(self, '_item_cache'): @@ -776,9 +707,7 @@ def iterrows(self): """ columns = self.columns for k, v in izip(self.index, self.values): - s = v.view(Series) - s.index = columns - s.name = k + s = Series(v,index=columns,name=k) yield k, s def itertuples(self, index=True): @@ -797,13 +726,9 @@ def itertuples(self, index=True): items = iteritems def __len__(self): - """Returns length of index""" + """Returns length of info axis, but here we use the index """ return len(self.index) - def __contains__(self, key): - """True if DataFrame has this column""" - return key in self.columns - #---------------------------------------------------------------------- # Arithmetic methods @@ -849,14 +774,6 @@ def __contains__(self, key): __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__', default_axis=None) - def __neg__(self): - arr = operator.neg(self.values) - return self._wrap_array(arr, self.axes, copy=False) - - def __invert__(self): - arr = operator.inv(self.values) - return self._wrap_array(arr, self.axes, copy=False) - # Comparison methods __eq__ = _comp_method(operator.eq, '__eq__', '==') __ne__ = 
_comp_method(operator.ne, '__ne__', '!=') @@ -1625,133 +1542,20 @@ def info(self, verbose=True, buf=None, max_cols=None): @property def dtypes(self): - return self.apply(lambda x: x.dtype) - - def convert_objects(self, convert_dates=True, convert_numeric=False): - """ - Attempt to infer better dtype for object columns - Always returns a copy (even if no object columns) + return self.apply(lambda x: x.dtype, reduce=False) - Parameters - ---------- - convert_dates : if True, attempt to soft convert_dates, if 'coerce', force conversion (and non-convertibles get NaT) - convert_numeric : if True attempt to coerce to numerbers (including strings), non-convertibles get NaN - - Returns - ------- - converted : DataFrame - """ - return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric)) - - #---------------------------------------------------------------------- - # properties for index and columns - - columns = lib.AxisProperty(0) - index = lib.AxisProperty(1) - - def as_matrix(self, columns=None): - """ - Convert the frame to its Numpy-array matrix representation. Columns - are presented in sorted order unless a specific list of columns is - provided. - - NOTE: the dtype will be a lower-common-denominator dtype (implicit upcasting) - that is to say if the dtypes (even of numeric types) are mixed, the one that accomodates all will be chosen - use this with care if you are not dealing with the blocks - - e.g. 
if the dtypes are float16,float32 -> float32 - float16,float32,float64 -> float64 - int32,uint8 -> int32 - - Parameters - ---------- - columns : array-like - Specific column order - - Returns - ------- - values : ndarray - If the DataFrame is heterogeneous and contains booleans or objects, - the result will be of dtype=object - """ - self._consolidate_inplace() - return self._data.as_matrix(columns).T - - values = property(fget=as_matrix) - - def as_blocks(self, columns=None): - """ - Convert the frame to a dict of dtype -> DataFrames that each has a homogeneous dtype. - are presented in sorted order unless a specific list of columns is - provided. - - NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) - - Parameters - ---------- - columns : array-like - Specific column order - - Returns - ------- - values : a list of DataFrames - """ - self._consolidate_inplace() - - bd = dict() - for b in self._data.blocks: - b = b.reindex_items_from(columns or b.items) - bd[str(b.dtype)] = DataFrame(BlockManager([ b ], [ b.items, self.index ])) - return bd - - blocks = property(fget=as_blocks) + @property + def ftypes(self): + return self.apply(lambda x: x.ftype, reduce=False) def transpose(self): - """ - Returns a DataFrame with the rows/columns switched. 
If the DataFrame is - homogeneously-typed, the data is not copied - """ - return self._constructor(data=self.values.T, index=self.columns, - columns=self.index, copy=False) + return super(DataFrame, self).transpose(1,0) T = property(transpose) - def swapaxes(self, i, j): - """ - Like ndarray.swapaxes, equivalent to transpose - - Returns - ------- - swapped : DataFrame - View on original data (no copy) - """ - if i in (0, 1) and j in (0, 1): - if i == j: - return self - return self._constructor(data=self.values.T, index=self.columns, - columns=self.index, copy=False) - else: - raise ValueError('Axis numbers must be in (0, 1)') - #---------------------------------------------------------------------- # Picklability - def __getstate__(self): - return self._data - - def __setstate__(self, state): - # old DataFrame pickle - if isinstance(state, BlockManager): - self._data = state - elif isinstance(state[0], dict): # pragma: no cover - self._unpickle_frame_compat(state) - else: # pragma: no cover - # old pickling format, for compatibility - self._unpickle_matrix_compat(state) - - # ordinarily created in NDFrame - self._item_cache = {} - # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array @@ -1785,15 +1589,6 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover self._data = dm._data #---------------------------------------------------------------------- - # Array interface - - def __array__(self, dtype=None): - return self.values - - def __array_wrap__(self, result): - return self._constructor(result, index=self.index, - columns=self.columns, copy=False) - #---------------------------------------------------------------------- # Getting and setting elements @@ -1812,7 +1607,7 @@ def get_value(self, index, col): """ series = self._get_item_cache(col) engine = self.index._engine - return engine.get_value(series, index) + return engine.get_value(series.values, index) def set_value(self, 
index, col, value): """ @@ -1833,7 +1628,7 @@ def set_value(self, index, col, value): try: series = self._get_item_cache(col) engine = self.index._engine - engine.set_value(series, index, value) + engine.set_value(series.values, index, value) return self except KeyError: new_index, new_columns = self._expand_axes((index, col)) @@ -1904,8 +1699,8 @@ def _ixs(self, i, axis=0, copy=False): return self.take(i, axis=1, convert=True) values = self._data.iget(i) - return self._col_klass.from_array(values, index=self.index, - name=label) + return self._constructor_sliced.from_array(values, index=self.index, + name=label, fastpath=True) def iget_value(self, i, j): return self.iat[i,j] @@ -1917,7 +1712,7 @@ def __getitem__(self, key): if indexer is not None: return self._getitem_slice(indexer) - if isinstance(key, (np.ndarray, list)): + if isinstance(key, (Series, np.ndarray, list)): # either boolean or fancy integer index return self._getitem_array(key) elif isinstance(key, DataFrame): @@ -1956,7 +1751,7 @@ def _getitem_array(self, key): def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) - if isinstance(loc, (slice, np.ndarray)): + if isinstance(loc, (slice, Series, np.ndarray)): new_columns = self.columns[loc] result_columns = _maybe_droplevels(new_columns, key) if self._is_mixed_type: @@ -1984,12 +1779,9 @@ def _getitem_frame(self, key): return self.where(key) def _slice(self, slobj, axis=0, raise_on_error=False): - if axis == 0: - mgr_axis = 1 - else: - mgr_axis = 0 - - new_data = self._data.get_slice(slobj, axis=mgr_axis, raise_on_error=raise_on_error) + axis = self._get_block_manager_axis(axis) + new_data = self._data.get_slice(slobj, axis=axis) + new_data = self._data.get_slice(slobj, axis=axis, raise_on_error=raise_on_error) return self._constructor(new_data) def _box_item_values(self, key, values): @@ -1997,32 +1789,11 @@ def _box_item_values(self, key, values): if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) 
else: - return Series.from_array(values, index=self.index, name=items) - - def __getattr__(self, name): - """After regular attribute access, try looking up the name of a column. - This allows simpler access to columns for interactive use.""" - if name in self.columns: - return self[name] - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) - - def __setattr__(self, name, value): - """After regular attribute access, try looking up the name of a column. - This allows simpler access to columns for interactive use.""" - if name == '_data': - super(DataFrame, self).__setattr__(name, value) - else: - try: - existing = getattr(self, name) - if isinstance(existing, Index): - super(DataFrame, self).__setattr__(name, value) - elif name in self.columns: - self[name] = value - else: - object.__setattr__(self, name, value) - except (AttributeError, TypeError): - object.__setattr__(self, name, value) + return self._box_col_values(values, items) + + def _box_col_values(self, values, items): + """ provide boxed values for a column """ + return self._constructor_sliced.from_array(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): # see if we can slice the rows @@ -2030,7 +1801,7 @@ def __setitem__(self, key, value): if indexer is not None: return self._setitem_slice(indexer, value) - if isinstance(key, (np.ndarray, list)): + if isinstance(key, (Series, np.ndarray, list)): self._setitem_array(key, value) elif isinstance(key, DataFrame): self._setitem_frame(key, value) @@ -2086,17 +1857,6 @@ def _set_item(self, key, value): NDFrame._set_item(self, key, value) def insert(self, loc, column, value): - """ - Insert column into DataFrame at specified location. 
Raises Exception if - column is already contained in the DataFrame - - Parameters - ---------- - loc : int - Must have 0 <= loc <= len(columns) - column : object - value : int, Series, or array-like - """ value = self._sanitize_column(column, value) self._data.insert(loc, column, value) @@ -2157,17 +1917,6 @@ def _sanitize_column(self, key, value): value = com._possibly_cast_to_datetime(value, dtype) return np.atleast_2d(np.asarray(value)) - def pop(self, item): - """ - Return column and drop from frame. Raise KeyError if not found. - - Returns - ------- - column : Series - """ - return NDFrame.pop(self, item) - - # to support old APIs @property def _series(self): return self._data.get_series_dict() @@ -2356,236 +2105,40 @@ def lookup(self, row_labels, col_labels): #---------------------------------------------------------------------- # Reindexing and alignment - def align(self, other, join='outer', axis=None, level=None, copy=True, - fill_value=NA, method=None, limit=None, fill_axis=0): - """ - Align two DataFrame object on their index and columns with the - specified join method for each axis Index - - Parameters - ---------- - other : DataFrame or Series - join : {'outer', 'inner', 'left', 'right'}, default 'outer' - axis : {0, 1, None}, default None - Align on index (0), columns (1), or both (None) - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - copy : boolean, default True - Always returns new objects. If copy=False and no reindexing is - required then original objects are returned. - fill_value : scalar, default np.NaN - Value to use for missing values. 
Defaults to NaN, but can be any - "compatible" value - method : str, default None - limit : int, default None - fill_axis : {0, 1}, default 0 - Filling axis, method and limit - - Returns - ------- - (left, right) : (DataFrame, type of other) - Aligned objects - """ - if axis is not None: - axis = self._get_axis_number(axis) - if isinstance(other, DataFrame): - return self._align_frame(other, join=join, axis=axis, level=level, - copy=copy, fill_value=fill_value, - method=method, limit=limit, - fill_axis=fill_axis) - elif isinstance(other, Series): - return self._align_series(other, join=join, axis=axis, level=level, - copy=copy, fill_value=fill_value, - method=method, limit=limit, - fill_axis=fill_axis) - else: # pragma: no cover - raise TypeError('unsupported type: %s' % type(other)) - - def _align_frame(self, other, join='outer', axis=None, level=None, - copy=True, fill_value=NA, method=None, limit=None, - fill_axis=0): - # defaults - join_index, join_columns = None, None - ilidx, iridx = None, None - clidx, cridx = None, None - - if axis is None or axis == 0: - if not self.index.equals(other.index): - join_index, ilidx, iridx = \ - self.index.join(other.index, how=join, level=level, - return_indexers=True) - - if axis is None or axis == 1: - if not self.columns.equals(other.columns): - join_columns, clidx, cridx = \ - self.columns.join(other.columns, how=join, level=level, - return_indexers=True) - - left = self._reindex_with_indexers(join_index, ilidx, - join_columns, clidx, copy, - fill_value=fill_value) - right = other._reindex_with_indexers(join_index, iridx, - join_columns, cridx, copy, - fill_value=fill_value) - - if method is not None: - left = left.fillna(axis=fill_axis, method=method, limit=limit) - right = right.fillna(axis=fill_axis, method=method, limit=limit) - - return left, right - - def _align_series(self, other, join='outer', axis=None, level=None, - copy=True, fill_value=None, method=None, limit=None, - fill_axis=0): - fdata = self._data - if 
axis == 0: - join_index = self.index - lidx, ridx = None, None - if not self.index.equals(other.index): - join_index, lidx, ridx = self.index.join(other.index, how=join, - return_indexers=True) - - if lidx is not None: - fdata = fdata.reindex_indexer(join_index, lidx, axis=1) - elif axis == 1: - join_index = self.columns - lidx, ridx = None, None - if not self.columns.equals(other.index): - join_index, lidx, ridx = \ - self.columns.join(other.index, how=join, - return_indexers=True) - - if lidx is not None: - fdata = fdata.reindex_indexer(join_index, lidx, axis=0) - else: - raise ValueError('Must specify axis=0 or 1') - - if copy and fdata is self._data: - fdata = fdata.copy() - - left_result = DataFrame(fdata) - right_result = other if ridx is None else other.reindex(join_index) - - fill_na = notnull(fill_value) or (method is not None) - if fill_na: - return (left_result.fillna(fill_value, method=method, limit=limit, - axis=fill_axis), - right_result.fillna(fill_value, method=method, - limit=limit)) - else: - return left_result, right_result - - def reindex(self, index=None, columns=None, method=None, level=None, - fill_value=NA, limit=None, copy=True): - """Conform DataFrame to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. A new object - is produced unless the new index is equivalent to the current one and - copy=False - - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. 
Preferably an Index object to - avoid duplicating data - columns : array-like, optional - Same usage as index argument - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - fill_value : scalar, default np.NaN - Value to use for missing values. Defaults to NaN, but can be any - "compatible" value - limit : int, default None - Maximum size gap to forward or backward fill - - Examples - -------- - >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C']) + def _reindex_axes(self, axes, level, limit, method, fill_value, copy): + frame = self - Returns - ------- - reindexed : same type as calling instance - """ - self._consolidate_inplace() - frame = self - - if (index is not None and columns is not None - and method is None and level is None - and not self._is_mixed_type): - return self._reindex_multi(index, columns, copy, fill_value) - - if columns is not None: - frame = frame._reindex_columns(columns, copy, level, - fill_value, limit) - - if index is not None: - frame = frame._reindex_index(index, method, copy, level, + columns = axes['columns'] + if columns is not None: + frame = frame._reindex_columns(columns, copy, level, fill_value, limit) - return frame + index = axes['index'] + if index is not None: + frame = frame._reindex_index(index, method, copy, level, + fill_value, limit) - def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, - limit=None, fill_value=NA): - """Conform DataFrame to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. 
A new object - is produced unless the new index is equivalent to the current one and - copy=False + return frame - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. Preferably an Index object to - avoid duplicating data - axis : {0, 1} - 0 -> index (rows) - 1 -> columns - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - limit : int, default None - Maximum size gap to forward or backward fill - - Examples - -------- - >>> df.reindex_axis(['A', 'B', 'C'], axis=1) + def _reindex_index(self, new_index, method, copy, level, fill_value=NA, + limit=None): + new_index, indexer = self.index.reindex(new_index, method, level, + limit=limit) + return self._reindex_with_indexers({ 0 : [ new_index, indexer ] }, + copy=copy, fill_value=fill_value) - See also - -------- - DataFrame.reindex, DataFrame.reindex_like + def _reindex_columns(self, new_columns, copy, level, fill_value=NA, + limit=None): + new_columns, indexer = self.columns.reindex(new_columns, level=level, + limit=limit) + return self._reindex_with_indexers({ 1 : [ new_columns, indexer ] }, + copy=copy, fill_value=fill_value) - Returns - ------- - reindexed : same type as calling instance - """ - self._consolidate_inplace() - axis = self._get_axis_number(axis) - if axis == 0: - return self._reindex_index(labels, method, copy, level, - fill_value=fill_value, - limit=limit) - elif axis == 1: - return self._reindex_columns(labels, copy, level, - fill_value=fill_value, - limit=limit) - else: # pragma: no cover - raise ValueError('Must specify axis=0 or 1') + def _reindex_multi(self, 
axes, copy, fill_value): + """ we are guaranteed non-Nones in the axes! """ - def _reindex_multi(self, new_index, new_columns, copy, fill_value): - new_index, row_indexer = self.index.reindex(new_index) - new_columns, col_indexer = self.columns.reindex(new_columns) + new_index, row_indexer = self.index.reindex(axes['index']) + new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer @@ -2594,83 +2147,12 @@ def _reindex_multi(self, new_index, new_columns, copy, fill_value): return self._constructor(new_values, index=new_index, columns=new_columns) elif row_indexer is not None: - return self._reindex_with_indexers(new_index, row_indexer, - None, None, copy, fill_value) + return self._reindex_with_indexers({ 0 : [ new_index, row_indexer ] }, copy=copy, fill_value=fill_value) elif col_indexer is not None: - return self._reindex_with_indexers(None, None, - new_columns, col_indexer, - copy, fill_value) + return self._reindex_with_indexers({ 1 : [ new_columns, col_indexer ] }, copy=copy, fill_value=fill_value) else: return self.copy() if copy else self - def _reindex_index(self, new_index, method, copy, level, fill_value=NA, - limit=None): - new_index, indexer = self.index.reindex(new_index, method, level, - limit=limit) - return self._reindex_with_indexers(new_index, indexer, None, None, - copy, fill_value) - - def _reindex_columns(self, new_columns, copy, level, fill_value=NA, - limit=None): - new_columns, indexer = self.columns.reindex(new_columns, level=level, - limit=limit) - return self._reindex_with_indexers(None, None, new_columns, indexer, - copy, fill_value) - - def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, - copy, fill_value): - new_data = self._data - if row_indexer is not None: - row_indexer = com._ensure_int64(row_indexer) - new_data = new_data.reindex_indexer(index, row_indexer, axis=1, - fill_value=fill_value) - elif index is not 
None and index is not new_data.axes[1]: - new_data = new_data.copy(deep=copy) - new_data.axes[1] = index - - if col_indexer is not None: - # TODO: speed up on homogeneous DataFrame objects - col_indexer = com._ensure_int64(col_indexer) - new_data = new_data.reindex_indexer(columns, col_indexer, axis=0, - fill_value=fill_value) - elif columns is not None and columns is not new_data.axes[0]: - new_data = new_data.reindex_items(columns, copy=copy, - fill_value=fill_value) - - if copy and new_data is self._data: - new_data = new_data.copy() - - return DataFrame(new_data) - - def reindex_like(self, other, method=None, copy=True, limit=None, - fill_value=NA): - """ - Reindex DataFrame to match indices of another DataFrame, optionally - with filling logic - - Parameters - ---------- - other : DataFrame - method : string or None - copy : boolean, default True - limit : int, default None - Maximum size gap to forward or backward fill - - Notes - ----- - Like calling s.reindex(index=other.index, columns=other.columns, - method=...) 
- - Returns - ------- - reindexed : DataFrame - """ - return self.reindex(index=other.index, columns=other.columns, - method=method, copy=copy, limit=limit, - fill_value=fill_value) - - truncate = generic.truncate - def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ @@ -2893,46 +2375,12 @@ def take(self, indices, axis=0, convert=True): else: new_columns = self.columns.take(indices) new_index = self.index - return DataFrame(new_values, index=new_index, - columns=new_columns) + return self._constructor(new_values, index=new_index, + columns=new_columns) #---------------------------------------------------------------------- # Reindex-based selection methods - def filter(self, items=None, like=None, regex=None): - """ - Restrict frame's columns to set of items or wildcard - - Parameters - ---------- - items : list-like - List of columns to restrict to (must not all be present) - like : string - Keep columns where "arg in col == True" - regex : string (regular expression) - Keep columns with re.search(regex, col) == True - - Notes - ----- - Arguments are mutually exclusive, but this is not checked for - - Returns - ------- - DataFrame with filtered columns - """ - import re - if items is not None: - return self.reindex(columns=[r for r in items if r in self]) - elif like: - matchf = lambda x: (like in x if isinstance(x, basestring) - else like in str(x)) - return self.select(matchf, axis=1) - elif regex: - matcher = re.compile(regex) - return self.select(lambda x: matcher.search(x) is not None, axis=1) - else: - raise ValueError('items was None!') - def dropna(self, axis=0, how='any', thresh=None, subset=None): """ Return object with labels on given axis omitted where alternately any @@ -3049,13 +2497,13 @@ def _m8_to_i8(x): if np.iterable(cols) and not isinstance(cols, basestring): if isinstance(cols, tuple): if cols in self.columns: - values = [self[cols]] + values = [self[cols].values] else: values = 
[_m8_to_i8(self[x].values) for x in cols] else: values = [_m8_to_i8(self[x].values) for x in cols] else: - values = [self[cols]] + values = [self[cols].values] keys = lib.fast_zip_fillna(values) duplicated = lib.duplicated(keys, take_last=take_last) @@ -3268,233 +2716,6 @@ def reorder_levels(self, order, axis=0): result.columns = result.columns.reorder_levels(order) return result - #---------------------------------------------------------------------- - # Filling NA's - - def fillna(self, value=None, method=None, axis=0, inplace=False, - limit=None, downcast=None): - """ - Fill NA/NaN values using the specified method - - Parameters - ---------- - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - value : scalar or dict - Value to use to fill holes (e.g. 0), alternately a dict of values - specifying which value to use for each column (columns not in the - dict will not be filled) - axis : {0, 1}, default 0 - 0: fill column-by-column - 1: fill row-by-row - inplace : boolean, default False - If True, fill the DataFrame in place. Note: this will modify any - other views on this DataFrame, like if you took a no-copy slice of - an existing DataFrame, for example a column in a DataFrame. 
Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - downcast : dict, default is None, a dict of item->dtype of what to - downcast if possible - - See also - -------- - reindex, asfreq - - Returns - ------- - filled : DataFrame - """ - self._consolidate_inplace() - - axis = self._get_axis_number(axis) - if value is None: - if method is None: - raise ValueError('must specify a fill method or value') - if self._is_mixed_type and axis == 1: - if inplace: - raise NotImplementedError() - return self.T.fillna(method=method, limit=limit).T - - method = com._clean_fill_method(method) - new_data = self._data.interpolate(method = method, - axis = axis, - limit = limit, - inplace = inplace, - coerce = True) - else: - if method is not None: - raise ValueError('cannot specify both a fill method and value') - # Float type values - if len(self.columns) == 0: - return self - if isinstance(value, (dict, Series)): - if axis == 1: - raise NotImplementedError('Currently only can fill ' - 'with dict/Series column ' - 'by column') - - result = self if inplace else self.copy() - for k, v in value.iteritems(): - if k not in result: - continue - result[k].fillna(v, inplace=True) - return result - else: - new_data = self._data.fillna(value, inplace=inplace, - downcast=downcast) - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - - def ffill(self, axis=0, inplace=False, limit=None): - return self.fillna(method='ffill', axis=axis, inplace=inplace, - limit=limit) - - def bfill(self, axis=0, inplace=False, limit=None): - return self.fillna(method='bfill', axis=axis, inplace=inplace, - limit=limit) - - def replace(self, to_replace, value=None, method='pad', axis=0, - inplace=False, limit=None): - """ - Replace values given in 'to_replace' with 'value' or using 'method' - - Parameters - ---------- - value : scalar or dict, default None - Value to use to fill holes 
(e.g. 0), alternately a dict of values - specifying which value to use for each column (columns not in the - dict will not be filled) - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - axis : {0, 1}, default 0 - 0: fill column-by-column - 1: fill row-by-row - inplace : boolean, default False - If True, fill the DataFrame in place. Note: this will modify any - other views on this DataFrame, like if you took a no-copy slice of - an existing DataFrame, for example a column in a DataFrame. Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - - See also - -------- - reindex, asfreq - - Returns - ------- - filled : DataFrame - """ - self._consolidate_inplace() - - axis = self._get_axis_number(axis) - - if value is None: - return self._interpolate(to_replace, method, axis, inplace, limit) - else: - if len(self.columns) == 0: - return self - - new_data = self._data - if isinstance(to_replace, (dict, Series)): - if isinstance(value, (dict, Series)): # {'A' : NA} -> {'A' : 0} - new_data = self._data - for c, src in to_replace.iteritems(): - if c in value and c in self: - new_data = new_data.replace(src, value[c], - filter=[ c ], - inplace=inplace) - - elif not isinstance(value, (list, np.ndarray)): - new_data = self._data - for k, src in to_replace.iteritems(): - if k in self: - new_data = new_data.replace(src, value, - filter = [ k ], - inplace=inplace) - else: - raise ValueError('Fill value must be scalar or dict or Series') - - elif isinstance(to_replace, (list, np.ndarray)): - # [NA, ''] -> [0, 'missing'] - if isinstance(value, (list, np.ndarray)): - if len(to_replace) != len(value): - raise ValueError('Replacement lists must match ' - 'in length. 
Expecting %d got %d ' % - (len(to_replace), len(value))) - - new_data = self._data.replace_list(to_replace, value, - inplace=inplace) - - else: # [NA, ''] -> 0 - new_data = self._data.replace(to_replace, value, - inplace=inplace) - - else: - - # dest iterable dict-like - if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1} - - new_data = self._data - for k, v in value.iteritems(): - if k in self: - new_data = new_data.replace(to_replace, v, - filter=[ k ], - inplace=inplace) - - elif not isinstance(value, (list, np.ndarray)): # NA -> 0 - new_data = self._data.replace(to_replace, value, - inplace=inplace) - else: - raise ValueError('Invalid to_replace type: %s' % - type(to_replace)) # pragma: no cover - - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - - def _interpolate(self, to_replace, method, axis, inplace, limit): - if self._is_mixed_type and axis == 1: - return self.T.replace(to_replace, method=method, limit=limit).T - - method = com._clean_fill_method(method) - - if isinstance(to_replace, (dict, Series)): - if axis == 1: - return self.T.replace(to_replace, method=method, - limit=limit).T - - rs = self if inplace else self.copy() - for k, v in to_replace.iteritems(): - if k in rs: - rs[k].replace(v, method=method, limit=limit, - inplace=True) - return rs if not inplace else None - - else: - - new_data = self._data.interpolate(method = method, - axis = axis, - limit = limit, - inplace = inplace, - missing = to_replace, - coerce = False) - - if inplace: - self._data = new_data - else: - return self._constructor(new_data) - #---------------------------------------------------------------------- # Rename @@ -3586,11 +2807,6 @@ def _arith_op(left, right): return self._constructor(result, index=new_index, columns=new_columns, copy=False) - def _indexed_same(self, other): - same_index = self.index.equals(other.index) - same_columns = self.columns.equals(other.columns) - return same_index and same_columns - def 
_combine_series(self, other, func, fill_value=None, axis=None, level=None): if axis is not None: @@ -4042,7 +3258,7 @@ def shift(self, periods=1, freq=None, **kwds): #---------------------------------------------------------------------- # Function application - def apply(self, func, axis=0, broadcast=False, raw=False, + def apply(self, func, axis=0, broadcast=False, raw=False, reduce=True, args=(), **kwds): """ Applies function along input axis of DataFrame. Objects passed to @@ -4060,6 +3276,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, broadcast : bool, default False For aggregation functions, return object of same size with values propagated + reduce : bool, default True, try to apply reduction procedures raw : boolean, default False If False, convert each row or column into a Series. If raw=True the passed function will receive ndarray objects instead. If you are @@ -4103,8 +3320,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, # How to determine this better? 
is_reduction = False try: - is_reduction = not isinstance(f(_EMPTY_SERIES), - np.ndarray) + is_reduction = not isinstance(f(_EMPTY_SERIES), Series) except Exception: pass @@ -4116,7 +3332,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, if raw and not self._is_mixed_type: return self._apply_raw(f, axis) else: - return self._apply_standard(f, axis) + return self._apply_standard(f, axis, reduce=reduce) else: return self._apply_broadcast(f, axis) @@ -4133,21 +3349,26 @@ def _apply_raw(self, func, axis): else: return Series(result, index=self._get_agg_axis(axis)) - def _apply_standard(self, func, axis, ignore_failures=False): - try: - - if self._is_mixed_type: # maybe a hack for now - raise AssertionError('Must be mixed type DataFrame') - values = self.values - dummy = Series(NA, index=self._get_axis(axis), - dtype=values.dtype) + def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): - labels = self._get_agg_axis(axis) - result = lib.reduce(values, func, axis=axis, dummy=dummy, - labels=labels) - return Series(result, index=self._get_agg_axis(axis)) - except Exception: - pass + # try to reduce first (by default) + # this only matters if the reduction in values is of different dtype + # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce + if reduce: + try: + + if self._is_mixed_type: # maybe a hack for now + raise AssertionError('Must be mixed type DataFrame') + values = self.values.ravel() + dummy = Series(NA, index=self._get_axis(axis), + dtype=values.dtype) + + labels = self._get_agg_axis(axis) + result = lib.reduce(values, func, axis=axis, dummy=dummy, + labels=labels) + return Series(result, index=self._get_agg_axis(axis)) + except Exception: + pass if axis == 0: series_gen = (self.icol(i) for i in range(len(self.columns))) @@ -4254,11 +3475,12 @@ def applymap(self, func): # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if com.is_datetime64_dtype(x): - x = lib.map_infer(x, lib.Timestamp) - return lib.map_infer(x, func) + x = lib.map_infer(_values_from_object(x), lib.Timestamp) + return lib.map_infer(_values_from_object(x), func) #GH2786 if not self.columns.is_unique: raise ValueError("applymap does not support dataframes having duplicate column labels") + return self.apply(infer) #---------------------------------------------------------------------- @@ -4945,6 +4167,7 @@ def idxmax(self, axis=0, skipna=True): return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): + """ let's be explict about this """ if axis_num == 0: return self.columns elif axis_num == 1: @@ -5180,78 +4403,7 @@ def combineMult(self, other): """ return self.mul(other, fill_value=1.) - def where(self, cond, other=NA, inplace=False, try_cast=False, raise_on_error=True): - """ - Return a DataFrame with the same shape as self and whose corresponding - entries are from self where cond is True and otherwise are from other. 
- - Parameters - ---------- - cond : boolean DataFrame or array - other : scalar or DataFrame - inplace : boolean, default False - Whether to perform the operation in place on the data - try_cast : boolean, default False - try to cast the result back to the input type (if possible), - raise_on_error : boolean, default True - Whether to raise on invalid data types (e.g. trying to where on - strings) - - Returns - ------- - wh : DataFrame - """ - if isinstance(cond, DataFrame): - # this already checks for index/column equality - cond = cond.reindex(self.index, columns=self.columns) - else: - if not hasattr(cond, 'shape'): - raise ValueError('where requires an ndarray like object for its ' - 'condition') - if cond.shape != self.shape: - raise ValueError('Array conditional must be same shape as self') - cond = self._constructor(cond, index=self.index, - columns=self.columns) - - if inplace: - cond = -(cond.fillna(True).astype(bool)) - else: - cond = cond.fillna(False).astype(bool) - - if isinstance(other, DataFrame): - _, other = self.align(other, join='left', fill_value=NA) - elif isinstance(other,np.ndarray): - if other.shape != self.shape: - raise ValueError('other must be the same shape as self ' - 'when an ndarray') - other = self._constructor(other, self.index, self.columns) - - if inplace: - # we may have different type blocks come out of putmask, so - # reconstruct the block manager - self._data = self._data.putmask(cond,other,inplace=True) - - else: - new_data = self._data.where(other, cond, - raise_on_error=raise_on_error, - try_cast=try_cast) - - return self._constructor(new_data) - - def mask(self, cond): - """ - Returns copy of self whose values are replaced with nan if the - inverted condition is True - - Parameters - ---------- - cond: boolean DataFrame or array - - Returns - ------- - wh: DataFrame - """ - return self.where(~cond, NA) +DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True) _EMPTY_SERIES = 
Series([]) @@ -5420,7 +4572,7 @@ def convert(v): def _rec_to_dict(arr): - if isinstance(arr, np.ndarray): + if isinstance(arr, (np.ndarray, Series)): columns = list(arr.dtype.names) sdict = dict((k, arr[k]) for k in columns) elif isinstance(arr, DataFrame): @@ -5462,7 +4614,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) - elif isinstance(data, np.ndarray): + elif isinstance(data, (np.ndarray, Series)): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ed90aab715cfd..5314cba5e8d04 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1,38 +1,198 @@ # pylint: disable=W0231,E1101 +import operator import numpy as np -from pandas.core.index import MultiIndex +from pandas.core.index import Index, MultiIndex, _ensure_index import pandas.core.indexing as indexing from pandas.core.indexing import _maybe_convert_indices from pandas.tseries.index import DatetimeIndex -import pandas.core.common as com +from pandas.core.internals import BlockManager import pandas.lib as lib +from pandas.util import py3compat +import pandas.core.common as com +from pandas.core.common import (isnull, notnull, is_list_like, + _values_from_object, + _infer_dtype_from_scalar, _maybe_promote) +from pandas.core.base import PandasObject +_internal_names = ['_data','name','_subtyp','_index','_default_kind','_default_fill_value'] +_internal_names_set = set(_internal_names) + +class NDFrame(PandasObject): + """ + N-dimensional analogue of DataFrame. 
Store multi-dimensional in a + size-mutable, labeled data structure -class PandasError(Exception): - pass + Parameters + ---------- + data : BlockManager + axes : list + copy : boolean, default False + """ + def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): -class PandasObject(object): + if not fastpath: + if dtype is not None: + data = data.astype(dtype) + elif copy: + data = data.copy() - _AXIS_NUMBERS = { - 'index': 0, - 'columns': 1 - } + if axes is not None: + for i, ax in enumerate(axes): + data = data.reindex_axis(ax, axis=i) - _AXIS_ALIASES = {} - _AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems()) + object.__setattr__(self, '_data', data) + object.__setattr__(self, '_item_cache', {}) - def save(self, path): - com.save(self, path) + def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): + """ passed a manager and a axes dict """ + for a, axe in axes.items(): + if axe is not None: + mgr = mgr.reindex_axis(axe, axis=self._get_block_manager_axis(a), copy=False) + + # do not copy BlockManager unless explicitly done + if copy and dtype is None: + mgr = mgr.copy() + elif dtype is not None: + # avoid copy if we can + if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: + mgr = mgr.astype(dtype) + return mgr - @classmethod - def load(cls, path): - return com.load(path) + #---------------------------------------------------------------------- + # Construction + + @property + def _constructor(self): + raise NotImplementedError + + @property + def _constructor_sliced(self): + raise NotImplementedError #---------------------------------------------------------------------- - # Axis name business + # Axis + + @classmethod + def _setup_axes(cls, axes, info_axis = None, stat_axis = None, aliases = None, slicers = None, + axes_are_reversed = False, build_axes = True, ns = None): + """ provide axes setup for the major PandasObjects + + axes : the names of the axes in order (lowest to highest) + info_axis_num : the axis 
of the selector dimension (int) + stat_axis_num : the number of axis for the default stats (int) + aliases : other names for a single axis (dict) + slicers : how axes slice to others (dict) + axes_are_reversed : boolean whether to treat passed axes as reversed (DataFrame) + build_axes : setup the axis properties (default True) + """ + + cls._AXIS_ORDERS = axes + cls._AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(axes) ]) + cls._AXIS_LEN = len(axes) + cls._AXIS_ALIASES = aliases or dict() + cls._AXIS_IALIASES = dict([ (v,k) for k, v in cls._AXIS_ALIASES.items() ]) + cls._AXIS_NAMES = dict([(i, a) for i, a in enumerate(axes) ]) + cls._AXIS_SLICEMAP = slicers or None + cls._AXIS_REVERSED = axes_are_reversed + + # typ + setattr(cls,'_typ',cls.__name__.lower()) + + # indexing support + cls._ix = None + + if info_axis is not None: + cls._info_axis_number = info_axis + cls._info_axis_name = axes[info_axis] + + if stat_axis is not None: + cls._stat_axis_number = stat_axis + cls._stat_axis_name = axes[stat_axis] + + # setup the actual axis + if build_axes: + + def set_axis(a, i): + setattr(cls,a,lib.AxisProperty(i)) + + if axes_are_reversed: + m = cls._AXIS_LEN-1 + for i, a in cls._AXIS_NAMES.items(): + set_axis(a,m-i) + else: + for i, a in cls._AXIS_NAMES.items(): + set_axis(a,i) + + # addtl parms + if isinstance(ns, dict): + for k, v in ns.items(): + setattr(cls,k,v) + + def _construct_axes_dict(self, axes=None, **kwargs): + """ return an axes dictionary for myself """ + d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)]) + d.update(kwargs) + return d + + @staticmethod + def _construct_axes_dict_from(self, axes, **kwargs): + """ return an axes dictionary for the passed axes """ + d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) + d.update(kwargs) + return d + + def _construct_axes_dict_for_slice(self, axes=None, **kwargs): + """ return an axes dictionary for myself """ + d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a)) + for a in 
(axes or self._AXIS_ORDERS)]) + d.update(kwargs) + return d + + def _construct_axes_from_arguments(self, args, kwargs, require_all=False): + """ construct and returns axes if supplied in args/kwargs + if require_all, raise if all axis arguments are not supplied + return a tuple of (axes, kwargs) """ + + # construct the args + args = list(args) + for a in self._AXIS_ORDERS: + + # if we have an alias for this axis + alias = self._AXIS_IALIASES.get(a) + if alias is not None: + if a in kwargs: + if alias in kwargs: + raise Exception("arguments are multually exclusive for [%s,%s]" % (a,alias)) + continue + if alias in kwargs: + kwargs[a] = kwargs.pop(alias) + continue + + # look for a argument by position + if a not in kwargs: + try: + kwargs[a] = args.pop(0) + except (IndexError): + if require_all: + raise ValueError( + "not enough arguments specified!") + + axes = dict([ (a,kwargs.get(a)) for a in self._AXIS_ORDERS]) + return axes, kwargs + + @classmethod + def _from_axes(cls, data, axes): + # for construction from BlockManager + if isinstance(data, BlockManager): + return cls(data) + else: + if cls._AXIS_REVERSED: + axes = axes[::-1] + d = cls._construct_axes_dict_from(cls, axes, copy=False) + return cls(data, **d) def _get_axis_number(self, axis): axis = self._AXIS_ALIASES.get(axis, axis) @@ -62,20 +222,1228 @@ def _get_axis(self, axis): name = self._get_axis_name(axis) return getattr(self, name) + def _get_block_manager_axis(self, axis): + """ map the axis to the block_manager axis """ + axis = self._get_axis_number(axis) + if self._AXIS_REVERSED: + m = self._AXIS_LEN-1 + return m-axis + return axis + + @property + def _info_axis(self): + return getattr(self, self._info_axis_name) + + @property + def _stat_axis(self): + return getattr(self, self._stat_axis_name) + + @property + def shape(self): + return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) + + @property + def axes(self): + """ we do it this way because if we have reversed axes, then + the block 
manager shows then reversed """ + return [self._get_axis(a) for a in self._AXIS_ORDERS] + + def _construct_axes_dict(self, axes=None, **kwargs): + """ return an axes dictionary for myself """ + d = dict([(a, getattr(self, a)) for a in (axes or self._AXIS_ORDERS)]) + d.update(kwargs) + return d + + @staticmethod + def _construct_axes_dict_from(self, axes, **kwargs): + """ return an axes dictionary for the passed axes """ + d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) + d.update(kwargs) + return d + + @property + def values(self): + return self._data.as_matrix() + + @property + def ndim(self): + return self._data.ndim + + def _expand_axes(self, key): + new_axes = [] + for k, ax in zip(key, self.axes): + if k not in ax: + if type(k) != ax.dtype.type: + ax = ax.astype('O') + new_axes.append(ax.insert(len(ax), k)) + else: + new_axes.append(ax) + + return new_axes + + def _set_axis(self, axis, labels): + self._data.set_axis(axis, labels) + self._clear_item_cache() + + def transpose(self, *args, **kwargs): + """ + Permute the dimensions of the Object + + Parameters + ---------- + axes : int or name (or alias) + copy : boolean, default False + Make a copy of the underlying data. 
Mixed-dtype data will + always result in a copy + + Examples + -------- + >>> p.transpose(2, 0, 1) + >>> p.transpose(2, 0, 1, copy=True) + + Returns + ------- + y : same as input + """ + + # construct the args + axes, kwargs = self._construct_axes_from_arguments(args, kwargs, require_all=True) + axes_names = tuple([ self._get_axis_name( axes[a]) for a in self._AXIS_ORDERS ]) + axes_numbers = tuple([ self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS ]) + + # we must have unique axes + if len(axes) != len(set(axes)): + raise ValueError('Must specify %s unique axes' % self._AXIS_LEN) + + new_axes = self._construct_axes_dict_from( + self, [self._get_axis(x) for x in axes_names]) + new_values = self.values.transpose(axes_numbers) + if kwargs.get('copy') or (len(args) and args[-1]): + new_values = new_values.copy() + return self._constructor(new_values, **new_axes) + + def swapaxes(self, axis1, axis2, copy=True): + """ + Interchange axes and swap values axes appropriately + + Returns + ------- + y : same as input + """ + i = self._get_axis_number(axis1) + j = self._get_axis_number(axis2) + + if i == j: + if copy: + return self.copy() + return self + + mapping = {i: j, j: i} + + new_axes = (self._get_axis(mapping.get(k, k)) + for k in range(self._AXIS_LEN)) + new_values = self.values.swapaxes(i, j) + if copy: + new_values = new_values.copy() + + return self._constructor(new_values, *new_axes) + + def pop(self, item): + """ + Return item and drop from frame. Raise KeyError if not found. + """ + result = self[item] + del self[item] + return result + + def squeeze(self): + """ squeeze length 1 dimensions """ + try: + return self.ix[tuple([ slice(None) if len(a) > 1 else a[0] for a in self.axes ])] + except: + return self + + def swaplevel(self, i, j, axis=0): + """ + Swap levels i and j in a MultiIndex on a particular axis + + Parameters + ---------- + i, j : int, string (can be mixed) + Level of index to be swapped. Can pass level name as string. 
+ + Returns + ------- + swapped : type of caller (new object) + """ + axis = self._get_axis_number(axis) + result = self.copy() + labels = result._data.axes[axis] + result._data.set_axis(axis, labels.swaplevel(i, j)) + return result + + def rename_axis(self, mapper, axis=0, copy=True): + """ + Alter index and / or columns using input function or functions. + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. + + Parameters + ---------- + mapper : dict-like or function, optional + axis : int, default 0 + copy : boolean, default True + Also copy underlying data + + Returns + ------- + renamed : type of caller + """ + # should move this at some point + from pandas.core.series import _get_rename_function + + mapper_f = _get_rename_function(mapper) + + if axis == 0: + new_data = self._data.rename_items(mapper_f, copydata=copy) + else: + new_data = self._data.rename_axis(mapper_f, axis=axis) + if copy: + new_data = new_data.copy() + + return self._constructor(new_data) + + #---------------------------------------------------------------------- + # Comparisons + + def _indexed_same(self, other): + return all([ self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS]) + + def reindex(self, *args, **kwds): + raise NotImplementedError + + def __neg__(self): + arr = operator.neg(_values_from_object(self)) + return self._wrap_array(arr, self.axes, copy=False) + + def __invert__(self): + arr = operator.inv(_values_from_object(self)) + return self._wrap_array(arr, self.axes, copy=False) + + #---------------------------------------------------------------------- + # Iteration + + def __hash__(self): + raise TypeError + + def __iter__(self): + """ + Iterate over infor axis + """ + return iter(self._info_axis) + + def keys(self): + """ return the info axis names """ + return self._info_axis + + def iteritems(self): + for h in self._info_axis: + yield h, self[h] + + # Name that won't get automatically 
converted to items by 2to3. items is + # already in use for the first axis. + iterkv = iteritems + + def __len__(self): + """Returns length of info axis """ + return len(self._info_axis) + + def __contains__(self, key): + """True if the key is in the info axis """ + return key in self._info_axis + + @property + def empty(self): + return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS) + + #---------------------------------------------------------------------- + # Array Interface + + def _wrap_array(self, arr, axes, copy=False): + d = self._construct_axes_dict_from(self, axes, copy=copy) + return self._constructor(arr, **d) + + def __array__(self, dtype=None): + return _values_from_object(self) + + def __array_wrap__(self, result): + d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) + return self._constructor(result, **d) + + def to_dense(self): + # compat + return self + + #---------------------------------------------------------------------- + # Picklability + + def __getstate__(self): + return self._data + + def __setstate__(self, state): + + if isinstance(state, BlockManager): + self._data = state + elif isinstance(state, dict): + typ = state.get('_typ') + if typ is not None: + + # set in the order of internal names + # to avoid definitional recursion + # e.g. 
say fill_value needing _data to be + # defined + for k in _internal_names: + if k in state: + v = state[k] + object.__setattr__(self,k,v) + + for k, v in state.items(): + if k not in _internal_names: + object.__setattr__(self,k,v) + + else: + self._unpickle_series_compat(state) + elif isinstance(state[0], dict): + if len(state) == 5: + self._unpickle_sparse_frame_compat(state) + else: + self._unpickle_frame_compat(state) + elif len(state) == 4: + self._unpickle_panel_compat(state) + elif len(state) == 2: + self._unpickle_series_compat(state) + else: # pragma: no cover + # old pickling format, for compatibility + self._unpickle_matrix_compat(state) + + self._item_cache = {} + #---------------------------------------------------------------------- - # Indexers + # Fancy Indexing + @classmethod def _create_indexer(cls, name, indexer): """ create an indexer like _name in the class """ iname = '_%s' % name setattr(cls,iname,None) - def _indexer(self): - if getattr(self,iname,None) is None: - setattr(self,iname,indexer(self, name)) - return getattr(self,iname) + def _indexer(self): + if getattr(self,iname,None) is None: + setattr(self,iname,indexer(self, name)) + return getattr(self,iname) + + setattr(cls,name,property(_indexer)) + + def get(self, key, default=None): + """ + Get item from object for given key (DataFrame column, Panel slice, + etc.). 
Returns default value if not found + + Parameters + ---------- + key : object + + Returns + ------- + value : type of items contained in object + """ + try: + return self[key] + except KeyError: + return default + + def __getitem__(self, item): + return self._get_item_cache(item) + + def _get_item_cache(self, item): + cache = self._item_cache + try: + return cache[item] + except Exception: + values = self._data.get(item) + res = self._box_item_values(item, values) + cache[item] = res + return res + + def _box_item_values(self, key, values): + raise NotImplementedError + + def _clear_item_cache(self): + self._item_cache.clear() + + def _set_item(self, key, value): + self._data.set(key, value) + self._clear_item_cache() + + def __delitem__(self, key): + """ + Delete item + """ + deleted = False + + maybe_shortcut = False + if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex): + try: + maybe_shortcut = key not in self.columns._engine + except TypeError: + pass + + if maybe_shortcut: + # Allow shorthand to delete all columns whose first len(key) + # elements match key: + if not isinstance(key, tuple): + key = (key,) + for col in self.columns: + if isinstance(col, tuple) and col[:len(key)] == key: + del self[col] + deleted = True + if not deleted: + # If the above loop ran and didn't delete anything because + # there was no match, this call should raise the appropriate + # exception: + self._data.delete(key) + + try: + del self._item_cache[key] + except KeyError: + pass + + def take(self, indices, axis=0, convert=True): + """ + Analogous to ndarray.take + + Parameters + ---------- + indices : list / array of ints + axis : int, default 0 + convert : translate neg to pos indices (default) + + Returns + ------- + taken : type of caller + """ + + # check/convert indicies here + if convert: + axis = self._get_axis_number(axis) + indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) + + if axis == 0: + labels = self._get_axis(axis) + new_items = 
labels.take(indices) + new_data = self._data.reindex_axis(new_items, axis=0) + else: + new_data = self._data.take(indices, axis=axis, verify=False) + return self._constructor(new_data) + + def select(self, crit, axis=0): + """ + Return data corresponding to axis labels matching criteria + + Parameters + ---------- + crit : function + To be called on each index (label). Should return True or False + axis : int + + Returns + ------- + selection : type of caller + """ + axis = self._get_axis_number(axis) + axis_name = self._get_axis_name(axis) + axis_values = self._get_axis(axis) + + if len(axis_values) > 0: + new_axis = axis_values[np.asarray([bool(crit(label)) for label in axis_values])] + else: + new_axis = axis_values + + return self.reindex(**{axis_name: new_axis}) + + def reindex_like(self, other, method=None, copy=True, limit=None): + """ return an object with matching indicies to myself + + Parameters + ---------- + other : Object + method : string or None + copy : boolean, default True + limit : int, default None + Maximum size gap to forward or backward fill + + Notes + ----- + Like calling s.reindex(index=other.index, columns=other.columns, + method=...) 
+ + Returns + ------- + reindexed : same as input + """ + d = other._construct_axes_dict(method=method) + return self.reindex(**d) + + def drop(self, labels, axis=0, level=None): + """ + Return new object with labels in requested axis removed + + Parameters + ---------- + labels : array-like + axis : int + level : int or name, default None + For MultiIndex + + Returns + ------- + dropped : type of caller + """ + axis_name = self._get_axis_name(axis) + axis, axis_ = self._get_axis(axis), axis + + if axis.is_unique: + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError('axis must be a MultiIndex') + new_axis = axis.drop(labels, level=level) + else: + new_axis = axis.drop(labels) + dropped = self.reindex(**{axis_name: new_axis}) + try: + dropped.axes[axis_].names = axis.names + except AttributeError: + pass + return dropped + + else: + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError('axis must be a MultiIndex') + indexer = -lib.ismember(axis.get_level_values(level), + set(labels)) + else: + indexer = -axis.isin(labels) + + slicer = [slice(None)] * self.ndim + slicer[self._get_axis_number(axis_name)] = indexer + + return self.ix[tuple(slicer)] + + def add_prefix(self, prefix): + """ + Concatenate prefix string with panel items names. 
+ + Parameters + ---------- + prefix : string + + Returns + ------- + with_prefix : type of caller + """ + new_data = self._data.add_prefix(prefix) + return self._constructor(new_data) + + def add_suffix(self, suffix): + """ + Concatenate suffix string with panel items names + + Parameters + ---------- + suffix : string + + Returns + ------- + with_suffix : type of caller + """ + new_data = self._data.add_suffix(suffix) + return self._constructor(new_data) + + def sort_index(self, axis=0, ascending=True): + """ + Sort object by labels (along an axis) + + Parameters + ---------- + axis : {0, 1} + Sort index/rows versus columns + ascending : boolean, default True + Sort ascending vs. descending + + Returns + ------- + sorted_obj : type of caller + """ + axis = self._get_axis_number(axis) + axis_name = self._get_axis_name(axis) + labels = self._get_axis(axis) + + sort_index = labels.argsort() + if not ascending: + sort_index = sort_index[::-1] + + new_axis = labels.take(sort_index) + return self.reindex(**{axis_name: new_axis}) + + def reindex(self, *args, **kwargs): + """Conform DataFrame to new index with optional filling logic, placing + NA/NaN in locations having no value in the previous index. A new object + is produced unless the new index is equivalent to the current one and + copy=False + + Parameters + ---------- + axes : array-like, optional (can be specified in order, or as keywords) + New labels / index to conform to. 
Preferably an Index object to + avoid duplicating data + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed DataFrame + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + copy : boolean, default True + Return a new object, even if the passed indexes are the same + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + fill_value : scalar, default np.NaN + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value + limit : int, default None + Maximum size gap to forward or backward fill + + Examples + -------- + >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C']) + + Returns + ------- + reindexed : same type as calling instance + """ + + # construct the args + axes, kwargs = self._construct_axes_from_arguments(args, kwargs) + method = kwargs.get('method') + level = kwargs.get('level') + copy = kwargs.get('copy',True) + limit = kwargs.get('limit') + fill_value = kwargs.get('fill_value',np.nan) + + self._consolidate_inplace() + + # check if we are a multi reindex + if self._needs_reindex_multi(axes, method, level): + try: + return self._reindex_multi(axes, copy, fill_value) + except: + pass + + # perform the reindex on the axes + if copy and not com._count_not_none(*axes.values()): + return self.copy() + + return self._reindex_axes(axes, level, limit, method, fill_value, copy) + + def _reindex_axes(self, axes, level, limit, method, fill_value, copy): + """ perform the reinxed for all the axes """ + obj = self + for a in self._AXIS_ORDERS: + labels = axes[a] + if labels is None: continue + + # convert to an index if we are not a multi-selection + if level is None: + labels = _ensure_index(labels) + + axis = self._get_axis_number(a) + new_index, indexer = self._get_axis(a).reindex(labels, level=level, limit=limit) + obj = 
obj._reindex_with_indexers({ axis : [ labels, indexer ] }, method, fill_value, copy) + + return obj + + def _needs_reindex_multi(self, axes, method, level): + """ check if we do need a multi reindex """ + return (com._count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type + + def _reindex_multi(self, axes, copy, fill_value): + return NotImplemented + + def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, + limit=None, fill_value=np.nan): + """Conform input object to new index with optional filling logic, placing + NA/NaN in locations having no value in the previous index. A new object + is produced unless the new index is equivalent to the current one and + copy=False + + Parameters + ---------- + index : array-like, optional + New labels / index to conform to. Preferably an Index object to + avoid duplicating data + axis : allowed axis for the input + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed DataFrame + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + copy : boolean, default True + Return a new object, even if the passed indexes are the same + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + limit : int, default None + Maximum size gap to forward or backward fill + + Examples + -------- + >>> df.reindex_axis(['A', 'B', 'C'], axis=1) + + See also + -------- + DataFrame.reindex, DataFrame.reindex_like + + Returns + ------- + reindexed : same type as calling instance + """ + self._consolidate_inplace() + + axis_name = self._get_axis_name(axis) + axis_values = self._get_axis(axis_name) + new_index, indexer = axis_values.reindex(labels, method, level, + limit=limit) + return self._reindex_with_indexers({ axis : [ new_index, indexer ] }, method, fill_value, copy) + + def 
_reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, copy=False): + + # reindex doing multiple operations on different axes if indiciated + new_data = self._data + for axis in sorted(reindexers.keys()): + index, indexer = reindexers[axis] + baxis = self._get_block_manager_axis(axis) + + # reindex the axis + if method is not None: + new_data = new_data.reindex_axis(index, method=method, axis=baxis, + fill_value=fill_value, copy=copy) + + elif indexer is not None: + # TODO: speed up on homogeneous DataFrame objects + indexer = com._ensure_int64(indexer) + new_data = new_data.reindex_indexer(index, indexer, axis=baxis, + fill_value=fill_value) + + elif baxis == 0 and index is not None and index is not new_data.axes[baxis]: + new_data = new_data.reindex_items(index, copy=copy, + fill_value=fill_value) + + elif baxis > 0 and index is not None and index is not new_data.axes[baxis]: + new_data = new_data.copy(deep=copy) + new_data.set_axis(baxis,index) + + if copy and new_data is self._data: + new_data = new_data.copy() + + return self._constructor(new_data) + + def _reindex_axis(self, new_index, fill_method, axis, copy): + new_data = self._data.reindex_axis(new_index, axis=axis, + method=fill_method, copy=copy) + + if new_data is self._data and not copy: + return self + else: + return self._constructor(new_data) + + def filter(self, items=None, like=None, regex=None, axis=None): + """ + Restrict the info axis to set of items or wildcard + + Parameters + ---------- + items : list-like + List of info axis to restrict to (must not all be present) + like : string + Keep info axis where "arg in col == True" + regex : string (regular expression) + Keep info axis with re.search(regex, col) == True + + Notes + ----- + Arguments are mutually exclusive, but this is not checked for + + Returns + ------- + same type as input object with filtered info axis + """ + import re + + if axis is None: + axis = self._info_axis_name + axis_name = self._get_axis_name(axis) 
+ axis_values = self._get_axis(axis_name) + + if items is not None: + return self.reindex(**{ axis_name : [r for r in items if r in axis_values ] }) + elif like: + matchf = lambda x: (like in x if isinstance(x, basestring) + else like in str(x)) + return self.select(matchf, axis=axis_name) + elif regex: + matcher = re.compile(regex) + return self.select(lambda x: matcher.search(x) is not None, axis=axis_name) + else: + raise ValueError('items was None!') + + #---------------------------------------------------------------------- + # Attribute access + + def __getattr__(self, name): + """After regular attribute access, try looking up the name of a the info + This allows simpler access to columns for interactive use.""" + if name in self._info_axis: + return self[name] + raise AttributeError("'%s' object has no attribute '%s'" % + (type(self).__name__, name)) + + def __setattr__(self, name, value): + """After regular attribute access, try looking up the name of the info + This allows simpler access to columns for interactive use.""" + if name in _internal_names_set: + object.__setattr__(self, name, value) + else: + try: + existing = getattr(self, name) + if isinstance(existing, Index): + object.__setattr__(self, name, value) + elif name in self._info_axis: + self[name] = value + else: + object.__setattr__(self, name, value) + except (AttributeError, TypeError): + object.__setattr__(self, name, value) + + #---------------------------------------------------------------------- + # Getting and setting elements + + #---------------------------------------------------------------------- + # Consolidation of internals + + def _consolidate_inplace(self): + self._clear_item_cache() + self._data = self._data.consolidate() + + def consolidate(self, inplace=False): + """ + Compute NDFrame with "consolidated" internals (data of each dtype + grouped together in a single ndarray). 
Mainly an internal API function, + but available here to the savvy user + + Parameters + ---------- + inplace : boolean, default False + If False return new object, otherwise modify existing object + + Returns + ------- + consolidated : type of caller + """ + if inplace: + self._consolidate_inplace() + else: + cons_data = self._data.consolidate() + if cons_data is self._data: + cons_data = cons_data.copy() + return self._constructor(cons_data) + + @property + def _is_mixed_type(self): + return self._data.is_mixed_type + + @property + def _is_numeric_mixed_type(self): + return self._data.is_numeric_mixed_type + + #---------------------------------------------------------------------- + # Internal Interface Methods + + def as_matrix(self, columns=None): + """ + Convert the frame to its Numpy-array matrix representation. Columns + are presented in sorted order unless a specific list of columns is + provided. + + NOTE: the dtype will be a lower-common-denominator dtype (implicit upcasting) + that is to say if the dtypes (even of numeric types) are mixed, the one that accomodates all will be chosen + use this with care if you are not dealing with the blocks + + e.g. 
if the dtypes are float16,float32 -> float32 + float16,float32,float64 -> float64 + int32,uint8 -> int32 + + Parameters + ---------- + columns : array-like + Specific column order + + Returns + ------- + values : ndarray + If the DataFrame is heterogeneous and contains booleans or objects, + the result will be of dtype=object + """ + self._consolidate_inplace() + if self._AXIS_REVERSED: + return self._data.as_matrix(columns).T + return self._data.as_matrix(columns) + + @property + def values(self): + return self.as_matrix() + + @property + def _get_values(self): + # compat + return self.as_matrix() + + def get_values(self): + """ same as values (but handles sparseness conversions) """ + return self.as_matrix() + + def get_dtype_counts(self): + """ return the counts of dtypes in this frame """ + from pandas import Series + return Series(self._data.get_dtype_counts()) + + def get_ftype_counts(self): + """ return the counts of ftypes in this frame """ + return Series(self._data.get_ftype_counts()) + + def as_blocks(self, columns=None): + """ + Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. + are presented in sorted order unless a specific list of columns is + provided. + + NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) + + Parameters + ---------- + columns : array-like + Specific column order + + Returns + ------- + values : a list of Object + """ + self._consolidate_inplace() + + bd = dict() + for b in self._data.blocks: + b = b.reindex_items_from(columns or b.items) + bd[str(b.dtype)] = self._constructor(BlockManager([ b ], [ b.items, self.index ])) + return bd + + @property + def blocks(self): + return self.as_blocks() + + def astype(self, dtype, copy = True, raise_on_error = True): + """ + Cast object to input numpy.dtype + Return a copy when copy = True (be really careful with this!) 
+ + Parameters + ---------- + dtype : numpy.dtype or Python type + raise_on_error : raise on invalid input + + Returns + ------- + casted : type of caller + """ + + mgr = self._data.astype(dtype, copy = copy, raise_on_error = raise_on_error) + return self._constructor(mgr) + + def copy(self, deep=True): + """ + Make a copy of this object + + Parameters + ---------- + deep : boolean, default True + Make a deep copy, i.e. also copy data + + Returns + ------- + copy : type of caller + """ + data = self._data + if deep: + data = data.copy() + return self._constructor(data) + + def convert_objects(self, convert_dates=True, convert_numeric=False): + """ + Attempt to infer better dtype for object columns + Always returns a copy (even if no object columns) + + Parameters + ---------- + convert_dates : if True, attempt to soft convert_dates, if 'coerce', force conversion (and non-convertibles get NaT) + convert_numeric : if True attempt to coerce to numerbers (including strings), non-convertibles get NaN + + Returns + ------- + converted : asm as input object + """ + return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric)) + + #---------------------------------------------------------------------- + # Filling NA's + + def fillna(self, value=None, method=None, axis=0, inplace=False, + limit=None, downcast=None): + """ + Fill NA/NaN values using the specified method + + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + value : scalar or dict + Value to use to fill holes (e.g. 
0), alternately a dict of values + specifying which value to use for each column (columns not in the + dict will not be filled) + axis : a valid axis for this object + inplace : boolean, default False + If True, fill the DataFrame in place. Note: this will modify any + other views on this DataFrame, like if you took a no-copy slice of + an existing DataFrame, for example a column in a DataFrame. Returns + a reference to the filled object, which is self if inplace=True + limit : int, default None + Maximum size gap to forward or backward fill + downcast : dict, default is None, a dict of item->dtype of what to + downcast if possible + + See also + -------- + reindex, asfreq + + Returns + ------- + filled : DataFrame + """ + self._consolidate_inplace() + + axis = self._get_axis_number(axis) + if axis > self._AXIS_LEN: + raise Exception("axis [%s] is not supported for this type [%s]" % (axis,type(self))) + + if value is None: + if method is None: + raise ValueError('must specify a fill method or value') + if self._is_mixed_type and axis == 1: + if inplace: + raise NotImplementedError() + return self.T.fillna(method=method, limit=limit).T + + method = com._clean_fill_method(method) + new_data = self._data.interpolate(method = method, + axis = axis, + limit = limit, + inplace = inplace, + coerce = True) + else: + if method is not None: + raise ValueError('cannot specify both a fill method and value') + # Float type values + for a in self._AXIS_ORDERS: + if len(self._get_axis(a)) == 0: + return self + + from pandas import Series + if isinstance(value, (dict, Series)): + if axis == 1: + raise NotImplementedError('Currently only can fill ' + 'with dict/Series column ' + 'by column') + + result = self if inplace else self.copy() + for k, v in value.iteritems(): + if k not in result: + continue + result[k].fillna(v, inplace=True, downcast=downcast) + return result + else: + new_data = self._data.fillna(value, inplace=inplace, downcast=downcast) + + if inplace: + self._data = 
new_data + else: + return self._constructor(new_data) + + def ffill(self, axis=0, inplace=False, limit=None): + return self.fillna(method='ffill', axis=axis, inplace=inplace, + limit=limit) + + def bfill(self, axis=0, inplace=False, limit=None): + return self.fillna(method='bfill', axis=axis, inplace=inplace, + limit=limit) + + def replace(self, to_replace, value=None, method='pad', axis=0, + inplace=False, limit=None): + """ + Replace values given in 'to_replace' with 'value' or using 'method' + + Parameters + ---------- + value : scalar or dict, default None + Value to use to fill holes (e.g. 0), alternately a dict of values + specifying which value to use for each column (columns not in the + dict will not be filled) + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + axis : a valid axis for this object + inplace : boolean, default False + If True, fill the DataFrame in place. Note: this will modify any + other views on this DataFrame, like if you took a no-copy slice of + an existing DataFrame, for example a column in a DataFrame. 
Returns + a reference to the filled object, which is self if inplace=True + limit : int, default None + Maximum size gap to forward or backward fill + + See also + -------- + reindex, asfreq + + Returns + ------- + filled : DataFrame + """ + self._consolidate_inplace() + + axis = self._get_axis_number(axis) + if axis > self._AXIS_LEN: + raise Exception("axis [%s] is not supported for this type [%s]" % (axis,type(self))) + + if value is None: + return self._interpolate(to_replace, method, axis, inplace, limit) + else: + for a in self._AXIS_ORDERS: + if len(self._get_axis(a)) == 0: + return self + + from pandas import Series + new_data = self._data + if isinstance(to_replace, (dict, Series)): + if isinstance(value, (dict, Series)): # {'A' : NA} -> {'A' : 0} + new_data = self._data + for c, src in to_replace.iteritems(): + if c in value and c in self: + new_data = new_data.replace(src, value[c], + filter=[ c ], + inplace=inplace) + + elif not isinstance(value, (list, np.ndarray)): + new_data = self._data + for k, src in to_replace.iteritems(): + if k in self: + new_data = new_data.replace(src, value, + filter = [ k ], + inplace=inplace) + else: + raise ValueError('Fill value must be scalar or dict or Series') + + elif isinstance(to_replace, (list, np.ndarray)): + # [NA, ''] -> [0, 'missing'] + if isinstance(value, (list, np.ndarray)): + if len(to_replace) != len(value): + raise ValueError('Replacement lists must match ' + 'in length. 
Expecting %d got %d ' % + (len(to_replace), len(value))) + + new_data = self._data.replace_list(to_replace, value, + inplace=inplace) + + else: # [NA, ''] -> 0 + new_data = self._data.replace(to_replace, value, + inplace=inplace) + + else: + + # dest iterable dict-like + if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1} + + new_data = self._data + for k, v in value.iteritems(): + if k in self: + new_data = new_data.replace(to_replace, v, + filter=[ k ], + inplace=inplace) + + elif not isinstance(value, (list, np.ndarray)): # NA -> 0 + new_data = self._data.replace(to_replace, value, + inplace=inplace) + else: + raise ValueError('Invalid to_replace type: %s' % + type(to_replace)) # pragma: no cover + + + if inplace: + self._data = new_data + else: + return self._constructor(new_data) + + def _interpolate(self, to_replace, method, axis, inplace, limit): + if self._is_mixed_type and axis == 1: + return self.T.replace(to_replace, method=method, limit=limit).T + + method = com._clean_fill_method(method) + + from pandas import Series + if isinstance(to_replace, (dict, Series)): + if axis == 1: + return self.T.replace(to_replace, method=method, + limit=limit).T + + rs = self if inplace else self.copy() + for k, v in to_replace.iteritems(): + if k in rs: + rs[k].replace(v, method=method, limit=limit, + inplace=True) + return rs if not inplace else None + + else: - setattr(cls,name,property(_indexer)) + new_data = self._data.interpolate(method = method, + axis = axis, + limit = limit, + inplace = inplace, + missing = to_replace, + coerce = False) + + if inplace: + self._data = new_data + else: + return self._constructor(new_data) + + #---------------------------------------------------------------------- + # Action Methods def abs(self): """ @@ -86,25 +1454,9 @@ def abs(self): ------- abs: type of caller """ - return np.abs(self) - - def get(self, key, default=None): - """ - Get item from object for given key (DataFrame column, Panel slice, - etc.). 
Returns default value if not found - - Parameters - ---------- - key : object - - Returns - ------- - value : type of items contained in object - """ - try: - return self[key] - except KeyError: - return default + obj = np.abs(self) + obj = com._possibly_cast_to_timedelta(obj, coerce=False) + return obj def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True): @@ -320,379 +1672,301 @@ def last(self, offset): start = self.index.searchsorted(start_date, side='right') return self.ix[start:] - def select(self, crit, axis=0): - """ - Return data corresponding to axis labels matching criteria - - Parameters - ---------- - crit : function - To be called on each index (label). Should return True or False - axis : int - - Returns - ------- - selection : type of caller - """ - axis_name = self._get_axis_name(axis) - axis = self._get_axis(axis) - - if len(axis) > 0: - new_axis = axis[np.asarray([bool(crit(label)) for label in axis])] - else: - new_axis = axis - - return self.reindex(**{axis_name: new_axis}) - - def drop(self, labels, axis=0, level=None): - """ - Return new object with labels in requested axis removed - - Parameters - ---------- - labels : array-like - axis : int - level : int or name, default None - For MultiIndex - - Returns - ------- - dropped : type of caller - """ - axis_name = self._get_axis_name(axis) - axis, axis_ = self._get_axis(axis), axis - - if axis.is_unique: - if level is not None: - if not isinstance(axis, MultiIndex): - raise AssertionError('axis must be a MultiIndex') - new_axis = axis.drop(labels, level=level) - else: - new_axis = axis.drop(labels) - dropped = self.reindex(**{axis_name: new_axis}) - try: - dropped.axes[axis_].names = axis.names - except AttributeError: - pass - return dropped - - else: - if level is not None: - if not isinstance(axis, MultiIndex): - raise AssertionError('axis must be a MultiIndex') - indexer = -lib.ismember(axis.get_level_values(level), - set(labels)) - else: - indexer = 
-axis.isin(labels) - - slicer = [slice(None)] * self.ndim - slicer[self._get_axis_number(axis_name)] = indexer - - return self.ix[tuple(slicer)] - - def sort_index(self, axis=0, ascending=True): - """ - Sort object by labels (along an axis) - - Parameters - ---------- - axis : {0, 1} - Sort index/rows versus columns - ascending : boolean, default True - Sort ascending vs. descending - - Returns - ------- - sorted_obj : type of caller - """ - axis = self._get_axis_number(axis) - axis_name = self._get_axis_name(axis) - labels = self._get_axis(axis) - - sort_index = labels.argsort() - if not ascending: - sort_index = sort_index[::-1] - - new_axis = labels.take(sort_index) - return self.reindex(**{axis_name: new_axis}) - - def reindex(self, *args, **kwds): - raise NotImplementedError - - def tshift(self, periods=1, freq=None, **kwds): - """ - Shift the time index, using the index's frequency if available - - Parameters - ---------- - periods : int - Number of periods to move, can be positive or negative - freq : DateOffset, timedelta, or time rule string, default None - Increment to use from datetools module or time rule (e.g. 'EOM') - - Notes - ----- - If freq is not specified then tries to use the freq or inferred_freq - attributes of the index. 
If neither of those attributes exist, a - ValueError is thrown - - Returns - ------- - shifted : Series - """ - if freq is None: - freq = getattr(self.index, 'freq', None) - - if freq is None: - freq = getattr(self.index, 'inferred_freq', None) - - if freq is None: - msg = 'Freq was not given and was not set in the index' - raise ValueError(msg) - - return self.shift(periods, freq, **kwds) + def to_hdf(self, path_or_buf, key, **kwargs): + """ activate the HDFStore """ + from pandas.io import pytables + return pytables.to_hdf(path_or_buf, key, self, **kwargs) - def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, - **kwds): + def align(self, other, join='outer', axis=None, level=None, copy=True, + fill_value=np.nan, method=None, limit=None, fill_axis=0): """ - Percent change over given number of periods + Align two object on their axes with the + specified join method for each axis Index Parameters ---------- - periods : int, default 1 - Periods to shift for forming percent change - fill_method : str, default 'pad' - How to handle NAs before computing percent changes + other : DataFrame or Series + join : {'outer', 'inner', 'left', 'right'}, default 'outer' + axis : allowed axis of the other object, default None + Align on index (0), columns (1), or both (None) + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + copy : boolean, default True + Always returns new objects. If copy=False and no reindexing is + required then original objects are returned. + fill_value : scalar, default np.NaN + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value + method : str, default None limit : int, default None - The number of consecutive NAs to fill before stopping - freq : DateOffset, timedelta, or offset alias string, optional - Increment to use from time series API (e.g. 
'M' or BDay()) + fill_axis : {0, 1}, default 0 + Filling axis, method and limit Returns ------- - chg : Series or DataFrame + (left, right) : (type of input, type of other) + Aligned objects """ - if fill_method is None: - data = self + from pandas import DataFrame,Series + + if isinstance(other, DataFrame): + return self._align_frame(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, + method=method, limit=limit, + fill_axis=fill_axis) + elif isinstance(other, Series): + return self._align_series(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, + method=method, limit=limit, + fill_axis=fill_axis) + else: # pragma: no cover + raise TypeError('unsupported type: %s' % type(other)) + + def _align_frame(self, other, join='outer', axis=None, level=None, + copy=True, fill_value=np.nan, method=None, limit=None, + fill_axis=0): + # defaults + join_index, join_columns = None, None + ilidx, iridx = None, None + clidx, cridx = None, None + + if axis is None or axis == 0: + if not self.index.equals(other.index): + join_index, ilidx, iridx = \ + self.index.join(other.index, how=join, level=level, + return_indexers=True) + + if axis is None or axis == 1: + if not self.columns.equals(other.columns): + join_columns, clidx, cridx = \ + self.columns.join(other.columns, how=join, level=level, + return_indexers=True) + + left = self._reindex_with_indexers({ 0 : [ join_index, ilidx ], + 1 : [ join_columns, clidx ] }, + copy=copy, fill_value=fill_value) + right = other._reindex_with_indexers({ 0 : [ join_index, iridx ], + 1 : [ join_columns, cridx ] }, + copy=copy, fill_value=fill_value) + + + if method is not None: + left = left.fillna(axis=fill_axis, method=method, limit=limit) + right = right.fillna(axis=fill_axis, method=method, limit=limit) + + return left, right + + def _align_series(self, other, join='outer', axis=None, level=None, + copy=True, fill_value=None, method=None, limit=None, + fill_axis=0): + from pandas import 
DataFrame + + fdata = self._data + if axis == 0: + join_index = self.index + lidx, ridx = None, None + if not self.index.equals(other.index): + join_index, lidx, ridx = self.index.join(other.index, how=join, + return_indexers=True) + + if lidx is not None: + fdata = fdata.reindex_indexer(join_index, lidx, axis=1) + elif axis == 1: + join_index = self.columns + lidx, ridx = None, None + if not self.columns.equals(other.index): + join_index, lidx, ridx = \ + self.columns.join(other.index, how=join, + return_indexers=True) + + if lidx is not None: + fdata = fdata.reindex_indexer(join_index, lidx, axis=0) else: - data = self.fillna(method=fill_method, limit=limit) - rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1 - if freq is None: - mask = com.isnull(self.values) - np.putmask(rs.values, mask, np.nan) - return rs - - def to_hdf(self, path_or_buf, key, **kwargs): - """ activate the HDFStore """ - from pandas.io import pytables - return pytables.to_hdf(path_or_buf, key, self, **kwargs) - -# install the indexerse -for _name, _indexer in indexing.get_indexers_list(): - PandasObject._create_indexer(_name,_indexer) - -class NDFrame(PandasObject): - """ - N-dimensional analogue of DataFrame. 
Store multi-dimensional in a - size-mutable, labeled data structure - - Parameters - ---------- - data : BlockManager - axes : list - copy : boolean, default False - """ - # kludge - _default_stat_axis = 0 + raise ValueError('Must specify axis=0 or 1') - def __init__(self, data, axes=None, copy=False, dtype=None): - if dtype is not None: - data = data.astype(dtype) - elif copy: - data = data.copy() + if copy and fdata is self._data: + fdata = fdata.copy() - if axes is not None: - for i, ax in enumerate(axes): - data = data.reindex_axis(ax, axis=i) + left_result = DataFrame(fdata) + right_result = other if ridx is None else other.reindex(join_index) - object.__setattr__(self, '_data', data) - object.__setattr__(self, '_item_cache', {}) + fill_na = notnull(fill_value) or (method is not None) + if fill_na: + return (left_result.fillna(fill_value, method=method, limit=limit, + axis=fill_axis), + right_result.fillna(fill_value, method=method, + limit=limit)) + else: + return left_result, right_result - def astype(self, dtype, copy = True, raise_on_error = True): + def where(self, cond, other=np.nan, inplace=False, try_cast=False, raise_on_error=True): """ - Cast object to input numpy.dtype - Return a copy when copy = True (be really careful with this!) + Return an object of same shape as self and whose corresponding + entries are from self where cond is True and otherwise are from other. Parameters ---------- - dtype : numpy.dtype or Python type - raise_on_error : raise on invalid input + cond : boolean DataFrame or array + other : scalar or DataFrame + inplace : boolean, default False + Whether to perform the operation in place on the data + try_cast : boolean, default False + try to cast the result back to the input type (if possible), + raise_on_error : boolean, default True + Whether to raise on invalid data types (e.g. 
trying to where on + strings) Returns ------- - casted : type of caller + wh : DataFrame """ + if isinstance(cond, NDFrame): + cond = cond.reindex(**self._construct_axes_dict()) + else: + if not hasattr(cond, 'shape'): + raise ValueError('where requires an ndarray like object for its ' + 'condition') + if cond.shape != self.shape: + raise ValueError('Array conditional must be same shape as self') + cond = self._constructor(cond, **self._construct_axes_dict()) - mgr = self._data.astype(dtype, copy = copy, raise_on_error = raise_on_error) - return self._constructor(mgr) - - @property - def _constructor(self): - return NDFrame - - @property - def axes(self): - return self._data.axes + if inplace: + cond = -(cond.fillna(True).astype(bool)) + else: + cond = cond.fillna(False).astype(bool) - def __repr__(self): - return 'NDFrame' + # try to align + try_quick = True + if hasattr(other, 'align'): - @property - def values(self): - return self._data.as_matrix() + # align with me + if other.ndim <= self.ndim: - @property - def ndim(self): - return self._data.ndim + _, other = self.align(other, join='left', fill_value=np.nan) - def _set_axis(self, axis, labels): - self._data.set_axis(axis, labels) - self._clear_item_cache() + # slice me out of the other + else: + raise NotImplemented + + elif is_list_like(other): + + if self.ndim == 1: + + # try to set the same dtype as ourselves + new_other = np.array(other,dtype=self.dtype) + if not (new_other == np.array(other)).all(): + other = np.array(other) + + # we can't use our existing dtype + # because of incompatibilities + try_quick = False + else: + other = new_other + else: - def __getitem__(self, item): - return self._get_item_cache(item) + other = np.array(other) - def _get_item_cache(self, item): - cache = self._item_cache - try: - return cache[item] - except Exception: - values = self._data.get(item) - res = self._box_item_values(item, values) - cache[item] = res - return res + if isinstance(other,np.ndarray): - def 
_box_item_values(self, key, values): - raise NotImplementedError + if other.shape != self.shape: - def _clear_item_cache(self): - self._item_cache.clear() + if self.ndim == 1: - def _set_item(self, key, value): - self._data.set(key, value) - self._clear_item_cache() + icond = cond.values - def __delitem__(self, key): - """ - Delete item - """ - deleted = False + # GH 2745 + # treat like a scalar + if len(other) == 1: + other = np.array(other[0]*len(self)) - maybe_shortcut = False - if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex): - try: - maybe_shortcut = key not in self.columns._engine - except TypeError: - pass + # GH 3235 + # match True cond to other + elif len(cond[icond]) == len(other): - if maybe_shortcut: - # Allow shorthand to delete all columns whose first len(key) - # elements match key: - if not isinstance(key, tuple): - key = (key,) - for col in self.columns: - if isinstance(col, tuple) and col[:len(key)] == key: - del self[col] - deleted = True - if not deleted: - # If the above loop ran and didn't delete anything because - # there was no match, this call should raise the appropriate - # exception: - self._data.delete(key) + # try to not change dtype at first (if try_quick) + if try_quick: - try: - del self._item_cache[key] - except KeyError: - pass + try: + new_other = _values_from_object(self).copy() + new_other[icond] = other + other = new_other + except: + try_quick = False - def get_dtype_counts(self): - """ return the counts of dtypes in this frame """ - from pandas import Series - return Series(self._data.get_dtype_counts()) + # let's create a new (if we failed at the above + # or not try_quick + if not try_quick: + dtype, fill_value = _maybe_promote(other.dtype) + new_other = np.empty(len(icond),dtype=dtype) + new_other.fill(fill_value) + com._maybe_upcast_putmask(new_other, icond, other) + other = new_other - def pop(self, item): - """ - Return item and drop from frame. Raise KeyError if not found. 
- """ - result = self[item] - del self[item] - return result + else: + raise ValueError('Length of replacements must equal series length') - def squeeze(self): - """ squeeze length 1 dimensions """ - try: - return self.ix[tuple([ slice(None) if len(a) > 1 else a[0] for a in self.axes ])] - except: - return self + else: + raise ValueError('other must be the same shape as self ' + 'when an ndarray') - def _expand_axes(self, key): - new_axes = [] - for k, ax in zip(key, self.axes): - if k not in ax: - if type(k) != ax.dtype.type: - ax = ax.astype('O') - new_axes.append(ax.insert(len(ax), k)) + # we are the same shape, so create an actual object for alignment else: - new_axes.append(ax) + other = self._constructor(other, **self._construct_axes_dict()) - return new_axes + if inplace: + # we may have different type blocks come out of putmask, so reconstruct the block manager + self._data = self._data.putmask(cond,other,inplace=True) - #---------------------------------------------------------------------- - # Consolidation of internals + else: + new_data = self._data.where(other, cond, raise_on_error=raise_on_error, try_cast=try_cast) - def _consolidate_inplace(self): - self._clear_item_cache() - self._data = self._data.consolidate() + return self._constructor(new_data) - def consolidate(self, inplace=False): + def mask(self, cond): """ - Compute NDFrame with "consolidated" internals (data of each dtype - grouped together in a single ndarray). 
Mainly an internal API function, - but available here to the savvy user + Returns copy of self whose values are replaced with nan if the + inverted condition is True Parameters ---------- - inplace : boolean, default False - If False return new object, otherwise modify existing object + cond: boolean object or array Returns ------- - consolidated : type of caller + wh: same as input """ - if inplace: - self._consolidate_inplace() - else: - cons_data = self._data.consolidate() - if cons_data is self._data: - cons_data = cons_data.copy() - return self._constructor(cons_data) + return self.where(~cond, np.nan) - @property - def _is_mixed_type(self): - return self._data.is_mixed_type - - @property - def _is_numeric_mixed_type(self): - return self._data.is_numeric_mixed_type + def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, + **kwds): + """ + Percent change over given number of periods - def _reindex_axis(self, new_index, fill_method, axis, copy): - new_data = self._data.reindex_axis(new_index, axis=axis, - method=fill_method, copy=copy) + Parameters + ---------- + periods : int, default 1 + Periods to shift for forming percent change + fill_method : str, default 'pad' + How to handle NAs before computing percent changes + limit : int, default None + The number of consecutive NAs to fill before stopping + freq : DateOffset, timedelta, or offset alias string, optional + Increment to use from time series API (e.g. 
'M' or BDay()) - if new_data is self._data and not copy: - return self + Returns + ------- + chg : Series or DataFrame + """ + if fill_method is None: + data = self else: - return self._constructor(new_data) + data = self.fillna(method=fill_method, limit=limit) + rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1 + if freq is None: + mask = com.isnull(_values_from_object(self)) + np.putmask(rs.values, mask, np.nan) + return rs def cumsum(self, axis=None, skipna=True): """ @@ -711,13 +1985,13 @@ def cumsum(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, 0.) @@ -730,9 +2004,6 @@ def cumsum(self, axis=None, skipna=True): result = y.cumsum(axis) return self._wrap_array(result, self.axes, copy=False) - def _wrap_array(self, array, axes, copy=False): - raise NotImplementedError - def cumprod(self, axis=None, skipna=True): """ Return cumulative product over requested axis as DataFrame @@ -750,13 +2021,13 @@ def cumprod(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, 1.) 
@@ -785,13 +2056,13 @@ def cummax(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, -np.inf) @@ -821,13 +2092,13 @@ def cummin(self, axis=None, skipna=True): y : DataFrame """ if axis is None: - axis = self._default_stat_axis + axis = self._stat_axis_number else: axis = self._get_axis_number(axis) - y = self.values.copy() + y = _values_from_object(self).copy() if not issubclass(y.dtype.type, np.integer): - mask = np.isnan(self.values) + mask = np.isnan(_values_from_object(self)) if skipna: np.putmask(y, mask, np.inf) @@ -840,136 +2111,72 @@ def cummin(self, axis=None, skipna=True): result = np.minimum.accumulate(y, axis) return self._wrap_array(result, self.axes, copy=False) - def copy(self, deep=True): - """ - Make a copy of this object - - Parameters - ---------- - deep : boolean, default True - Make a deep copy, i.e. also copy data - - Returns - ------- - copy : type of caller - """ - data = self._data - if deep: - data = data.copy() - return self._constructor(data) - - def swaplevel(self, i, j, axis=0): + def tshift(self, periods=1, freq=None, **kwds): """ - Swap levels i and j in a MultiIndex on a particular axis + Shift the time index, using the index's frequency if available Parameters ---------- - i, j : int, string (can be mixed) - Level of index to be swapped. Can pass level name as string. - - Returns - ------- - swapped : type of caller (new object) - """ - axis = self._get_axis_number(axis) - result = self.copy() - labels = result._data.axes[axis] - result._data.set_axis(axis, labels.swaplevel(i, j)) - return result - - def add_prefix(self, prefix): - """ - Concatenate prefix string with panel items names. 
+ periods : int + Number of periods to move, can be positive or negative + freq : DateOffset, timedelta, or time rule string, default None + Increment to use from datetools module or time rule (e.g. 'EOM') - Parameters - ---------- - prefix : string + Notes + ----- + If freq is not specified then tries to use the freq or inferred_freq + attributes of the index. If neither of those attributes exist, a + ValueError is thrown Returns ------- - with_prefix : type of caller + shifted : Series """ - new_data = self._data.add_prefix(prefix) - return self._constructor(new_data) + if freq is None: + freq = getattr(self.index, 'freq', None) - def add_suffix(self, suffix): - """ - Concatenate suffix string with panel items names + if freq is None: + freq = getattr(self.index, 'inferred_freq', None) - Parameters - ---------- - suffix : string + if freq is None: + msg = 'Freq was not given and was not set in the index' + raise ValueError(msg) - Returns - ------- - with_suffix : type of caller - """ - new_data = self._data.add_suffix(suffix) - return self._constructor(new_data) + return self.shift(periods, freq, **kwds) - def rename_axis(self, mapper, axis=0, copy=True): - """ - Alter index and / or columns using input function or functions. - Function / dict values must be unique (1-to-1). Labels not contained in - a dict / Series will be left as-is. + def truncate(self, before=None, after=None, copy=True): + """Function truncate a sorted DataFrame / Series before and/or after + some particular dates. 
Parameters ---------- - mapper : dict-like or function, optional - axis : int, default 0 - copy : boolean, default True - Also copy underlying data - - See also - -------- - DataFrame.rename + before : date + Truncate before date + after : date + Truncate after date Returns ------- - renamed : type of caller - """ - # should move this at some point - from pandas.core.series import _get_rename_function - - mapper_f = _get_rename_function(mapper) - - axis = self._get_axis_number(axis) - if axis == 0: - new_data = self._data.rename_items(mapper_f, copydata=copy) - else: - new_data = self._data.rename_axis(mapper_f, axis=axis) - if copy: - new_data = new_data.copy() - - return self._constructor(new_data) - - def take(self, indices, axis=0, convert=True): + truncated : type of caller """ - Analogous to ndarray.take + from pandas.tseries.tools import to_datetime + before = to_datetime(before) + after = to_datetime(after) + + if before is not None and after is not None: + if before > after: + raise AssertionError('Truncate: %s must be after %s' % + (before, after)) - Parameters - ---------- - indices : list / array of ints - axis : int, default 0 - convert : translate neg to pos indices (default) + result = self.ix[before:after] - Returns - ------- - taken : type of caller - """ + if isinstance(self.index, MultiIndex): + result.index = self.index.truncate(before, after) - # check/convert indicies here - if convert: - axis = self._get_axis_number(axis) - indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) + if copy: + result = result.copy() - if axis == 0: - labels = self._get_axis(axis) - new_items = labels.take(indices) - new_data = self._data.reindex_axis(new_items, axis=0) - else: - new_data = self._data.take(indices, axis=axis, verify=False) - return self._constructor(new_data) + return result def tz_convert(self, tz, axis=0, copy=True): """ @@ -1044,40 +2251,7 @@ def tz_localize(self, tz, axis=0, copy=True): return new_obj -# Good for either Series 
or DataFrame - - -def truncate(self, before=None, after=None, copy=True): - """Function truncate a sorted DataFrame / Series before and/or after - some particular dates. - - Parameters - ---------- - before : date - Truncate before date - after : date - Truncate after date - copy : boolean, default True - - Returns - ------- - truncated : type of caller - """ - from pandas.tseries.tools import to_datetime - before = to_datetime(before) - after = to_datetime(after) - - if before is not None and after is not None: - if before > after: - raise AssertionError('Truncate: %s must be after %s' % - (before, after)) - - result = self.ix[before:after] - - if isinstance(self.index, MultiIndex): - result.index = self.index.truncate(before, after) - - if copy: - result = result.copy() +# install the indexerse +for _name, _indexer in indexing.get_indexers_list(): + NDFrame._create_indexer(_name,_indexer) - return result diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index aef44bd91396d..00fc135f6c21a 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -221,7 +221,7 @@ def name(self): @property def _selection_list(self): - if not isinstance(self._selection, (list, tuple, np.ndarray)): + if not isinstance(self._selection, (list, tuple, Series, np.ndarray)): return [self._selection] return self._selection @@ -270,7 +270,7 @@ def get_group(self, name, obj=None): obj = self.obj inds = self.indices[name] - return obj.take(inds, axis=self.axis) + return obj.take(inds, axis=self.axis, convert=False) def __iter__(self): """ @@ -368,7 +368,11 @@ def median(self): except GroupByError: raise except Exception: # pragma: no cover - f = lambda x: x.median(axis=self.axis) + + def f(x): + if isinstance(x, np.ndarray): + x = Series(x) + return x.median(axis=self.axis) return self._python_agg_general(f) def std(self, ddof=1): @@ -895,9 +899,9 @@ def _aggregate_series_fast(self, obj, func): group_index, _, ngroups = self.group_info # avoids object / Series creation 
overhead - dummy = obj[:0].copy() + dummy = obj._get_values(slice(None,0)).to_dense() indexer = _algos.groupsort_indexer(group_index, ngroups)[0] - obj = obj.take(indexer) + obj = obj.take(indexer, convert=False) group_index = com.take_nd(group_index, indexer, allow_fill=False) grouper = lib.SeriesGrouper(obj, func, group_index, ngroups, dummy) @@ -905,19 +909,18 @@ def _aggregate_series_fast(self, obj, func): return result, counts def _aggregate_series_pure_python(self, obj, func): + group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) result = None - group_index, _, ngroups = self.group_info - splitter = get_splitter(obj, group_index, ngroups, axis=self.axis) for label, group in splitter: res = func(group) if result is None: - if isinstance(res, np.ndarray) or isinstance(res, list): + if isinstance(res, (Series, np.ndarray)) or isinstance(res, list): raise ValueError('Function does not reduce') result = np.empty(ngroups, dtype='O') @@ -1018,13 +1021,13 @@ def get_iterator(self, data, axis=0): start = 0 for edge, label in izip(self.bins, self.binlabels): inds = range(start, edge) - yield label, data.take(inds, axis=axis) + yield label, data.take(inds, axis=axis, convert=False) start = edge n = len(data.axes[axis]) if edge < n: inds = range(edge, n) - yield self.binlabels[-1], data.take(inds, axis=axis) + yield self.binlabels[-1], data.take(inds, axis=axis, convert=False) def apply(self, f, data, axis=0, keep_internal=False): result_keys = [] @@ -1036,6 +1039,7 @@ def apply(self, f, data, axis=0, keep_internal=False): # group might be modified group_axes = _get_axes(group) res = f(group) + if not _is_indexed_like(res, group_axes): mutated = True @@ -1199,7 +1203,7 @@ def __init__(self, index, grouper=None, name=None, level=None, self.name = factor.name # no level passed - if not isinstance(self.grouper, np.ndarray): + if not isinstance(self.grouper, (Series, np.ndarray)): self.grouper = self.index.map(self.grouper) if not 
(hasattr(self.grouper,"__len__") and \ len(self.grouper) == len(self.index)): @@ -1281,7 +1285,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True): # what are we after, exactly? match_axis_length = len(keys) == len(group_axis) any_callable = any(callable(g) or isinstance(g, dict) for g in keys) - any_arraylike = any(isinstance(g, (list, tuple, np.ndarray)) + any_arraylike = any(isinstance(g, (list, tuple, Series, np.ndarray)) for g in keys) try: @@ -1346,7 +1350,7 @@ def _convert_grouper(axis, grouper): return grouper.values else: return grouper.reindex(axis).values - elif isinstance(grouper, (list, np.ndarray)): + elif isinstance(grouper, (list, Series, np.ndarray)): if len(grouper) != len(axis): raise AssertionError('Grouper and axis must be same length') return grouper @@ -1506,7 +1510,7 @@ def _aggregate_named(self, func, *args, **kwargs): for name, group in self: group.name = name output = func(group, *args, **kwargs) - if isinstance(output, np.ndarray): + if isinstance(output, (Series, np.ndarray)): raise Exception('Must produce aggregated value') result[name] = self._try_cast(output, group) @@ -1747,7 +1751,7 @@ def _aggregate_generic(self, func, *args, **kwargs): obj = self._obj_with_exclusions result = {} - if axis != obj._het_axis: + if axis != obj._info_axis_number: try: for name, data in self: # for name in self.indices: @@ -1777,9 +1781,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): cannot_agg = [] for item in obj: try: - colg = SeriesGroupBy(obj[item], selection=item, + data = obj[item] + colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) - result[item] = colg.aggregate(func, *args, **kwargs) + result[item] = self._try_cast(colg.aggregate(func, *args, **kwargs), data) except ValueError: cannot_agg.append(item) continue @@ -1835,7 +1840,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): else: key_index = Index(keys, name=key_names[0]) - if isinstance(values[0], np.ndarray): + if 
isinstance(values[0], (np.ndarray, Series)): if isinstance(values[0], Series): applied_index = self.obj._get_axis(self.axis) all_indexed_same = _all_indexes_same([x.index for x in values]) @@ -1998,7 +2003,7 @@ def __getitem__(self, key): if self._selection is not None: raise Exception('Column(s) %s already selected' % self._selection) - if isinstance(key, (list, tuple, np.ndarray)) or not self.as_index: + if isinstance(key, (list, tuple, Series, np.ndarray)) or not self.as_index: return DataFrameGroupBy(self.obj, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, @@ -2228,7 +2233,7 @@ def __iter__(self): yield i, self._chop(sdata, slice(start, end)) def _get_sorted_data(self): - return self.data.take(self.sort_idx, axis=self.axis) + return self.data.take(self.sort_idx, axis=self.axis, convert=False) def _chop(self, sdata, slice_obj): return sdata[slice_obj] @@ -2244,7 +2249,7 @@ class ArraySplitter(DataSplitter): class SeriesSplitter(DataSplitter): def _chop(self, sdata, slice_obj): - return sdata._get_values(slice_obj) + return sdata._get_values(slice_obj).to_dense() class FrameSplitter(DataSplitter): diff --git a/pandas/core/index.py b/pandas/core/index.py index 5ffd211c86d27..83bd168205d79 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -12,7 +12,9 @@ from pandas.util.decorators import cache_readonly from pandas.core.common import isnull +import pandas.core.base as base import pandas.core.common as com +from pandas.core.common import _values_from_object from pandas.util import py3compat from pandas.core.config import get_option @@ -47,7 +49,7 @@ def _shouldbe_timestamp(obj): or tslib.is_timestamp_array(obj)) -class Index(np.ndarray): +class Index(base.PandasObject, np.ndarray): """ Immutable ndarray implementing an ordered, sliceable set. 
The basic object storing axis labels for all pandas objects @@ -83,7 +85,14 @@ class Index(np.ndarray): _engine_type = _index.ObjectEngine - def __new__(cls, data, dtype=None, copy=False, name=None): + def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): + + # no class inference! + if fastpath: + subarr = data.view(cls) + subarr.name = name + return subarr + from pandas.tseries.period import PeriodIndex if isinstance(data, np.ndarray): if issubclass(data.dtype.type, np.datetime64): @@ -114,17 +123,17 @@ def __new__(cls, data, dtype=None, copy=False, name=None): else: # other iterable of some kind subarr = com._asarray_tuplesafe(data, dtype=object) - + if dtype is None: inferred = lib.infer_dtype(subarr) if inferred == 'integer': return Int64Index(subarr.astype('i8'), name=name) elif inferred != 'string': if (inferred.startswith('datetime') or - tslib.is_timestamp_array(subarr)): + tslib.is_timestamp_array(subarr)): from pandas.tseries.index import DatetimeIndex return DatetimeIndex(subarr, copy=copy, name=name) - + elif inferred == 'period': return PeriodIndex(subarr, name=name) @@ -142,28 +151,6 @@ def __array_finalize__(self, obj): def _shallow_copy(self): return self.view() - def __str__(self): - """ - Return a string representation for a particular Index - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Index - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. 
- """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Index @@ -178,14 +165,6 @@ def __unicode__(self): prepr = com.pprint_thing(data, escape_chars=('\t', '\r', '\n'),quote_strings=True) return '%s(%s, dtype=%s)' % (type(self).__name__, prepr, self.dtype) - def __repr__(self): - """ - Return a string representation for a particular Index - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) - def to_series(self): """ return a series with both index and values equal to the index keys @@ -276,6 +255,9 @@ def _mpl_repr(self): def values(self): return np.asarray(self) + def get_values(self): + return self.values + @property def is_monotonic(self): return self._engine.is_monotonic @@ -383,6 +365,15 @@ def __getitem__(self, key): return Index(result, name=self.name) + def _getitem_slice(self, key): + """ getitem for a bool/sliceable, fallback to standard getitem """ + try: + arr_idx = self.view(np.ndarray) + result = arr_idx[key] + return self.__class__(result, name=self.name, fastpath=True) + except: + return self.__getitem__(key) + def append(self, other): """ Append a collection of Index options together @@ -752,21 +743,23 @@ def get_loc(self, key): ------- loc : int if unique index, possibly slice or mask if not """ - return self._engine.get_loc(key) + return self._engine.get_loc(_values_from_object(key)) def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. 
Only use this if you know what you're doing """ + s = _values_from_object(series) + k = _values_from_object(key) try: - return self._engine.get_value(series, key) + return self._engine.get_value(s, k) except KeyError, e1: if len(self) > 0 and self.inferred_type == 'integer': raise try: - return tslib.get_value_box(series, key) + return tslib.get_value_box(s, key) except IndexError: raise except TypeError: @@ -788,7 +781,7 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(arr, key, value) + self._engine.set_value(_values_from_object(arr), _values_from_object(key), value) def get_level_values(self, level): """ @@ -1315,7 +1308,13 @@ class Int64Index(Index): _engine_type = _index.Int64Engine - def __new__(cls, data, dtype=None, copy=False, name=None): + def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): + + if fastpath: + subarr = data.view(cls) + subarr.name = name + return subarr + if not isinstance(data, np.ndarray): if np.isscalar(data): raise ValueError('Index(...) must be called with a collection ' @@ -1325,7 +1324,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None): if not isinstance(data, (list, tuple)): data = list(data) data = np.asarray(data) - + if issubclass(data.dtype.type, basestring): raise TypeError('String dtype not supported, you may need ' 'to explicitly cast to int') @@ -1334,7 +1333,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None): # with a platform int if dtype is None or not issubclass(np.dtype(dtype).type, np.integer): dtype = np.int64 - + subarr = np.array(data, dtype=dtype, copy=copy) else: subarr = np.array(data, dtype=np.int64, copy=copy) @@ -1474,28 +1473,6 @@ def _array_values(self): def dtype(self): return np.dtype('O') - def __str__(self): - """ - Return a string representation for a particular Index - - Invoked by str(df) in both py2/py3. 
- Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Index - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Index @@ -1519,14 +1496,6 @@ def __unicode__(self): return output % summary - def __repr__(self): - """ - Return a string representation for a particular Index - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) - def __len__(self): return len(self.labels[0]) @@ -1629,8 +1598,10 @@ def get_value(self, series, key): from pandas.core.series import Series # Label-based + s = _values_from_object(series) + k = _values_from_object(key) try: - return self._engine.get_value(series, key) + return self._engine.get_value(s, k) except KeyError, e1: try: # TODO: what if a level contains tuples?? 
@@ -1643,7 +1614,7 @@ def get_value(self, series, key): pass try: - return _index.get_value_at(series, key) + return _index.get_value_at(s, k) except IndexError: raise except TypeError: @@ -1882,6 +1853,8 @@ def __getitem__(self, key): return result + _getitem_slice = __getitem__ + def take(self, indexer, axis=None): """ Analogous to ndarray.take @@ -2284,7 +2257,7 @@ def get_loc(self, key): if isinstance(key, tuple): if len(key) == self.nlevels: if self.is_unique: - return self._engine.get_loc(key) + return self._engine.get_loc(_values_from_object(key)) else: return slice(*self.slice_locs(key, key)) else: @@ -2350,7 +2323,7 @@ def _drop_levels(indexer, levels): if not any(isinstance(k, slice) for k in key): if len(key) == self.nlevels: if self.is_unique: - return self._engine.get_loc(key), None + return self._engine.get_loc(_values_from_object(key)), None else: indexer = slice(*self.slice_locs(key, key)) return indexer, self[indexer] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7562d20363027..cf4dfe43c5400 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -3,6 +3,7 @@ from pandas.core.common import _asarray_tuplesafe from pandas.core.index import Index, MultiIndex, _ensure_index import pandas.core.common as com +from pandas.core.common import _is_bool_indexer, is_series, is_dataframe import pandas.lib as lib import numpy as np @@ -27,6 +28,7 @@ class IndexingError(Exception): class _NDFrameIndexer(object): + _exception = KeyError def __init__(self, obj, name): self.obj = obj @@ -97,7 +99,6 @@ def _convert_tuple(self, key): return tuple(keyidx) def _setitem_with_indexer(self, indexer, value): - from pandas.core.frame import DataFrame, Series # also has the side effect of consolidating in-place @@ -107,17 +108,17 @@ def _setitem_with_indexer(self, indexer, value): if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) - if isinstance(value, Series): + if is_series(value): value = self._align_series(indexer, 
value) - het_axis = self.obj._het_axis - het_idx = indexer[het_axis] + info_axis = self.obj._info_axis_number + info_idx = indexer[info_axis] - if com.is_integer(het_idx): - het_idx = [het_idx] + if com.is_integer(info_idx): + info_idx = [info_idx] - plane_indexer = indexer[:het_axis] + indexer[het_axis + 1:] - item_labels = self.obj._get_axis(het_axis) + plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:] + item_labels = self.obj._get_axis(info_axis) def setter(item, v): data = self.obj[item] @@ -127,12 +128,12 @@ def setter(item, v): if changed: self.obj[item] = result - labels = item_labels[het_idx] + labels = item_labels[info_idx] if _is_list_like(value): # we have an equal len Frame - if isinstance(value, DataFrame) and value.ndim > 1: + if is_dataframe(value) and value.ndim > 1: for item in labels: @@ -172,10 +173,10 @@ def setter(item, v): if isinstance(indexer, tuple): indexer = _maybe_convert_ix(*indexer) - if isinstance(value, Series): + if is_series(value): value = self._align_series(indexer, value) - if isinstance(value, DataFrame): + elif is_dataframe(value): value = self._align_frame(indexer, value) # 2096 @@ -205,8 +206,7 @@ def _align_series(self, indexer, ser): raise ValueError('Incompatible indexer with Series') def _align_frame(self, indexer, df): - from pandas import DataFrame - is_frame = isinstance(self.obj, DataFrame) + is_frame = is_dataframe(self.obj) if not is_frame: df = df.T if isinstance(indexer, tuple): @@ -291,22 +291,14 @@ def _multi_take_opportunity(self, tup): return True def _multi_take(self, tup): - from pandas.core.frame import DataFrame - from pandas.core.panel import Panel - from pandas.core.panel4d import Panel4D - - if isinstance(self.obj, DataFrame): - index = self._convert_for_reindex(tup[0], axis=0) - columns = self._convert_for_reindex(tup[1], axis=1) - return self.obj.reindex(index=index, columns=columns) - elif isinstance(self.obj, Panel4D): - conv = [self._convert_for_reindex(x, axis=i) - for i, x in 
enumerate(tup)] - return self.obj.reindex(labels=tup[0], items=tup[1], major=tup[2], minor=tup[3]) - elif isinstance(self.obj, Panel): - conv = [self._convert_for_reindex(x, axis=i) - for i, x in enumerate(tup)] - return self.obj.reindex(items=tup[0], major=tup[1], minor=tup[2]) + """ create the reindex map for our objects, raise the _exception if we can't create the indexer """ + + try: + o = self.obj + d = dict([ (a,self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS) ]) + return o.reindex(**d) + except: + raise self._exception def _convert_for_reindex(self, key, axis=0): labels = self.obj._get_axis(axis) @@ -328,7 +320,6 @@ def _convert_for_reindex(self, key, axis=0): return keyarr def _getitem_lowerdim(self, tup): - from pandas.core.frame import DataFrame ax0 = self.obj._get_axis(0) # a bit kludgy @@ -373,7 +364,7 @@ def _getitem_lowerdim(self, tup): # unfortunately need an odious kludge here because of # DataFrame transposing convention - if (isinstance(section, DataFrame) and i > 0 + if (is_dataframe(section) and i > 0 and len(new_key) == 2): a, b = new_key new_key = b, a @@ -971,19 +962,21 @@ def _check_bool_indexer(ax, key): result = key if _is_series(key) and not key.index.equals(ax): result = result.reindex(ax) - mask = com.isnull(result) + mask = com.isnull(result.values) if mask.any(): raise IndexingError('Unalignable boolean Series key provided') - # com._is_bool_indexer has already checked for nulls in the case of an - # object array key, so no check needed here - result = np.asarray(result, dtype=bool) - return result + result = result.astype(bool).values + else: + # com._is_bool_indexer has already checked for nulls in the case of an + # object array key, so no check needed here + result = np.asarray(result, dtype=bool) + + return result def _is_series(obj): - from pandas.core.series import Series - return isinstance(obj, Series) + return is_series(obj) def _maybe_convert_indices(indices, n): @@ -1004,9 
+997,10 @@ def _maybe_convert_ix(*args): """ We likely want to take the cross-product """ + ixify = True for arg in args: - if not isinstance(arg, (np.ndarray, list)): + if not (isinstance(arg, (np.ndarray, list)) or is_series(arg)): ixify = False if ixify: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 03cfd18f5afe5..2993ceac3c03e 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,20 +1,23 @@ import itertools from datetime import datetime +import copy +from collections import defaultdict -from numpy import nan import numpy as np +from numpy import nan -from pandas.core.common import _possibly_downcast_to_dtype, isnull +from pandas.core.common import _possibly_downcast_to_dtype, isnull, is_series, is_sparse_series from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices import pandas.core.common as com +from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib import pandas.tslib as tslib import pandas.core.expressions as expressions +from pandas.util.decorators import cache_readonly from pandas.tslib import Timestamp -from pandas.util import py3compat - +from pandas.util import py3compat, rwproperty class Block(object): """ @@ -27,10 +30,17 @@ class Block(object): is_numeric = False is_bool = False is_object = False + is_sparse = False _can_hold_na = False _downcast_dtype = None + _can_consolidate = True + _verify_integrity = True + _ftype = 'dense' - def __init__(self, values, items, ref_items, ndim=2, fastpath=False): + def __init__(self, values, items, ref_items, ndim=None, fastpath=False): + + if ndim is None: + ndim = values.ndim if values.ndim != ndim: raise ValueError('Wrong number of dimensions') @@ -53,15 +63,37 @@ def __init__(self, values, items, ref_items, ndim=2, fastpath=False): def _gi(self, arg): return self.values[arg] + @property + def _consolidate_key(self): + 
return (self._can_consolidate, self.dtype.name) + + @property + def _is_single_block(self): + return self.ndim == 1 + + @property + def fill_value(self): + return np.nan + @property def ref_locs(self): if self._ref_locs is None: - indexer = self.ref_items.get_indexer(self.items) - indexer = com._ensure_platform_int(indexer) - if (indexer == -1).any(): - raise AssertionError('Some block items were not in block ' - 'ref_items') + + # we have a single block, maybe have duplicates + # but indexer is easy + # also if we are not really reindexing, just numbering + if self._is_single_block or self.ref_items.equals(self.items): + indexer = np.arange(len(self.items)) + else: + + indexer = self.ref_items.get_indexer(self.items) + indexer = com._ensure_platform_int(indexer) + if (indexer == -1).any(): + raise AssertionError('Some block items were not in block ' + 'ref_items') + self._ref_locs = indexer + return self._ref_locs def set_ref_items(self, ref_items, maybe_rename=True): @@ -75,10 +107,20 @@ def set_ref_items(self, ref_items, maybe_rename=True): self.ref_items = ref_items def __repr__(self): - shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) name = type(self).__name__ - result = '%s: %s, %s, dtype %s' % ( - name, com.pprint_thing(self.items), shape, self.dtype) + + # don't want to print out all of the items here + if self._is_single_block: + + result = '%s: %s dtype: %s' % ( + name, len(self), self.dtype) + + else: + + shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) + result = '%s: %s, %s, dtype: %s' % ( + name, com.pprint_thing(self.items), shape, self.dtype) + if py3compat.PY3: return unicode(result) return com.console_encode(result) @@ -101,6 +143,10 @@ def __setstate__(self, state): self.values = values self.ndim = values.ndim + def _slice(self, slicer): + """ return a slice of my values """ + return self.values[slicer] + @property def shape(self): return self.values.shape @@ -113,11 +159,17 @@ def itemsize(self): def dtype(self): 
return self.values.dtype - def copy(self, deep=True): + def copy(self, deep=True, ref_items=None): values = self.values if deep: values = values.copy() - return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True) + if ref_items is None: + ref_items = self.ref_items + return make_block(values, self.items, ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True) + + @property + def ftype(self): + return "%s:%s" % (self.dtype,self._ftype) def merge(self, other): if not self.ref_items.equals(other.ref_items): @@ -128,15 +180,17 @@ def merge(self, other): # union_ref = self.ref_items + other.ref_items return _merge_blocks([self, other], self.ref_items) - def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None): + def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): """ Reindex using pre-computed indexer information """ if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) + if fill_value is None: + fill_value = self.fill_value new_values = com.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) - return make_block(new_values, self.items, self.ref_items, fastpath=True) + return make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True) def reindex_items_from(self, new_ref_items, copy=True): """ @@ -158,7 +212,7 @@ def reindex_items_from(self, new_ref_items, copy=True): new_values = com.take_nd(self.values, masked_idx, axis=0, allow_fill=False) new_items = self.items.take(masked_idx) - return make_block(new_values, new_items, new_ref_items, fastpath=True) + return make_block(new_values, new_items, new_ref_items, ndim=self.ndim, fastpath=True) def get(self, item): loc = self.items.get_loc(item) @@ -184,7 +238,7 @@ def delete(self, item): loc = self.items.get_loc(item) new_items = self.items.delete(loc) new_values = np.delete(self.values, loc, 0) - return make_block(new_values, new_items, 
self.ref_items, klass=self.__class__, fastpath=True) + return make_block(new_values, new_items, self.ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True) def split_block_at(self, item): """ @@ -208,6 +262,7 @@ def split_block_at(self, item): yield make_block(self.values[s:e], self.items[s:e].copy(), self.ref_items, + ndim = self.ndim, klass=self.__class__, fastpath=True) @@ -220,6 +275,8 @@ def fillna(self, value, inplace=False, downcast=None): new_values = self.values if inplace else self.values.copy() mask = com.isnull(new_values) + + value = self._try_fill(value) np.putmask(new_values, mask, value) block = make_block(new_values, self.items, self.ref_items, fastpath=True) @@ -249,14 +306,24 @@ def downcast(self, dtypes = None): return blocks - def astype(self, dtype, copy = True, raise_on_error = True): + def astype(self, dtype, copy=True, raise_on_error=True): + return self._astype(dtype, copy=copy, raise_on_error=raise_on_error) + + def _astype(self, dtype, copy=True, raise_on_error=True, klass=None): """ Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ + dtype = np.dtype(dtype) + if self.dtype == dtype: + if copy: + return self.copy() + return self + try: - newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy), - self.items, self.ref_items, fastpath=True) + newb = make_block(com._astype_nansafe(self.values, dtype, copy=copy), + self.items, self.ref_items, ndim=self.ndim, + fastpath=True, dtype=dtype, klass=klass) except: if raise_on_error is True: raise @@ -278,6 +345,30 @@ def convert(self, copy = True, **kwargs): return self.copy() if copy else self + def prepare_for_merge(self, **kwargs): + """ a regular block is ok to merge as is """ + return self + + def post_merge(self, items, **kwargs): + """ we are non-sparse block, try to convert to a sparse block(s) """ + overlap = set(items.keys()) & set(self.items) + if len(overlap): + overlap = _ensure_index(overlap) + + new_blocks = [] + 
for item in overlap: + dtypes = set(items[item]) + + # this is a safe bet with multiple dtypes + dtype = list(dtypes)[0] if len(dtypes) == 1 else np.float64 + + b = make_block(SparseArray(self.get(item), dtype=dtype), [ item ], self.ref_items) + new_blocks.append(b) + + return new_blocks + + return self + def _can_hold_element(self, value): raise NotImplementedError() @@ -297,6 +388,9 @@ def _try_coerce_result(self, result): """ reverse of try_coerce_args """ return result + def _try_fill(self, value): + return value + def to_native_types(self, slicer=None, na_rep='', **kwargs): """ convert to our native types format, slicing if desired """ @@ -331,12 +425,12 @@ def putmask(self, mask, new, inplace=False): # may need to align the new if hasattr(new, 'reindex_axis'): - axis = getattr(new, '_het_axis', 0) + axis = getattr(new, '_info_axis_number', 0) new = new.reindex_axis(self.items, axis=axis, copy=False).values.T # may need to align the mask if hasattr(mask, 'reindex_axis'): - axis = getattr(mask, '_het_axis', 0) + axis = getattr(mask, '_info_axis_number', 0) mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T if self._can_hold_element(new): @@ -348,36 +442,66 @@ def putmask(self, mask, new, inplace=False): # need to go column by column new_blocks = [] - for i, item in enumerate(self.items): - - m = mask[i] - - # need a new block - if m.any(): - n = new[i] if isinstance(new, np.ndarray) else new + def create_block(v,m,n,item,reshape=True): + """ return a new block, try to preserve dtype if possible """ - # type of the new block - dtype, _ = com._maybe_promote(np.array(n).dtype) + # n should the length of the mask or a scalar here + if np.isscalar(n): + n = np.array([n] * len(m)) - # we need to exiplicty astype here to make a copy - nv = new_values[i].astype(dtype) + # see if we are only masking values that if putted + # will work in the current dtype + nv = None + try: + nn = n[m] + nn_at = nn.astype(self.dtype) + if (nn == nn_at).all(): + nv = 
v.copy() + nv[mask] = nn_at + except: + pass - # we create a new block type + # change the dtype + if nv is None: + dtype, _ = com._maybe_promote(n.dtype) + nv = v.astype(dtype) np.putmask(nv, m, n) + if reshape: + nv = _block_shape(nv) + return make_block(nv, [ item ], self.ref_items) else: - nv = new_values[i] if inplace else new_values[i].copy() + return make_block(nv, item, self.ref_items) - nv = _block_shape(nv) - new_blocks.append(make_block(nv, Index([ item ]), self.ref_items, fastpath=True)) + if self.ndim > 1: + for i, item in enumerate(self.items): + m = mask[i] + v = new_values[i] + + # need a new block + if m.any(): + + n = new[i] if isinstance(new, np.ndarray) else new + block = create_block(v,m,n,item) + + else: + nv = v if inplace else v.copy() + nv = _block_shape(nv) + block = make_block(nv, Index([ item ]), self.ref_items, fastpath=True) + + new_blocks.append(block) + + else: + + new_blocks.append(create_block(new_values,mask,new,self.items,reshape=False)) return new_blocks if inplace: return [ self ] - return [ make_block(new_values, self.items, self.ref_items, fastpath=True) ] + return make_block(new_values, self.items, self.ref_items, fastpath=True) def interpolate(self, method='pad', axis=0, inplace=False, limit=None, missing=None, coerce=False): @@ -392,38 +516,26 @@ def interpolate(self, method='pad', axis=0, inplace=False, return self.copy() values = self.values if inplace else self.values.copy() - - if values.ndim != 2: - raise NotImplementedError - - transf = (lambda x: x) if axis == 0 else (lambda x: x.T) - - if missing is None: - mask = None - else: # todo create faster fill func without masking - mask = com.mask_missing(transf(values), missing) - - if method == 'pad': - com.pad_2d(transf(values), limit=limit, mask=mask) - else: - com.backfill_2d(transf(values), limit=limit, mask=mask) - - return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True) + values = com.interpolate_2d(values, method, axis, limit, 
missing) + return make_block(values, self.items, self.ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True) def take(self, indexer, ref_items, axis=1): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) new_values = com.take_nd(self.values, indexer, axis=axis, allow_fill=False) - return make_block(new_values, self.items, ref_items, klass=self.__class__, fastpath=True) + return make_block(new_values, self.items, ref_items, ndim=self.ndim, klass=self.__class__, fastpath=True) - def get_values(self, dtype): + def get_values(self, dtype=None): return self.values + def get_merge_length(self): + return len(self.values) + def diff(self, n): """ return block for the diff of the values """ new_values = com.diff(self.values, n, axis=1) - return make_block(new_values, self.items, self.ref_items, fastpath=True) + return make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True) def shift(self, indexer, periods): """ shift the block by periods, possibly upcast """ @@ -436,7 +548,7 @@ def shift(self, indexer, periods): new_values[:, :periods] = fill_value else: new_values[:, periods:] = fill_value - return make_block(new_values, self.items, self.ref_items, fastpath=True) + return make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True) def eval(self, func, other, raise_on_error = True, try_cast = False): """ @@ -457,8 +569,8 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): # see if we can align other if hasattr(other, 'reindex_axis'): - axis = getattr(other, '_het_axis', 0) - other = other.reindex_axis(self.items, axis=axis, copy=True).values + axis = getattr(other, '_info_axis_number', 0) + other = other.reindex_axis(self.items, axis=axis, copy=False).values # make sure that we can broadcast is_transposed = False @@ -491,7 +603,7 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): if try_cast: result = self._try_cast_result(result) - return 
make_block(result, self.items, self.ref_items, fastpath=True) + return make_block(result, self.items, self.ref_items, ndim=self.ndim, fastpath=True) def where(self, other, cond, raise_on_error = True, try_cast = False): """ @@ -513,7 +625,7 @@ def where(self, other, cond, raise_on_error = True, try_cast = False): # see if we can align other if hasattr(other,'reindex_axis'): - axis = getattr(other,'_het_axis',0) + axis = getattr(other,'_info_axis_number',0) other = other.reindex_axis(self.items, axis=axis, copy=True).values # make sure that we can broadcast @@ -527,7 +639,7 @@ def where(self, other, cond, raise_on_error = True, try_cast = False): if not hasattr(cond,'shape'): raise ValueError("where must have a condition that is ndarray like") if hasattr(cond,'reindex_axis'): - axis = getattr(cond,'_het_axis',0) + axis = getattr(cond,'_info_axis_number',0) cond = cond.reindex_axis(self.items, axis=axis, copy=True).values else: cond = cond.values @@ -568,7 +680,7 @@ def create_block(result, items, transpose=True): if try_cast: result = self._try_cast_result(result) - return make_block(result, items, self.ref_items) + return make_block(result, items, self.ref_items, ndim = self.ndim) # see if we can operate on the entire block, or need item-by-item if not self._can_hold_na: @@ -699,7 +811,7 @@ def is_bool(self): """ we can be a bool if we have only bool values but are of type object """ return lib.is_bool_array(self.values.ravel()) - def convert(self, convert_dates = True, convert_numeric = True, copy = True): + def convert(self, convert_dates = True, convert_numeric = True, copy = True, by_item = True): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! 
@@ -709,14 +821,22 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True): # attempt to create new type blocks blocks = [] - for i, c in enumerate(self.items): - values = self.get(c) - values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) - values = _block_shape(values) - items = self.items.take([i]) - newb = make_block(values, items, self.ref_items, fastpath=True) - blocks.append(newb) + if by_item: + + for i, c in enumerate(self.items): + values = self.get(c) + + values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) + values = _block_shape(values) + items = self.items.take([i]) + newb = make_block(values, items, self.ref_items, ndim = self.ndim) + blocks.append(newb) + + else: + + values = com._possibly_convert_objects(self.values, convert_dates=convert_dates, convert_numeric=convert_numeric) + blocks.append(make_block(values, self.items, self.ref_items, ndim = self.ndim)) return blocks @@ -738,11 +858,11 @@ def should_store(self, value): class DatetimeBlock(Block): _can_hold_na = True - def __init__(self, values, items, ref_items, ndim=2, fastpath=True): + def __init__(self, values, items, ref_items, fastpath=False, **kwargs): if values.dtype != _NS_DTYPE: values = tslib.cast_to_nanoseconds(values) - super(DatetimeBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath) + Block.__init__(self, values, items, ref_items, fastpath=fastpath, **kwargs) def _gi(self, arg): return lib.Timestamp(self.values[arg]) @@ -779,6 +899,12 @@ def _try_coerce_result(self, result): result = lib.Timestamp(result) return result + def _try_fill(self, value): + """ if we are a NaT, return the actual fill value """ + if isinstance(value, type(tslib.NaT)): + value = tslib.iNaT + return value + def to_native_types(self, slicer=None, na_rep=None, **kwargs): """ convert to our native types format, slicing if desired """ @@ -801,6 +927,15 
@@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): def should_store(self, value): return issubclass(value.dtype.type, np.datetime64) + def astype(self, dtype, copy = True, raise_on_error=True): + """ + handle convert to object as a special case + """ + klass = None + if np.dtype(dtype).type == np.object_: + klass = ObjectBlock + return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, klass=klass) + def set(self, item, value): """ Modify Block in-place with new item value @@ -816,49 +951,259 @@ def set(self, item, value): self.values[loc] = value - def get_values(self, dtype): + def get_values(self, dtype = None): if dtype == object: flat_i8 = self.values.ravel().view(np.int64) res = tslib.ints_to_pydatetime(flat_i8) return res.reshape(self.values.shape) return self.values +class SparseBlock(Block): + """ implement as a list of sparse arrays of the same dtype """ + __slots__ = ['items', 'ref_items', '_ref_locs', 'ndim', 'values'] + is_sparse = True + is_numeric = True + _can_hold_na = True + _can_consolidate = False + _verify_integrity = False + _ftype = 'sparse' + + def __init__(self, values, items, ref_items, ndim=None, fastpath=False): + + # kludgetastic + if ndim is not None: + if ndim == 1: + ndim = 1 + elif ndim > 2: + ndim = ndim + else: + if len(items) != 1: + ndim = 1 + else: + ndim = 2 + self.ndim = ndim + + self._ref_locs = None + self.values = values + if fastpath: + self.items = items + self.ref_items = ref_items + else: + self.items = _ensure_index(items) + self.ref_items = _ensure_index(ref_items) + + @property + def shape(self): + return (len(self.items),self.sp_index.length) + + @property + def itemsize(self): + return self.dtype.itemsize + + @rwproperty.getproperty + def fill_value(self): + return self.values.fill_value + + @rwproperty.setproperty + def fill_value(self, v): + # we may need to upcast our fill to match our dtype + if issubclass(self.dtype.type, np.floating): + v = float(v) + self.values.fill_value = v + + 
@rwproperty.getproperty + def sp_values(self): + return self.values.sp_values + + @rwproperty.setproperty + def sp_values(self, v): + # reset the sparse values + self.values = SparseArray(v,sparse_index=self.sp_index,kind=self.kind,dtype=v.dtype,fill_value=self.fill_value,copy=False) + + @property + def sp_index(self): + return self.values.sp_index + + @property + def kind(self): + return self.values.kind + + def __len__(self): + try: + return self.sp_index.length + except: + return 0 + + def should_store(self, value): + return isinstance(value, SparseArray) + + def prepare_for_merge(self, **kwargs): + """ create a dense block """ + return make_block(self.get_values(), self.items, self.ref_items) + + def post_merge(self, items, **kwargs): + return self + + def set(self, item, value): + self.values = value + + def get(self, item): + if self.ndim == 1: + loc = self.items.get_loc(item) + return self.values[loc] + else: + return self.values + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def get_values(self, dtype=None): + """ need to to_dense myself (and always return a ndim sized object) """ + values = self.values.to_dense() + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def get_merge_length(self): + return 1 + + def make_block(self, values, items=None, ref_items=None, sparse_index=None, kind=None, dtype=None, fill_value=None, + copy=False, fastpath=True): + """ return a new block """ + if dtype is None: + dtype = self.dtype + if fill_value is None: + fill_value = self.fill_value + if items is None: + items = self.items + if ref_items is None: + ref_items = self.ref_items + new_values = SparseArray(values,sparse_index=sparse_index,kind=kind or self.kind,dtype=dtype,fill_value=fill_value,copy=copy) + return make_block(new_values, items, ref_items, ndim=self.ndim, fastpath=fastpath) + + def interpolate(self, method='pad', axis=0, 
inplace=False, + limit=None, missing=None, **kwargs): + + values = com.interpolate_2d(self.values.to_dense(), method, axis, limit, missing) + return self.make_block(values, self.items, self.ref_items) + + def fillna(self, value, inplace=False, downcast=None): + # we may need to upcast our fill to match our dtype + if issubclass(self.dtype.type, np.floating): + value = float(value) + values = self.values if inplace else self.values.copy() + return self.make_block(values.get_values(value),fill_value=value) + + def shift(self, indexer, periods): + """ shift the block by periods """ + + new_values = self.values.to_dense().take(indexer) + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + new_values, fill_value = com._maybe_upcast(new_values) + if periods > 0: + new_values[:periods] = fill_value + else: + new_values[periods:] = fill_value + return self.make_block(new_values) + + def take(self, indexer, ref_items, axis=1): + """ going to take our items + along the long dimension""" + if axis < 1: + raise AssertionError('axis must be at least 1, got %d' % axis) + + return self.make_block(self.values.take(indexer)) + + def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): + """ + Reindex using pre-computed indexer information + """ + if axis < 1: + raise AssertionError('axis must be at least 1, got %d' % axis) + + # taking on the 0th axis always here + if fill_value is None: + fill_value = self.fill_value + return self.make_block(self.values.take(indexer),items=self.items,fill_value=fill_value) + + def reindex_items_from(self, new_ref_items, copy=True): + """ + Reindex to only those items contained in the input set of items + + E.g. 
if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], + then the resulting items will be ['b'] + + Returns + ------- + reindexed : Block + """ + + # 2-d + if self.ndim >= 2: + if self.items[0] not in self.ref_items: + return None + return self.make_block(self.values,ref_items=new_ref_items,copy=copy) + + # 1-d + new_ref_items, indexer = self.items.reindex(new_ref_items) + if indexer is None: + indexer = np.arange(len(self.items)) + + return self.make_block(com.take_1d(self.values.values, indexer),items=new_ref_items,ref_items=new_ref_items,copy=copy) + + def sparse_reindex(self, new_index): + """ sparse reindex and return a new block + current reindex only works for float64 dtype! """ + values = self.values + values = values.sp_index.to_int_index().reindex(values.sp_values.astype('float64'),values.fill_value,new_index) + return self.make_block(values,sparse_index=new_index) + + def split_block_at(self, item): + if len(self.items) == 1 and item == self.items[0]: + return [] + return super(SparseBlock, self).split_block_at(self, item) -def make_block(values, items, ref_items, klass = None, fastpath=False): +def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, fastpath=False): if klass is None: - dtype = values.dtype + dtype = dtype or values.dtype vtype = dtype.type - if issubclass(vtype, np.floating): + if isinstance(values, SparseArray): + klass = SparseBlock + elif issubclass(vtype, np.floating): klass = FloatBlock - elif issubclass(vtype, np.complexfloating): - klass = ComplexBlock - elif issubclass(vtype, np.datetime64): - klass = DatetimeBlock - elif issubclass(vtype, np.integer): + elif issubclass(vtype, np.integer) and not issubclass(vtype, np.datetime64): klass = IntBlock elif dtype == np.bool_: klass = BoolBlock + elif issubclass(vtype, np.datetime64): + klass = DatetimeBlock + elif issubclass(vtype, np.complexfloating): + klass = ComplexBlock + + # try to infer a DatetimeBlock, or set to an ObjectBlock + else: - # try to 
infer a datetimeblock - if klass is None and np.prod(values.shape): - flat = values.ravel() - inferred_type = lib.infer_dtype(flat) - if inferred_type == 'datetime': + if np.prod(values.shape): + flat = values.ravel() + inferred_type = lib.infer_dtype(flat) + if inferred_type == 'datetime': - # we have an object array that has been inferred as datetime, so - # convert it - try: - values = tslib.array_to_datetime(flat).reshape(values.shape) - klass = DatetimeBlock - except: # it already object, so leave it - pass + # we have an object array that has been inferred as datetime, so + # convert it + try: + values = tslib.array_to_datetime(flat).reshape(values.shape) + klass = DatetimeBlock + except: # it already object, so leave it + pass - if klass is None: - klass = ObjectBlock + if klass is None: + klass = ObjectBlock - return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath) + return klass(values, items, ref_items, ndim=ndim, fastpath=fastpath) # TODO: flexible with index=None and/or items=None @@ -879,34 +1224,43 @@ class BlockManager(object): ----- This is *not* a public API class """ - __slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated'] + __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_has_sparse'] - def __init__(self, blocks, axes, do_integrity_check=True): + def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): self.axes = [_ensure_index(ax) for ax in axes] self.blocks = blocks - ndim = len(axes) + ndim = self.ndim for block in blocks: - if ndim != block.values.ndim: + if not block.is_sparse and ndim != block.ndim: raise AssertionError(('Number of Block dimensions (%d) must ' 'equal number of axes (%d)') - % (block.values.ndim, ndim)) + % (block.ndim, ndim)) if do_integrity_check: self._verify_integrity() + self._has_sparse = False self._consolidate_check() @classmethod - def make_empty(self): - return BlockManager([], [[], []]) + def make_empty(cls): + 
return cls([], [[], []]) def __nonzero__(self): return True + @property + def shape(self): + if getattr(self,'_shape',None) is None: + self._shape = tuple(len(ax) for ax in self.axes) + return self._shape + @property def ndim(self): - return len(self.axes) + if getattr(self,'_ndim',None) is None: + self._ndim = len(self.axes) + return self._ndim def set_axis(self, axis, value): cur_axis = self.axes[axis] @@ -916,6 +1270,7 @@ def set_axis(self, axis, value): raise Exception('Length mismatch (%d vs %d)' % (len(value), len(cur_axis))) self.axes[axis] = value + self._shape = None if axis == 0: for block in self.blocks: @@ -931,7 +1286,15 @@ def get_dtype_counts(self): self._consolidate_inplace() counts = dict() for b in self.blocks: - counts[b.dtype.name] = counts.get(b.dtype,0) + b.shape[0] + counts[b.dtype.name] = counts.get(b.dtype.name,0) + b.shape[0] + return counts + + def get_ftype_counts(self): + """ return a dict of the counts of dtypes in BlockManager """ + self._consolidate_inplace() + counts = dict() + for b in self.blocks: + counts[b.ftype] = counts.get(b.ftype,0) + b.shape[0] return counts def __getstate__(self): @@ -948,20 +1311,24 @@ def __setstate__(self, state): self.axes = [_ensure_index(ax) for ax in ax_arrays] self.axes = _handle_legacy_indexes(self.axes) - self._is_consolidated = False - self._known_consolidated = False - blocks = [] for values, items in zip(bvalues, bitems): blk = make_block(values, items, self.axes[0]) blocks.append(blk) self.blocks = blocks + self._post_setstate() + + def _post_setstate(self): + self._is_consolidated = False + self._known_consolidated = False + self._set_has_sparse() + def __len__(self): return len(self.items) def __repr__(self): - output = 'BlockManager' + output = str(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: output += '\nItems: %s' % ax @@ -972,10 +1339,6 @@ def __repr__(self): output += '\n%s' % repr(block) return output - @property - def shape(self): - return tuple(len(ax) for 
ax in self.axes) - def _verify_integrity(self): mgr_shape = self.shape tot_items = sum(len(x.items) for x in self.blocks) @@ -983,9 +1346,8 @@ def _verify_integrity(self): if block.ref_items is not self.items: raise AssertionError("Block ref_items must be BlockManager " "items") - if block.values.shape[1:] != mgr_shape[1:]: + if not block.is_sparse and block.values.shape[1:] != mgr_shape[1:]: construction_error(tot_items,block.values.shape[1:],self.axes) - if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items') @@ -1002,6 +1364,7 @@ def apply(self, f, *args, **kwargs): axes = kwargs.pop('axes',None) filter = kwargs.get('filter') + do_integrity_check = kwargs.pop('do_integrity_check',False) result_blocks = [] for blk in self.blocks: if filter is not None: @@ -1018,7 +1381,7 @@ def apply(self, f, *args, **kwargs): result_blocks.extend(applied) else: result_blocks.append(applied) - bm = self.__class__(result_blocks, axes or self.axes) + bm = self.__class__(result_blocks, axes or self.axes, do_integrity_check=do_integrity_check) bm._consolidate_inplace() return bm @@ -1089,6 +1452,30 @@ def comp(s): bm._consolidate_inplace() return bm + def prepare_for_merge(self, *args, **kwargs): + """ prepare for merging, return a new block manager with Sparse -> Dense """ + self._consolidate_inplace() + if self._has_sparse: + return self.apply('prepare_for_merge', *args, **kwargs) + return self + + def post_merge(self, objs, **kwargs): + """ try to sparsify items that were previously sparse """ + is_sparse = defaultdict(list) + for o in objs: + for blk in o._data.blocks: + if blk.is_sparse: + + # record the dtype of each item + for i in blk.items: + is_sparse[i].append(blk.dtype) + + if len(is_sparse): + return self.apply('post_merge', items = is_sparse) + + return self + + def is_consolidated(self): """ Return True if more than one block with the same dtype @@ -1098,9 +1485,13 @@ def is_consolidated(self): return 
self._is_consolidated def _consolidate_check(self): - dtypes = [blk.dtype.type for blk in self.blocks] - self._is_consolidated = len(dtypes) == len(set(dtypes)) + ftypes = [blk.ftype for blk in self.blocks] + self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True + self._set_has_sparse() + + def _set_has_sparse(self): + self._has_sparse = any((blk.is_sparse for blk in self.blocks)) @property def is_mixed_type(self): @@ -1112,61 +1503,100 @@ def is_numeric_mixed_type(self): self._consolidate_inplace() return all([ block.is_numeric for block in self.blocks ]) - def get_numeric_data(self, copy=False, type_list=None, as_blocks = False): + def get_block_map(self, copy=False, typ=None, columns=None, is_numeric=False, is_bool=False): + """ return a dictionary mapping the ftype -> block list + + Parameters + ---------- + typ : return a list/dict + copy : copy if indicated + columns : a column filter list + filter if the type is indicated """ + + # short circuit - mainly for merging + if typ == 'dict' and columns is None and not is_numeric and not is_bool and not copy: + bm = defaultdict(list) + for b in self.blocks: + bm[str(b.ftype)].append(b) + return bm + + self._consolidate_inplace() + + if is_numeric: + filter_blocks = lambda block: block.is_numeric + elif is_bool: + filter_blocks = lambda block: block.is_bool + else: + filter_blocks = lambda block: True + + def filter_columns(b): + if columns: + if not columns in b.items: + return None + b = b.reindex_items_from(columns) + return b + + maybe_copy = lambda b: b.copy() if copy else b + def maybe_copy(b): + if copy: + b = b.copy() + return b + + if typ == 'list': + bm = [] + for b in self.blocks: + if filter_blocks(b): + b = filter_columns(b) + if b is not None: + bm.append(maybe_copy(b)) + + else: + if typ == 'dtype': + key = lambda b: b.dtype + else: + key = lambda b: b.ftype + bm = defaultdict(list) + for b in self.blocks: + if filter_blocks(b): + b = filter_columns(b) + if b is not 
None: + bm[str(key(b))].append(maybe_copy(b)) + return bm + + def get_bool_data(self, **kwargs): + kwargs['is_bool'] = True + return self.get_data(**kwargs) + + def get_numeric_data(self, **kwargs): + kwargs['is_numeric'] = True + return self.get_data(**kwargs) + + def get_data(self, copy=False, columns=None, **kwargs): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks - type_list : tuple of type, default None - Numeric types by default (Float/Complex/Int but not Datetime) """ - if type_list is None: - filter_blocks = lambda block: block.is_numeric - else: - type_list = self._get_clean_block_types(type_list) - filter_blocks = lambda block: isinstance(block, type_list) + blocks = self.get_block_map(typ='list', copy=copy, columns=columns, **kwargs) + if len(blocks) == 0: + return self.__class__.make_empty() - maybe_copy = lambda b: b.copy() if copy else b - num_blocks = [maybe_copy(b) for b in self.blocks if filter_blocks(b)] - if as_blocks: - return num_blocks + return self.combine(blocks) - if len(num_blocks) == 0: - return BlockManager.make_empty() - - indexer = np.sort(np.concatenate([b.ref_locs for b in num_blocks])) + def combine(self, blocks): + """ reutrn a new manager with the blocks """ + indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) new_items = self.items.take(indexer) new_blocks = [] - for b in num_blocks: + for b in blocks: b = b.copy(deep=False) b.ref_items = new_items new_blocks.append(b) new_axes = list(self.axes) new_axes[0] = new_items - return BlockManager(new_blocks, new_axes, do_integrity_check=False) - - def _get_clean_block_types(self, type_list): - if not isinstance(type_list, tuple): - try: - type_list = tuple(type_list) - except TypeError: - type_list = (type_list,) - - type_map = {int: IntBlock, float: FloatBlock, - complex: ComplexBlock, - np.datetime64: DatetimeBlock, - datetime: DatetimeBlock, - bool: BoolBlock, - object: ObjectBlock} - - type_list = tuple([type_map.get(t, t) for t in 
type_list]) - return type_list - - def get_bool_data(self, copy=False, as_blocks=False): - return self.get_numeric_data(copy=copy, type_list=(BoolBlock,), - as_blocks=as_blocks) + return self.__class__(new_blocks, new_axes, do_integrity_check=False) def get_slice(self, slobj, axis=0, raise_on_error=False): new_axes = list(self.axes) @@ -1180,7 +1610,7 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): new_items = new_axes[0] if len(self.blocks) == 1: blk = self.blocks[0] - newb = make_block(blk.values[slobj], + newb = make_block(blk._slice(slobj), new_items, new_items, klass=blk.__class__, @@ -1191,7 +1621,9 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): else: new_blocks = self._slice_blocks(slobj, axis) - return BlockManager(new_blocks, new_axes, do_integrity_check=False) + bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) + bm._consolidate_inplace() + return bm def _slice_blocks(self, slobj, axis): new_blocks = [] @@ -1201,10 +1633,9 @@ def _slice_blocks(self, slobj, axis): slicer = tuple(slicer) for block in self.blocks: - newb = make_block(block.values[slicer], + newb = make_block(block._slice(slicer), block.items, block.ref_items, - klass=block.__class__, fastpath=True) new_blocks.append(newb) return new_blocks @@ -1233,10 +1664,8 @@ def copy(self, deep=True): ------- copy : BlockManager """ - copy_blocks = [block.copy(deep=deep) for block in self.blocks] - # copy_axes = [ax.copy() for ax in self.axes] - copy_axes = list(self.axes) - return BlockManager(copy_blocks, copy_axes, do_integrity_check=False) + new_axes = list(self.axes) + return self.apply('copy', axes=new_axes, deep=deep, do_integrity_check=False) def as_matrix(self, items=None): if len(self.blocks) == 0: @@ -1245,7 +1674,7 @@ def as_matrix(self, items=None): blk = self.blocks[0] if items is None or blk.items.equals(items): # if not, then just call interleave per below - mat = blk.values + mat = blk.get_values() else: mat = 
self.reindex_items(items).as_matrix() else: @@ -1331,7 +1760,7 @@ def xs(self, key, axis=1, copy=True): klass=block.__class__, fastpath=True)] - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def fast_2d_xs(self, loc, copy=False): """ @@ -1370,14 +1799,16 @@ def consolidate(self): if self.is_consolidated(): return self - new_blocks = _consolidate(self.blocks, self.items) - return BlockManager(new_blocks, self.axes) + bm = self.__class__(self.blocks, self.axes) + bm._consolidate_inplace() + return bm def _consolidate_inplace(self): if not self.is_consolidated(): self.blocks = _consolidate(self.blocks, self.items) self._is_consolidated = True self._known_consolidated = True + self._set_has_sparse() def get(self, item): _, block = self._find_block(item) @@ -1439,10 +1870,12 @@ def set(self, item, value): Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ - value = _block_shape(value,self.ndim-1) - if value.shape[1:] != self.shape[1:]: - raise AssertionError('Shape of new values must be compatible ' - 'with manager shape') + if not isinstance(value, SparseArray): + if value.ndim == self.ndim - 1: + value = value.reshape((1,) + value.shape) + if value.shape[1:] != self.shape[1:]: + raise AssertionError('Shape of new values must be compatible ' + 'with manager shape') def _set_item(item, arr): i, block = self._find_block(item) @@ -1499,6 +1932,7 @@ def insert(self, loc, item, value): def set_items_norename(self, value): value = _ensure_index(value) self.axes[0] = value + self._shape = None for block in self.blocks: block.set_ref_items(value, maybe_rename=False) @@ -1531,7 +1965,7 @@ def _check_have(self, item): if item not in self.items: raise KeyError('no item named %s' % com.pprint_thing(item)) - def reindex_axis(self, new_axis, method=None, axis=0, copy=True): + def reindex_axis(self, new_axis, method=None, axis=0, fill_value=None, limit=None, copy=True): new_axis = 
_ensure_index(new_axis) cur_axis = self.axes[axis] @@ -1539,6 +1973,7 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True): if copy: result = self.copy(deep=True) result.axes[axis] = new_axis + result._shape = None if axis == 0: # patch ref_items, #1823 @@ -1553,12 +1988,12 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True): if method is not None: raise AssertionError('method argument not supported for ' 'axis == 0') - return self.reindex_items(new_axis) + return self.reindex_items(new_axis, copy=copy, fill_value=fill_value) new_axis, indexer = cur_axis.reindex(new_axis, method) - return self.reindex_indexer(new_axis, indexer, axis=axis) + return self.reindex_indexer(new_axis, indexer, axis=axis, fill_value=fill_value) - def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): + def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=None): """ pandas-indexer with -1's only. """ @@ -1572,7 +2007,7 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan): new_axes = list(self.axes) new_axes[axis] = new_axis - return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def _reindex_indexer_items(self, new_items, indexer, fill_value): # TODO: less efficient than I'd like @@ -1605,9 +2040,9 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return self.__class__(new_blocks, [new_items] + self.axes[1:]) - def reindex_items(self, new_items, copy=True, fill_value=np.nan): + def reindex_items(self, new_items, copy=True, fill_value=None): """ """ @@ -1615,7 +2050,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): data = self if not data.is_consolidated(): data = data.consolidate() - return data.reindex_items(new_items) + return data.reindex_items(new_items, copy=copy, fill_value=fill_value) # 
TODO: this part could be faster (!) new_items, indexer = self.items.reindex(new_items) @@ -1635,6 +2070,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): if len(newb.items) > 0: new_blocks.append(newb) + # add a na block if we are missing items mask = indexer == -1 if mask.any(): extra_items = new_items[mask] @@ -1643,11 +2079,13 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): new_blocks.append(na_block) new_blocks = _consolidate(new_blocks, new_items) - return BlockManager(new_blocks, [new_items] + self.axes[1:]) + return self.__class__(new_blocks, [new_items] + self.axes[1:]) - def _make_na_block(self, items, ref_items, fill_value=np.nan): + def _make_na_block(self, items, ref_items, fill_value=None): # TODO: infer dtypes other than float64 from fill_value + if fill_value is None: + fill_value = np.nan block_shape = list(self.shape) block_shape[0] = len(items) @@ -1661,6 +2099,9 @@ def take(self, indexer, new_index=None, axis=1, verify=True): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) + if isinstance(indexer, list): + indexer = np.array(indexer) + indexer = com._ensure_platform_int(indexer) n = len(self.axes[axis]) @@ -1690,7 +2131,7 @@ def merge(self, other, lsuffix=None, rsuffix=None): new_axes = list(this.axes) new_axes[0] = cons_items - return BlockManager(consolidated, new_axes) + return self.__class__(consolidated, new_axes) def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True): to_rename = self.items.intersection(other.items) @@ -1740,7 +2181,7 @@ def rename_axis(self, mapper, axis=1): new_axes = list(self.axes) new_axes[axis] = new_axis - return BlockManager(self.blocks, new_axes) + return self.__class__(self.blocks, new_axes) def rename_items(self, mapper, copydata=True): new_items = Index([mapper(x) for x in self.items]) @@ -1753,7 +2194,7 @@ def rename_items(self, mapper, copydata=True): new_blocks.append(newb) new_axes = list(self.axes) new_axes[0] = new_items - 
return BlockManager(new_blocks, new_axes) + return self.__class__(new_blocks, new_axes) def add_prefix(self, prefix): f = (('%s' % prefix) + '%s').__mod__ @@ -1785,12 +2226,144 @@ def item_dtypes(self): mask = np.zeros(len(self.items), dtype=bool) for i, blk in enumerate(self.blocks): indexer = self.items.get_indexer(blk.items) - result.put(indexer, blk.values.dtype.name) + result.put(indexer, blk.dtype.name) mask.put(indexer, 1) if not (mask.all()): raise AssertionError('Some items were not in any block') return result +class SingleBlockManager(BlockManager): + """ manage a single block with """ + ndim = 1 + _is_consolidated = True + _known_consolidated = True + __slots__ = ['axes', 'blocks', '_block', '_values', '_shape', '_has_sparse'] + + def __init__(self, block, axis, do_integrity_check=False, fastpath=True): + + if isinstance(axis, list): + if len(axis) != 1: + raise ValueError("cannot create SingleBlockManager with more than 1 axis") + axis = axis[0] + + # passed from constructor, single block, single axis + if fastpath: + self.axes = [ axis ] + if isinstance(block, list): + if len(block) != 1: + raise ValueError("cannot create SingleBlockManager with more than 1 block") + block = block[0] + if not isinstance(block, Block): + block = make_block(block, axis, axis, ndim=1, fastpath=True) + + else: + + self.axes = [ _ensure_index(axis) ] + + # create the block here + if isinstance(block, list): + + # provide consolidation to the interleaved_dtype + if len(block) > 1: + dtype = _interleaved_dtype(block) + block = [ b.astype(dtype) for b in block ] + block = _consolidate(block, axis) + + if len(block) != 1: + raise ValueError("cannot create SingleBlockManager with more than 1 block") + block = block[0] + + if not isinstance(block, Block): + block = make_block(block, axis, axis, ndim=1, fastpath=True) + + self.blocks = [ block ] + self._block = self.blocks[0] + self._values = self._block.values + self._has_sparse = self._block.is_sparse + + def 
_post_setstate(self): + self._block = self.blocks[0] + self._values = self._block.values + + @property + def shape(self): + if getattr(self,'_shape',None) is None: + self._shape = tuple([len(self.axes[0])]) + return self._shape + + def reindex(self, new_axis, method=None, limit=None, copy=True): + + # if we are the same and don't copy, just return + if not copy and self.index.equals(new_axis): + return self + + block = self._block.reindex_items_from(new_axis, copy=copy) + + if method is not None or limit is not None: + block = block.interpolate(method=method, limit=limit) + mgr = SingleBlockManager(block, new_axis) + mgr._consolidate_inplace() + return mgr + + def get_slice(self, slobj, raise_on_error=False): + if raise_on_error: + _check_slice_bounds(slobj, self.index) + return self.__class__(self._block._slice(slobj), self.index._getitem_slice(slobj), fastpath=True) + + def set_axis(self, axis, value): + cur_axis = self.axes[axis] + value = _ensure_index(value) + + if len(value) != len(cur_axis): + raise Exception('Length mismatch (%d vs %d)' + % (len(value), len(cur_axis))) + self.axes[axis] = value + self._shape = None + self._block.set_ref_items(self.items, maybe_rename=True) + + def set_ref_items(self, ref_items, maybe_rename=True): + """ we can optimize and our ref_locs are always equal to ref_items """ + if maybe_rename: + self.items = ref_items + self.ref_items = ref_items + + @property + def index(self): + return self.axes[0] + + def convert(self, *args, **kwargs): + """ convert the whole block as one """ + kwargs['by_item'] = False + return self.apply('convert', *args, **kwargs) + + @property + def dtype(self): + return self._block.dtype + + @property + def ftype(self): + return self._block.ftype + + @property + def values(self): + return self._values.view() + + @property + def itemsize(self): + return self._block.itemsize + + @property + def _can_hold_na(self): + return self._block._can_hold_na + + def is_consolidated(self): + return True + + def 
_consolidate_check(self): + pass + + def _consolidate_inplace(self): + pass def construction_error(tot_items, block_shape, axes): """ raise a helpful message about our construction """ @@ -1841,9 +2414,13 @@ def form_blocks(arrays, names, axes): int_items = [] bool_items = [] object_items = [] + sparse_items = [] datetime_items = [] + for k, v in zip(names, arrays): - if issubclass(v.dtype.type, np.floating): + if isinstance(v, SparseArray) or is_sparse_series(v): + sparse_items.append((k,v)) + elif issubclass(v.dtype.type, np.floating): float_items.append((k, v)) elif issubclass(v.dtype.type, np.complexfloating): complex_items.append((k, v)) @@ -1892,6 +2469,10 @@ def form_blocks(arrays, names, axes): object_blocks = _simple_blockify(object_items, items, np.object_) blocks.extend(object_blocks) + if len(sparse_items) > 0: + sparse_blocks = _sparse_blockify(sparse_items, items) + blocks.extend(sparse_blocks) + if len(extra_items): shape = (len(extra_items),) + tuple(len(x) for x in axes[1:]) @@ -1933,20 +2514,33 @@ def _multi_blockify(tuples, ref_items, dtype = None): return new_blocks +def _sparse_blockify(tuples, ref_items, dtype = None): + """ return an array of blocks that potentially have different dtypes (and are sparse) """ + + new_blocks = [] + for names, array in tuples: + + if not isinstance(names, (list,tuple)): + names = [ names ] + items = ref_items[ref_items.isin(names)] + + array = _maybe_to_sparse(array) + block = make_block(array, items, ref_items, klass=SparseBlock, fastpath=True) + new_blocks.append(block) + + return new_blocks + def _stack_arrays(tuples, ref_items, dtype): - from pandas.core.series import Series # fml def _asarray_compat(x): - # asarray shouldn't be called on SparseSeries - if isinstance(x, Series): + if is_series(x): return x.values else: return np.asarray(x) def _shape_compat(x): - # sparseseries - if isinstance(x, Series): + if is_series(x): return len(x), else: return x.shape @@ -1980,7 +2574,6 @@ def 
_blocks_to_series_dict(blocks, index=None): def _interleaved_dtype(blocks): if not len(blocks): return None - from collections import defaultdict counts = defaultdict(lambda: []) for x in blocks: counts[type(x)].append(x) @@ -1999,6 +2592,7 @@ def _lcd_dtype(l): have_float = len(counts[FloatBlock]) > 0 have_complex = len(counts[ComplexBlock]) > 0 have_dt64 = len(counts[DatetimeBlock]) > 0 + have_sparse = len(counts[SparseBlock]) > 0 have_numeric = have_float or have_complex or have_int if (have_object or @@ -2014,28 +2608,30 @@ def _lcd_dtype(l): elif have_complex: return np.dtype('c16') else: - return _lcd_dtype(counts[FloatBlock]) + return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock]) def _consolidate(blocks, items): """ - Merge blocks having same dtype + Merge blocks having same dtype, exclude non-consolidating blocks """ - get_dtype = lambda x: x.dtype.name - # sort by dtype - grouper = itertools.groupby(sorted(blocks, key=get_dtype), - lambda x: x.dtype) + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] - for dtype, group_blocks in grouper: - new_block = _merge_blocks(list(group_blocks), items, dtype) - new_blocks.append(new_block) + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks = _merge_blocks(list(group_blocks), items, dtype=dtype, _can_consolidate=_can_consolidate) + if isinstance(merged_blocks, list): + new_blocks.extend(merged_blocks) + else: + new_blocks.append(merged_blocks) return new_blocks -def _merge_blocks(blocks, items, dtype=None): +def _merge_blocks(blocks, items, dtype=None, _can_consolidate = True): if len(blocks) == 1: return blocks[0] @@ -2044,11 +2640,14 @@ def _merge_blocks(blocks, items, dtype=None): raise AssertionError("_merge_blocks are invalid!") dtype = blocks[0].dtype - new_values = _vstack([ b.values for b in blocks ], dtype) - new_items = blocks[0].items.append([b.items for b in blocks[1:]]) - 
new_block = make_block(new_values, new_items, items) - return new_block.reindex_items_from(items) + if _can_consolidate: + new_values = _vstack([ b.values for b in blocks ], dtype) + new_items = blocks[0].items.append([b.items for b in blocks[1:]]) + new_block = make_block(new_values, new_items, items) + return new_block.reindex_items_from(items) + # no merge + return blocks def _block_shape(values, ndim=1, shape=None): """ guarantee the shape of the values to be at least 1 d """ diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index f841c0dbecd8e..b7ce1c65018da 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -2,7 +2,7 @@ import numpy as np -from pandas.core.common import isnull, notnull +from pandas.core.common import isnull, notnull, _values_from_object import pandas.core.common as com import pandas.core.config as cf import pandas.lib as lib @@ -96,6 +96,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=F """ utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy """ + values = _values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -192,7 +193,7 @@ def get_median(x): mask = notnull(x) if not skipna and not mask.all(): return np.nan - return algos.median(x[mask]) + return algos.median(_values_from_object(x[mask])) if values.dtype != np.float64: values = values.astype('f8') diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 8d6828421c3fa..b656e39d45bd6 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -6,9 +6,8 @@ import operator import sys import numpy as np -from pandas.core.common import (PandasError, _mut_exclusive, - _try_sort, _default_index, - _infer_dtype_from_scalar, +from pandas.core.common import (PandasError, + _try_sort, _default_index, _infer_dtype_from_scalar, notnull) from pandas.core.categorical import Factor from pandas.core.index import (Index, MultiIndex, 
_ensure_index, @@ -166,75 +165,12 @@ class Panel(NDFrame): Copy data from inputs. Only affects DataFrame / 2d ndarray input """ - _AXIS_ORDERS = ['items', 'major_axis', 'minor_axis'] - _AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(_AXIS_ORDERS)]) - _AXIS_ALIASES = { - 'major': 'major_axis', - 'minor': 'minor_axis' - } - _AXIS_NAMES = dict([(i, a) for i, a in enumerate(_AXIS_ORDERS)]) - _AXIS_SLICEMAP = { - 'major_axis': 'index', - 'minor_axis': 'columns' - } - _AXIS_LEN = len(_AXIS_ORDERS) - - # major - _default_stat_axis = 1 - - # info axis - _het_axis = 0 - _info_axis = _AXIS_ORDERS[_het_axis] - - items = lib.AxisProperty(0) - major_axis = lib.AxisProperty(1) - minor_axis = lib.AxisProperty(2) - @property def _constructor(self): return type(self) - # return the type of the slice constructor _constructor_sliced = DataFrame - def _construct_axes_dict(self, axes=None, **kwargs): - """ Return an axes dictionary for myself """ - d = dict([(a, getattr(self, a)) for a in (axes or self._AXIS_ORDERS)]) - d.update(kwargs) - return d - - @staticmethod - def _construct_axes_dict_from(self, axes, **kwargs): - """ Return an axes dictionary for the passed axes """ - d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) - d.update(kwargs) - return d - - def _construct_axes_dict_for_slice(self, axes=None, **kwargs): - """ Return an axes dictionary for myself """ - d = dict([(self._AXIS_SLICEMAP[a], getattr(self, a)) - for a in (axes or self._AXIS_ORDERS)]) - d.update(kwargs) - return d - - __add__ = _arith_method(operator.add, '__add__') - __sub__ = _arith_method(operator.sub, '__sub__') - __truediv__ = _arith_method(operator.truediv, '__truediv__') - __floordiv__ = _arith_method(operator.floordiv, '__floordiv__') - __mul__ = _arith_method(operator.mul, '__mul__') - __pow__ = _arith_method(operator.pow, '__pow__') - - __radd__ = _arith_method(operator.add, '__radd__') - __rmul__ = _arith_method(operator.mul, '__rmul__') - __rsub__ = _arith_method(lambda x, y: y - x, 
'__rsub__') - __rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__') - __rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__') - __rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__') - - if not py3compat.PY3: - __div__ = _arith_method(operator.div, '__div__') - __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__') - def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, copy=False, dtype=None): self._init_data( @@ -269,18 +205,9 @@ def _init_data(self, data, copy, dtype, **kwargs): NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype) - @classmethod - def _from_axes(cls, data, axes): - # for construction from BlockManager - if isinstance(data, BlockManager): - return cls(data) - else: - d = cls._construct_axes_dict_from(cls, axes, copy=False) - return cls(data, **d) - def _init_dict(self, data, axes, dtype=None): from pandas.util.compat import OrderedDict - haxis = axes.pop(self._het_axis) + haxis = axes.pop(self._info_axis_number) # prefilter if haxis passed if haxis is not None: @@ -324,10 +251,6 @@ def _init_dict(self, data, axes, dtype=None): def _init_arrays(self, arrays, arr_names, axes): return create_block_manager_from_arrays(arrays, arr_names, axes) - @property - def shape(self): - return [len(getattr(self, a)) for a in self._AXIS_ORDERS] - @classmethod def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ @@ -367,16 +290,35 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): ks = d['data'].keys() if not isinstance(d['data'],OrderedDict): ks = list(sorted(ks)) - d[cls._info_axis] = Index(ks) + d[cls._info_axis_name] = Index(ks) return cls(**d) + # Comparison methods + __add__ = _arith_method(operator.add, '__add__') + __sub__ = _arith_method(operator.sub, '__sub__') + __truediv__ = _arith_method(operator.truediv, '__truediv__') + __floordiv__ = _arith_method(operator.floordiv, '__floordiv__') + __mul__ = _arith_method(operator.mul, '__mul__') + __pow__ = 
_arith_method(operator.pow, '__pow__') + + __radd__ = _arith_method(operator.add, '__radd__') + __rmul__ = _arith_method(operator.mul, '__rmul__') + __rsub__ = _arith_method(lambda x, y: y - x, '__rsub__') + __rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__') + __rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__') + __rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__') + + if not py3compat.PY3: + __div__ = _arith_method(operator.div, '__div__') + __rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__') + def __getitem__(self, key): - if isinstance(getattr(self, self._info_axis), MultiIndex): + if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) return super(Panel, self).__getitem__(key) def _getitem_multilevel(self, key): - info = getattr(self, self._info_axis) + info = self._info_axis loc = info.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_index = info[loc] @@ -386,7 +328,7 @@ def _getitem_multilevel(self, key): new_values = self.values[slices] d = self._construct_axes_dict(self._AXIS_ORDERS[1:]) - d[self._info_axis] = result_index + d[self._info_axis_name] = result_index result = self._constructor(new_values, **d) return result else: @@ -412,30 +354,16 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): return create_block_manager_from_blocks([ values ], fixed_axes) - #---------------------------------------------------------------------- - # Array interface - - def __array__(self, dtype=None): - return self.values - - def __array_wrap__(self, result): - d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d) - #---------------------------------------------------------------------- # Comparison methods - def _indexed_same(self, other): - return all([getattr(self, a).equals(getattr(other, a)) - for a in self._AXIS_ORDERS]) - def _compare_constructor(self, other, func): if not self._indexed_same(other): raise Exception('Can only compare 
identically-labeled ' 'same type objects') new_data = {} - for col in getattr(self, self._info_axis): + for col in self._info_axis: new_data[col] = func(self[col], other[col]) d = self._construct_axes_dict(copy=False) @@ -446,12 +374,6 @@ def _compare_constructor(self, other, func): __or__ = _arith_method(operator.or_, '__or__') __xor__ = _arith_method(operator.xor, '__xor__') - def __neg__(self): - return -1 * self - - def __invert__(self): - return -1 * self - # Comparison methods __eq__ = _comp_method(operator.eq, '__eq__') __ne__ = _comp_method(operator.ne, '__ne__') @@ -470,28 +392,6 @@ def __invert__(self): #---------------------------------------------------------------------- # Magic methods - def __str__(self): - """ - Return a string representation for a particular Panel - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Panel - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Panel @@ -519,25 +419,6 @@ def axis_pretty(a): [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS]) return output - def __repr__(self): - """ - Return a string representation for a particular Panel - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) - - def __iter__(self): - return iter(getattr(self, self._info_axis)) - - def iteritems(self): - for h in getattr(self, self._info_axis): - yield h, self[h] - - # Name that won't get automatically converted to items by 2to3. items is - # already in use for the first axis. 
- iterkv = iteritems - def _get_plane_axes(self, axis): """ Get my plane axes: these are already @@ -558,10 +439,6 @@ def _get_plane_axes(self, axis): return index, columns - def _wrap_array(self, arr, axes, copy=False): - d = self._construct_axes_dict_from(self, axes, copy=copy) - return self._constructor(arr, **d) - fromDict = from_dict def to_sparse(self, fill_value=None, kind='block'): @@ -603,16 +480,10 @@ def to_excel(self, path, na_rep=''): df.to_excel(writer, name, na_rep=na_rep) writer.save() - # TODO: needed? - def keys(self): - return list(getattr(self, self._info_axis)) - - def _get_values(self): + def as_matrix(self): self._consolidate_inplace() return self._data.as_matrix() - values = property(fget=_get_values) - #---------------------------------------------------------------------- # Getting and setting elements @@ -670,7 +541,7 @@ def set_value(self, *args): args = list(args) likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1]) made_bigger = not np.array_equal( - axes[0], getattr(self, self._info_axis)) + axes[0], self._info_axis) # how to make this logic simpler? if made_bigger: com._possibly_cast_item(result, args[0], likely_dtype) @@ -681,14 +552,6 @@ def _box_item_values(self, key, values): d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]) return self._constructor_sliced(values, **d) - def __getattr__(self, name): - """After regular attribute access, try looking up the name of an item. 
- This allows simpler access to items for interactive use.""" - if name in getattr(self, self._info_axis): - return self[name] - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) - def _slice(self, slobj, axis=0, raise_on_error=False): new_data = self._data.get_slice(slobj, axis=axis, @@ -715,35 +578,6 @@ def __setitem__(self, key, value): mat = mat.reshape(tuple([1]) + shape[1:]) NDFrame._set_item(self, key, mat) - def pop(self, item): - """ - Return item slice from panel and delete from panel - - Parameters - ---------- - key : object - Must be contained in panel's items - - Returns - ------- - y : DataFrame - """ - return NDFrame.pop(self, item) - - def __getstate__(self): - "Returned pickled representation of the panel" - return self._data - - def __setstate__(self, state): - # old Panel pickle - if isinstance(state, BlockManager): - self._data = state - elif len(state) == 4: # pragma: no cover - self._unpickle_panel_compat(state) - else: # pragma: no cover - raise ValueError('unrecognized pickle') - self._item_cache = {} - def _unpickle_panel_compat(self, state): # pragma: no cover "Unpickle the panel" _unpickle = com._unpickle_array @@ -776,62 +610,15 @@ def conform(self, frame, axis='items'): axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes)) - def reindex(self, major=None, minor=None, method=None, - major_axis=None, minor_axis=None, copy=True, **kwargs): - """ - Conform panel to new axis or axes - - Parameters - ---------- - major : Index or sequence, default None - Can also use 'major_axis' keyword - items : Index or sequence, default None - minor : Index or sequence, default None - Can also use 'minor_axis' keyword - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - 
copy : boolean, default True - Return a new object, even if the passed indexes are the same - - Returns - ------- - Panel (new object) - """ - result = self - - major = _mut_exclusive(major, major_axis) - minor = _mut_exclusive(minor, minor_axis) - al = self._AXIS_LEN - + def _needs_reindex_multi(self, axes, method, level): # only allowing multi-index on Panel (and not > dims) - if (method is None and not self._is_mixed_type and al <= 3): - items = kwargs.get('items') - if com._count_not_none(items, major, minor) == 3: - try: - return self._reindex_multi(items, major, minor) - except: - pass - - if major is not None: - result = result._reindex_axis(major, method, al - 2, copy) - - if minor is not None: - result = result._reindex_axis(minor, method, al - 1, copy) - - for i, a in enumerate(self._AXIS_ORDERS[0:al - 2]): - a = kwargs.get(a) - if a is not None: - result = result._reindex_axis(a, method, i, copy) - - if result is self and copy: - raise ValueError('Must specify at least one axis') + return method is None and not self._is_mixed_type and self._AXIS_LEN <= 3 and com._count_not_none(*axes.values()) == 3 - return result - - def _reindex_multi(self, items, major, minor): + def _reindex_multi(self, axes, copy, fill_value): + """ we are guaranteed non-Nones in the axes! """ + items = axes['items'] + major = axes['major_axis'] + minor = axes['minor_axis'] a0, a1, a2 = len(items), len(major), len(minor) values = self.values @@ -857,52 +644,6 @@ def _reindex_multi(self, items, major, minor): return Panel(new_values, items=new_items, major_axis=new_major, minor_axis=new_minor) - def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True): - """Conform Panel to new index with optional filling logic, placing - NA/NaN in locations having no value in the previous index. 
A new object - is produced unless the new index is equivalent to the current one and - copy=False - - Parameters - ---------- - index : array-like, optional - New labels / index to conform to. Preferably an Index object to - avoid duplicating data - axis : {0, 1} - 0 -> index (rows) - 1 -> columns - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - copy : boolean, default True - Return a new object, even if the passed indexes are the same - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - - Returns - ------- - reindexed : Panel - """ - self._consolidate_inplace() - return self._reindex_axis(labels, method, axis, copy) - - def reindex_like(self, other, method=None): - """ return an object with matching indicies to myself - - Parameters - ---------- - other : Panel - method : string or None - - Returns - ------- - reindexed : Panel - """ - d = other._construct_axes_dict(method=method) - return self.reindex(**d) - def dropna(self, axis=0, how='any'): """ Drop 2D from panel, holding passed axis constant @@ -925,7 +666,7 @@ def dropna(self, axis=0, how='any'): values = self.values mask = com.notnull(values) - for ax in reversed(sorted(set(range(3)) - set([axis]))): + for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) @@ -1120,76 +861,6 @@ def groupby(self, function, axis='major'): axis = self._get_axis_number(axis) return PanelGroupBy(self, function, axis=axis) - def swapaxes(self, axis1='major', axis2='minor', copy=True): - """ - Interchange axes and swap values axes appropriately - - Returns - ------- - y : Panel (new object) - """ - i = self._get_axis_number(axis1) - j = self._get_axis_number(axis2) - - if 
i == j: - raise ValueError('Cannot specify the same axis') - - mapping = {i: j, j: i} - - new_axes = (self._get_axis(mapping.get(k, k)) - for k in range(self._AXIS_LEN)) - new_values = self.values.swapaxes(i, j) - if copy: - new_values = new_values.copy() - - return self._constructor(new_values, *new_axes) - - def transpose(self, *args, **kwargs): - """ - Permute the dimensions of the Panel - - Parameters - ---------- - items : int or one of {'items', 'major', 'minor'} - major : int or one of {'items', 'major', 'minor'} - minor : int or one of {'items', 'major', 'minor'} - copy : boolean, default False - Make a copy of the underlying data. Mixed-dtype data will - always result in a copy - - Examples - -------- - >>> p.transpose(2, 0, 1) - >>> p.transpose(2, 0, 1, copy=True) - - Returns - ------- - y : Panel (new object) - """ - - # construct the args - args = list(args) - for a in self._AXIS_ORDERS: - if not a in kwargs: - try: - kwargs[a] = args.pop(0) - except (IndexError): - raise ValueError( - "not enough arguments specified to transpose!") - - axes = [self._get_axis_number(kwargs[a]) for a in self._AXIS_ORDERS] - - # we must have unique axes - if len(axes) != len(set(axes)): - raise ValueError('Must specify %s unique axes' % self._AXIS_LEN) - - new_axes = self._construct_axes_dict_from( - self, [self._get_axis(x) for x in axes]) - new_values = self.values.transpose(tuple(axes)) - if kwargs.get('copy') or (len(args) and args[-1]): - new_values = new_values.copy() - return self._constructor(new_values, **new_axes) - def to_frame(self, filter_observations=True): """ Transform wide format into long (stacked) format as DataFrame @@ -1237,21 +908,6 @@ def to_frame(self, filter_observations=True): to_long = deprecate('to_long', to_frame) toLong = deprecate('toLong', to_frame) - def filter(self, items): - """ - Restrict items in panel to input list - - Parameters - ---------- - items : sequence - - Returns - ------- - y : Panel - """ - intersection = 
self.items.intersection(items) - return self.reindex(items=intersection) - def apply(self, func, axis='major'): """ Apply @@ -1280,7 +936,7 @@ def _reduce(self, op, axis=0, skipna=True): result = f(self.values) axes = self._get_plane_axes(axis_name) - if result.ndim == 2 and axis_name != self._info_axis: + if result.ndim == 2 and axis_name != self._info_axis_name: result = result.T return self._constructor_sliced(result, @@ -1289,7 +945,7 @@ def _reduce(self, op, axis=0, skipna=True): def _wrap_result(self, result, axis): axis = self._get_axis_name(axis) axes = self._get_plane_axes(axis) - if result.ndim == 2 and axis != self._info_axis: + if result.ndim == 2 and axis != self._info_axis_name: result = result.T # do we have reduced dimensionalility? @@ -1460,9 +1116,9 @@ def update(self, other, join='left', overwrite=True, filter_func=None, if not isinstance(other, self._constructor): other = self._constructor(other) - axis = self._info_axis - axis_values = getattr(self, axis) - other = other.reindex(**{axis: axis_values}) + axis_name = self._info_axis_name + axis_values = self._info_axis + other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: self[frame].update(other[frame], join, overwrite, filter_func, @@ -1709,16 +1365,19 @@ def min(self, axis='major', skipna=True): return self._reduce(nanops.nanmin, axis=axis, skipna=skipna) cls.min = min +Panel._setup_axes(axes = ['items', 'major_axis', 'minor_axis'], + info_axis = 0, + stat_axis = 1, + aliases = { 'major': 'major_axis', + 'minor': 'minor_axis' }, + slicers = { 'major_axis': 'index', + 'minor_axis': 'columns' }) Panel._add_aggregate_operations() WidePanel = Panel LongPanel = DataFrame -def _monotonic(arr): - return not (arr[1:] < arr[:-1]).any() - - def install_ipython_completers(): # pragma: no cover """Register the Panel type with IPython's tab completion machinery, so that it knows about accessing column names as attributes.""" diff --git a/pandas/core/panel4d.py 
b/pandas/core/panel4d.py index 4113832f086fb..e46b7aa315669 100644 --- a/pandas/core/panel4d.py +++ b/pandas/core/panel4d.py @@ -1,44 +1,42 @@ -""" Panel4D: a 4-d dict like collection of panels """ - -from pandas.core.panelnd import create_nd_panel_factory -from pandas.core.panel import Panel - -Panel4D = create_nd_panel_factory( - klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - axis_slices={'labels': 'labels', 'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, - stat_axis=2, - ns=dict(__doc__= """ - Represents a 4 dimensonal structured - - Parameters - ---------- - data : ndarray (labels x items x major x minor), or dict of Panels - - labels : Index or array-like : axis=0 - items : Index or array-like : axis=1 - major_axis : Index or array-like: axis=2 - minor_axis : Index or array-like: axis=3 - - dtype : dtype, default None - Data type to force, otherwise infer - copy : boolean, default False - Copy data from inputs. 
Only affects DataFrame / 2d ndarray input - """ - - ) - ) - - -def panel4d_init(self, data=None, labels=None, items=None, major_axis=None, - minor_axis=None, copy=False, dtype=None): - - self._init_data(data=data, labels=labels, items=items, - major_axis=major_axis, minor_axis=minor_axis, - copy=copy, dtype=dtype) - -Panel4D.__init__ = panel4d_init +""" Panel4D: a 4-d dict like collection of panels """ + +from pandas.core.panelnd import create_nd_panel_factory +from pandas.core.panel import Panel + +Panel4D = create_nd_panel_factory( + klass_name = 'Panel4D', + orders = ['labels', 'items', 'major_axis', 'minor_axis'], + slices = {'labels': 'labels', 'items': 'items', + 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, + slicer = Panel, + aliases = {'major': 'major_axis', 'minor': 'minor_axis'}, + stat_axis = 2, + ns = dict(__doc__= """ + Represents a 4 dimensonal structured + + Parameters + ---------- + data : ndarray (labels x items x major x minor), or dict of Panels + + labels : Index or array-like : axis=0 + items : Index or array-like : axis=1 + major_axis : Index or array-like: axis=2 + minor_axis : Index or array-like: axis=3 + + dtype : dtype, default None + Data type to force, otherwise infer + copy : boolean, default False + Copy data from inputs. 
Only affects DataFrame / 2d ndarray input + """ + ) + ) + +def panel4d_init(self, data=None, labels=None, items=None, major_axis=None, + minor_axis=None, copy=False, dtype=None): + + self._init_data(data=data, labels=labels, items=items, + major_axis=major_axis, minor_axis=minor_axis, + copy=copy, dtype=dtype) + +Panel4D.__init__ = panel4d_init diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py index 08ff3b70dcb13..eb15b7e56e451 100644 --- a/pandas/core/panelnd.py +++ b/pandas/core/panelnd.py @@ -3,20 +3,21 @@ import pandas.lib as lib -def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_aliases=None, stat_axis=2,ns=None): + +def create_nd_panel_factory(klass_name, orders, slices, slicer, aliases=None, stat_axis=2, info_axis=0, ns=None): """ manufacture a n-d class: parameters ---------- - klass_name : the klass name - axis_orders : the names of the axes in order (highest to lowest) - axis_slices : a dictionary that defines how the axes map to the sliced axis - slicer : the class representing a slice of this panel - axis_aliases: a dictionary defining aliases for various axes + klass_name : the klass name + orders : the names of the axes in order (highest to lowest) + slices : a dictionary that defines how the axes map to the sliced axis + slicer : the class representing a slice of this panel + aliases : a dictionary defining aliases for various axes default = { major : major_axis, minor : minor_axis } - stat_axis : the default statistic axis + stat_axis : the default statistic axis default = 2 - het_axis : the info axis + info_axis : the info axis returns @@ -38,23 +39,15 @@ def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_a ns = {} if not ns else ns klass = type(klass_name, (slicer,), ns) - # add the class variables - klass._AXIS_ORDERS = axis_orders - klass._AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(axis_orders)]) - klass._AXIS_ALIASES = axis_aliases or dict() - klass._AXIS_NAMES = 
dict([(i, a) for i, a in enumerate(axis_orders)]) - klass._AXIS_SLICEMAP = axis_slices - klass._AXIS_LEN = len(axis_orders) - klass._default_stat_axis = stat_axis - klass._het_axis = 0 - klass._info_axis = axis_orders[klass._het_axis] + # setup the axes + klass._setup_axes(axes = orders, + info_axis = info_axis, + stat_axis = stat_axis, + aliases = aliases, + slicers = slices) klass._constructor_sliced = slicer - # add the axes - for i, a in enumerate(axis_orders): - setattr(klass, a, lib.AxisProperty(i)) - #### define the methods #### def __init__(self, *args, **kwargs): if not (kwargs.get('data') or len(args)): diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 8595e2a91906d..00f2d39a19d5c 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -295,7 +295,7 @@ def pivot(self, index=None, columns=None, values=None): return indexed.unstack(columns) else: indexed = Series(self[values].values, - index=[self[index], self[columns]]) + index=MultiIndex.from_arrays([self[index], self[columns]])) return indexed.unstack(columns) diff --git a/pandas/core/series.py b/pandas/core/series.py index 7d9303fa75acd..f82b5b9620d0d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -17,10 +17,14 @@ from pandas.core.common import (isnull, notnull, _is_bool_indexer, _default_index, _maybe_promote, _maybe_upcast, _asarray_tuplesafe, is_integer_dtype, - _infer_dtype_from_scalar, is_list_like) + _infer_dtype_from_scalar, is_list_like, _values_from_object, + is_sparse_array_like) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, _ensure_index, _handle_legacy_indexes) -from pandas.core.indexing import _SeriesIndexer, _check_bool_indexer, _check_slice_bounds +from pandas.core.indexing import (_SeriesIndexer, _check_bool_indexer, _check_slice_bounds, + _is_index_slice, _maybe_convert_indices) +from pandas.core import generic +from pandas.core.internals import SingleBlockManager from pandas.tseries.index import DatetimeIndex from 
pandas.tseries.period import PeriodIndex, Period from pandas.util import py3compat @@ -31,7 +35,6 @@ import pandas.core.common as com import pandas.core.datetools as datetools import pandas.core.format as fmt -import pandas.core.generic as generic import pandas.core.nanops as nanops from pandas.util.decorators import Appender, Substitution, cache_readonly @@ -42,7 +45,7 @@ from pandas.compat.scipy import scoreatpercentile as _quantile from pandas.core.config import get_option -__all__ = ['Series', 'TimeSeries'] +__all__ = ['Series'] _np_version = np.version.short_version _np_version_under1p6 = LooseVersion(_np_version) < '1.6' @@ -64,7 +67,7 @@ def na_op(x, y): result = op(x, y) except TypeError: result = pa.empty(len(x), dtype=x.dtype) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array,Series)): mask = notnull(x) & notnull(y) result[mask] = op(x[mask], y[mask]) else: @@ -93,14 +96,14 @@ def convert_to_array(values): values = np.array([values]) inferred_type = lib.infer_dtype(values) if inferred_type in set(['datetime64','datetime','date','time']): - if isinstance(values, pa.Array) and com.is_datetime64_dtype(values): + if isinstance(values, (pa.Array, Series)) and com.is_datetime64_dtype(values): pass else: values = tslib.array_to_datetime(values) elif inferred_type in set(['timedelta','timedelta64']): # need to convert timedelta to ns here # safest to convert it to an object arrany to process - if isinstance(values, pa.Array) and com.is_timedelta64_dtype(values): + if isinstance(values, (pa.Array, Series)) and com.is_timedelta64_dtype(values): pass else: values = com._possibly_cast_to_timedelta(values) @@ -151,8 +154,8 @@ def wrap_results(x): if self.index.equals(other.index): name = _maybe_match_name(self, other) - return Series(wrap_results(na_op(lvalues, rvalues)), - index=self.index, name=name, dtype=dtype) + return self._constructor(wrap_results(na_op(lvalues, rvalues)), + index=self.index, dtype=dtype, name=name) join_idx, lidx, ridx = 
self.index.join(other.index, how='outer', return_indexers=True) @@ -166,19 +169,19 @@ def wrap_results(x): arr = na_op(lvalues, rvalues) name = _maybe_match_name(self, other) - return Series(wrap_results(arr), index=join_idx, name=name,dtype=dtype) + return self._constructor(wrap_results(arr), index=join_idx, name=name,dtype=dtype) elif isinstance(other, DataFrame): return NotImplemented else: # scalars if hasattr(lvalues,'values'): lvalues = lvalues.values - return Series(wrap_results(na_op(lvalues, rvalues)), + return self._constructor(wrap_results(na_op(lvalues, rvalues)), index=self.index, name=self.name, dtype=dtype) return wrapper -def _comp_method(op, name): +def _comp_method(op, name, masker = False): """ Wrapper function for Series arithmetic operations, to avoid code duplication. @@ -188,7 +191,7 @@ def na_op(x, y): if isinstance(y, list): y = lib.list_to_object_array(y) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array,Series)): if y.dtype != np.object_: result = lib.vec_compare(x, y.astype(np.object_), op) else: @@ -207,16 +210,19 @@ def wrapper(self, other): name = _maybe_match_name(self, other) if len(self) != len(other): raise ValueError('Series lengths must match to compare') - return Series(na_op(self.values, other.values), + return self._constructor(na_op(self.values, other.values), index=self.index, name=name) elif isinstance(other, DataFrame): # pragma: no cover return NotImplemented - elif isinstance(other, pa.Array): + elif isinstance(other, (pa.Array,Series)): if len(self) != len(other): raise ValueError('Lengths must match to compare') - return Series(na_op(self.values, np.asarray(other)), + return self._constructor(na_op(self.values, np.asarray(other)), index=self.index, name=self.name) else: + + mask = isnull(self) + values = self.values other = _index.convert_scalar(values, other) @@ -228,8 +234,17 @@ def wrapper(self, other): if np.isscalar(res): raise TypeError('Could not compare %s type with Series' % type(other)) - return 
Series(na_op(values, other), - index=self.index, name=self.name) + + # always return a full value series here + res = _values_from_object(res) + + res = Series(res, index=self.index, name=self.name, dtype='bool') + + # mask out the invalids + if mask.any(): + res[mask.values] = masker + + return res return wrapper @@ -245,7 +260,7 @@ def na_op(x, y): if isinstance(y, list): y = lib.list_to_object_array(y) - if isinstance(y, pa.Array): + if isinstance(y, (pa.Array,Series)): if (x.dtype == np.bool_ and y.dtype == np.bool_): # pragma: no cover result = op(x, y) # when would this be hit? @@ -263,13 +278,13 @@ def wrapper(self, other): if isinstance(other, Series): name = _maybe_match_name(self, other) - return Series(na_op(self.values, other.values), + return self._constructor(na_op(self.values, other.values), index=self.index, name=name) elif isinstance(other, DataFrame): return NotImplemented else: # scalars - return Series(na_op(self.values, other), + return self._constructor(na_op(self.values, other), index=self.index, name=self.name) return wrapper @@ -322,13 +337,13 @@ def _flex_method(op, name): def f(self, other, level=None, fill_value=None): if isinstance(other, Series): return self._binop(other, op, level=level, fill_value=fill_value) - elif isinstance(other, (pa.Array, list, tuple)): + elif isinstance(other, (pa.Array, Series, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') - return self._binop(Series(other, self.index), op, + return self._binop(self._constructor(other, self.index), op, level=level, fill_value=fill_value) else: - return Series(op(self.values, other), self.index, + return self._constructor(op(self.values, other), self.index, name=self.name) f.__name__ = name @@ -338,8 +353,8 @@ def f(self, other, level=None, fill_value=None): def _unbox(func): @Appender(func.__doc__) def f(self, *args, **kwargs): - result = func(self, *args, **kwargs) - if isinstance(result, pa.Array) and result.ndim == 0: + result = 
func(self.values, *args, **kwargs) + if isinstance(result, (pa.Array, Series)) and result.ndim == 0: # return NumPy type return result.dtype.type(result.item()) else: # pragma: no cover @@ -377,200 +392,306 @@ def _make_stat_func(nanop, name, shortname, na_action=_doc_exclude_na, def f(self, axis=0, dtype=None, out=None, skipna=True, level=None): if level is not None: return self._agg_by_level(shortname, level=level, skipna=skipna) - return nanop(self.values, skipna=skipna) + return nanop(_values_from_object(self), skipna=skipna) f.__name__ = shortname return f #---------------------------------------------------------------------- # Series class +class Series(generic.NDFrame): + """One-dimensional ndarray with axis labels (including time +series). Labels need not be unique but must be any hashable type. The object +supports both integer- and label-based indexing and provides a host of methods +for performing operations involving the index. Statistical methods from ndarray +have been overridden to automatically exclude missing data (currently +represented as NaN) -class Series(pa.Array, generic.PandasObject): - """ - One-dimensional ndarray with axis labels (including time series). - Labels need not be unique but must be any hashable type. The object - supports both integer- and label-based indexing and provides a host of - methods for performing operations involving the index. Statistical - methods from ndarray have been overridden to automatically exclude - missing data (currently represented as NaN) +Operations between Series (+, -, /, *, **) align values based on their +associated index values-- they need not be the same length. The result +index will be the sorted union of the two indexes. + +Parameters +---------- +data : array-like, dict, or scalar value + Contains data stored in Series +index : array-like or Index (1d) + + Values must be unique and hashable, same length as data. 
Index object + (or other iterable of same length as data) Will default to + np.arange(len(data)) if not provided. If both a dict and index sequence + are used, the index will override the keys found in the dict. +""" - Operations between Series (+, -, /, *, **) align values based on their - associated index values-- they need not be the same length. The result - index will be the sorted union of the two indexes. + def __init__(self, data=None, index=None, dtype=None, name=None, + copy=False, fastpath=False): - Parameters - ---------- - data : array-like, dict, or scalar value - Contains data stored in Series - index : array-like or Index (1d) - Values must be unique and hashable, same length as data. Index - object (or other iterable of same length as data) Will default to - np.arange(len(data)) if not provided. If both a dict and index - sequence are used, the index will override the keys found in the - dict. - dtype : numpy.dtype or None - If None, dtype will be inferred copy : boolean, default False Copy - input data - copy : boolean, default False - """ - _AXIS_NUMBERS = { - 'index': 0 - } + # we are called internally, so short-circuit + if fastpath: - _AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems()) + # data is an ndarray, index is defined + if not isinstance(data, SingleBlockManager): + data = SingleBlockManager(data, index, fastpath=True) + if copy: + data = data.copy() + if index is None: + index = data.index - def __new__(cls, data=None, index=None, dtype=None, name=None, - copy=False): - if data is None: - data = {} + else: - if index is not None: - index = _ensure_index(index) + if data is None: + data = {} - if isinstance(data, Series): - if name is None: - name = data.name + if index is not None: + index = _ensure_index(index) - if index is None: - index = data.index - else: - data = data.reindex(index).values - elif isinstance(data, dict): - if index is None: - from pandas.util.compat import OrderedDict - if isinstance(data, 
OrderedDict): - index = Index(data) + if isinstance(data, pa.Array): + pass + elif isinstance(data, Series): + if name is None: + name = data.name + if index is None: + index = data.index else: - index = Index(sorted(data)) - try: - if isinstance(index, DatetimeIndex): - # coerce back to datetime objects for lookup - data = lib.fast_multiget(data, index.astype('O'), - default=pa.NA) - elif isinstance(index, PeriodIndex): + data = data.reindex(index, copy=copy) + data = data._data + elif isinstance(data, dict): + if index is None: + from pandas.util.compat import OrderedDict + if isinstance(data, OrderedDict): + index = Index(data) + else: + index = Index(sorted(data)) + try: + if isinstance(index, DatetimeIndex): + # coerce back to datetime objects for lookup + data = lib.fast_multiget(data, index.astype('O'), + default=pa.NA) + elif isinstance(index, PeriodIndex): + data = [data.get(i, nan) for i in index] + else: + data = lib.fast_multiget(data, index.values, + default=pa.NA) + except TypeError: data = [data.get(i, nan) for i in index] + + elif isinstance(data, SingleBlockManager): + if index is None: + index = data.index else: - data = lib.fast_multiget(data, index.values, - default=pa.NA) - except TypeError: - data = [data.get(i, nan) for i in index] - elif isinstance(data, types.GeneratorType): - data = list(data) - elif isinstance(data, set): - raise TypeError('Set value is unordered') + data = data.reindex(index, copy=copy) + elif isinstance(data, types.GeneratorType): + data = list(data) + elif isinstance(data, set): + raise TypeError('Set value is unordered') + else: - if dtype is not None: - dtype = np.dtype(dtype) + # handle sparse passed here (and force conversion) + if is_sparse_array_like(data): + data = data.to_dense() - subarr = _sanitize_array(data, index, dtype, copy, - raise_cast_failure=True) + if index is None: + index = _default_index(len(data)) - if not isinstance(subarr, pa.Array): - return subarr + # create/copy the manager + if 
isinstance(data, SingleBlockManager): + if dtype is not None: + data = data.astype(dtype,copy=copy) + elif copy: + data = data.copy() + else: + data = _sanitize_array(data, index, dtype, copy, + raise_cast_failure=True) - if index is None: - index = _default_index(len(subarr)) + data = SingleBlockManager(data, index, fastpath=True) - # Change the class of the array to be the subclass type. - if index.is_all_dates: - if not isinstance(index, (DatetimeIndex, PeriodIndex)): - index = DatetimeIndex(index) - subarr = subarr.view(TimeSeries) - else: - subarr = subarr.view(Series) - subarr.index = index - subarr.name = name - return subarr + generic.NDFrame.__init__(self, data, fastpath=True) - def _make_time_series(self): - # oh boy #2139 - self.__class__ = TimeSeries + object.__setattr__(self,'name',name) + self._set_axis(0,index,fastpath=True) @classmethod - def from_array(cls, arr, index=None, name=None, copy=False): - """ - Simplified alternate constructor - """ - if copy: - arr = arr.copy() - - klass = Series - if index.is_all_dates: - if not isinstance(index, (DatetimeIndex, PeriodIndex)): - index = DatetimeIndex(index) - klass = TimeSeries + def from_array(cls, arr, index=None, name=None, copy=False, fastpath=False): - result = arr.view(klass) - result.index = index - result.name = name + # return a sparse series here + if is_sparse_array_like(arr): + from pandas.sparse.series import SparseSeries + cls = SparseSeries - return result - - def __init__(self, data=None, index=None, dtype=None, name=None, - copy=False): - pass + return cls(arr, index=index, name=name, copy=copy, fastpath=fastpath) @property def _constructor(self): return Series + # types @property def _can_hold_na(self): - return not is_integer_dtype(self.dtype) + return self._data._can_hold_na - def __hash__(self): - raise TypeError('unhashable type') + @property + def is_time_series(self): + return self._subtyp in ['time_series','sparse_time_series'] _index = None - index = lib.SeriesIndex() - def 
__array_finalize__(self, obj): + def _set_axis(self, axis, labels, fastpath=False): + """ override generic, we want to set the _typ here """ + + if not fastpath: + labels = _ensure_index(labels) + + is_all_dates = labels.is_all_dates + if is_all_dates: + from pandas.tseries.index import DatetimeIndex + from pandas.tseries.period import PeriodIndex + if not isinstance(labels, (DatetimeIndex, PeriodIndex)): + labels = DatetimeIndex(labels) + + # need to set here becuase we changed the index + if fastpath: + self._data.set_axis(axis, labels) + self._set_subtyp(is_all_dates) + + object.__setattr__(self,'_index',labels) + if not fastpath: + self._data.set_axis(axis, labels) + + def _set_subtyp(self, is_all_dates): + if is_all_dates: + object.__setattr__(self,'_subtyp','time_series') + else: + object.__setattr__(self,'_subtyp','series') + + # ndarray compatibility + @property + def flags(self): + return self.values.flags + + @property + def dtype(self): + return self._data.dtype + + @property + def ftype(self): + return self._data.ftype + + @property + def shape(self): + return self._data.shape + + @property + def ndim(self): + return 1 + + @property + def base(self): + return self.values.base + + def ravel(self): + return self.values.ravel() + + def transpose(self): + """ support for compatiblity """ + return self + + T = property(transpose) + + def nonzero(self): + """ numpy like, returns same as nonzero """ + return self.values.nonzero() + + def put(self, *args, **kwargs): + self.values.put(*args, **kwargs) + + def __len__(self): + return len(self._data) + + @property + def size(self): + return self.__len__() + + def view(self, dtype = None): + return self._constructor(self.values.view(dtype),index=self.index,name=self.name) + + def __array__(self, result = None): + """ the array interface, return my values """ + return self.values + + def __array_wrap__(self, result): """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. 
+ Gets called prior to a ufunc (and after) """ - self._index = getattr(obj, '_index', None) - self.name = getattr(obj, 'name', None) + return self._constructor(result, index=self.index, name=self.name, copy=False) def __contains__(self, key): return key in self.index - def __reduce__(self): - """Necessary for making this object picklable""" - object_state = list(ndarray.__reduce__(self)) - subclass_state = (self.index, self.name) - object_state[2] = (object_state[2], subclass_state) - return tuple(object_state) - - def __setstate__(self, state): - """Necessary for making this object picklable""" - nd_state, own_state = state - ndarray.__setstate__(self, nd_state) + # we are preserving name here + def __getstate__(self): + return dict(_data = self._data, name = self.name) + + def _unpickle_series_compat(self, state): + if isinstance(state, dict): + self._data = state['_data'] + self.name = state['name'] + self.index = self._data.index + + elif isinstance(state, tuple): + + # < 0.12 series pickle + + nd_state, own_state = state + + # recreate the ndarray + data = np.empty(nd_state[1],dtype=nd_state[2]) + np.ndarray.__setstate__(data, nd_state) + + # backwards compat + index, name = own_state[0], None + if len(own_state) > 1: + name = own_state[1] + index = _handle_legacy_indexes([index])[0] + + # recreate + self._data = SingleBlockManager(data, index, fastpath=True) + self.index = index + self.name = name - # backwards compat - index, name = own_state[0], None - if len(own_state) > 1: - name = own_state[1] - - self.index = _handle_legacy_indexes([index])[0] - self.name = name + else: + raise Exception("cannot unpickle legacy formats -> [%s]" % state) # indexers @property def axes(self): return [ self.index ] - @property - def ix(self): - if self._ix is None: - self._ix = _SeriesIndexer(self, 'ix') + def _maybe_box(self, values): + """ genericically box the values """ + + if isinstance(values,self.__class__): + return values + elif not hasattr(values,'__iter__'): + v 
= lib.infer_dtype([values]) + if v == 'datetime': + return lib.Timestamp(v) + return values - return self._ix + v = lib.infer_dtype(values) + if v == 'datetime': + return lib.map_infer(values, lib.Timestamp) + + if isinstance(values,np.ndarray): + return self.__class__(values) + + return values def _xs(self, key, axis=0, level=None, copy=True): return self.__getitem__(key) + xs = _xs + def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location @@ -584,7 +705,7 @@ def _ixs(self, i, axis=0): value : scalar (int) or Series (slice, sequence) """ try: - return _index.get_value_at(self, i) + return _index.get_value_at(self.values, i) except IndexError: raise except: @@ -606,19 +727,22 @@ def _slice(self, slobj, axis=0, raise_on_error=False): if raise_on_error: _check_slice_bounds(slobj, self.values) - return self._constructor(self.values[slobj], index=self.index[slobj]) + return self._constructor(self.values[slobj], index=self.index[slobj], + name=self.name) def __getitem__(self, key): try: return self.index.get_value(self, key) except InvalidIndexError: pass - except KeyError: + except (KeyError,ValueError): if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # kludge pass elif key is Ellipsis: return self + elif _is_bool_indexer(key): + pass else: raise except Exception: @@ -627,9 +751,6 @@ def __getitem__(self, key): if com.is_iterator(key): key = list(key) - # boolean - # special handling of boolean data with NAs stored in object - # arrays. 
Since we can't represent NA with dtype=bool if _is_bool_indexer(key): key = _check_bool_indexer(self.index, key) @@ -638,7 +759,6 @@ def __getitem__(self, key): def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): - from pandas.core.indexing import _is_index_slice idx_type = self.index.inferred_type if idx_type == 'floating': @@ -659,7 +779,7 @@ def _get_with(self, key): return self._get_values(key) raise - if not isinstance(key, (list, pa.Array)): # pragma: no cover + if not isinstance(key, (list, pa.Array, Series)): # pragma: no cover key = list(key) if isinstance(key, Index): @@ -695,121 +815,21 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) - return Series(self.values[indexer], index=new_index, name=self.name) + return self._constructor(self.values[indexer], index=new_index, name=self.name) def _get_values(self, indexer): try: - return Series(self.values[indexer], index=self.index[indexer], - name=self.name) + return self._constructor(self._data.get_slice(indexer), + name=self.name,fastpath=True) except Exception: return self.values[indexer] - def get_dtype_counts(self): - return Series({ self.dtype.name : 1 }) - - def where(self, cond, other=nan, inplace=False): - """ - Return a Series where cond is True; otherwise values are from other - - Parameters - ---------- - cond: boolean Series or array - other: scalar or Series - - Returns - ------- - wh: Series - """ - if isinstance(cond, Series): - cond = cond.reindex(self.index, fill_value=True) - if not hasattr(cond, 'shape'): - raise ValueError('where requires an ndarray like object for its ' - 'condition') - if len(cond) != len(self): - raise ValueError('condition must have same length as series') - - if cond.dtype != np.bool_: - cond = cond.astype(np.bool_) - - ser = self if inplace else self.copy() - if not isinstance(other, (list, tuple, pa.Array)): - ser._set_with(~cond, other) - 
return None if inplace else ser - - if isinstance(other, Series): - other = other.reindex(ser.index) - elif isinstance(other, (tuple,list)): - - # try to set the same dtype as ourselves - new_other = np.array(other,dtype=self.dtype) - if not (new_other == np.array(other)).all(): - other = np.array(other) - else: - other = new_other - - if len(other) != len(ser): - icond = ~cond - - # GH 2745 - # treat like a scalar - if len(other) == 1: - other = np.array(other[0]*len(ser)) - - # GH 3235 - # match True cond to other - elif len(icond[icond]) == len(other): - dtype, fill_value = _maybe_promote(other.dtype) - new_other = np.empty(len(cond),dtype=dtype) - new_other.fill(fill_value) - new_other[icond] = other - other = new_other - - else: - raise ValueError('Length of replacements must equal series length') - - change = ser if inplace else None - com._maybe_upcast_putmask(ser,~cond,other,change=change) - - return None if inplace else ser - - def mask(self, cond): - """ - Returns copy of self whose values are replaced with nan if the - inverted condition is True - - Parameters - ---------- - cond: boolean Series or array - - Returns - ------- - wh: Series - """ - return self.where(~cond, nan) - - def abs(self): - """ - Return an object with absolute value taken. 
Only applicable to objects - that are all numeric - - Returns - ------- - abs: type of caller - """ - obj = np.abs(self) - obj = com._possibly_cast_to_timedelta(obj, coerce=False) - return obj - def __setitem__(self, key, value): try: - try: - self.index._engine.set_value(self, key, value) - return - except KeyError: - values = self.values - values[self.index.get_loc(key)] = value - return - except KeyError: + self._set_with_engine(key, value) + return + except (KeyError,ValueError): + values = self.values if (com.is_integer(key) and not self.index.inferred_type == 'integer'): @@ -818,36 +838,45 @@ def __setitem__(self, key, value): elif key is Ellipsis: self[:] = value return - - raise KeyError('%s not in this series!' % str(key)) - except TypeError, e: - # python 3 type errors should be raised - if 'unorderable' in str(e): # pragma: no cover - raise IndexError(key) - # Could not hash item - except ValueError: - - # reassign a null value to iNaT - if com.is_timedelta64_dtype(self.dtype): + elif _is_bool_indexer(key): + pass + elif com.is_timedelta64_dtype(self.dtype): + # reassign a null value to iNaT if isnull(value): value = tslib.iNaT try: - self.index._engine.set_value(self, key, value) + self.index._engine.set_value(self.values, key, value) return except (TypeError): pass + raise KeyError('%s not in this series!' 
% str(key)) + + except TypeError, e: + # python 3 type errors should be raised + if 'unorderable' in str(e): # pragma: no cover + raise IndexError(key) + # Could not hash item + if _is_bool_indexer(key): key = _check_bool_indexer(self.index, key) self.where(~key,value,inplace=True) else: self._set_with(key, value) + def _set_with_engine(self, key, value): + values = self.values + try: + self.index._engine.set_value(values, key, value) + return + except KeyError: + values[self.index.get_loc(key)] = value + return + def _set_with(self, key, value): # other: fancy integer or otherwise if isinstance(key, slice): - from pandas.core.indexing import _is_index_slice if self.index.inferred_type == 'integer' or _is_index_slice(key): indexer = key else: @@ -860,7 +889,7 @@ def _set_with(self, key, value): except Exception: pass - if not isinstance(key, (list, pa.Array)): + if not isinstance(key, (list, Series, pa.Array, Series)): key = list(key) if isinstance(key, Index): @@ -874,7 +903,7 @@ def _set_with(self, key, value): else: return self._set_values(key, value) elif key_type == 'boolean': - self._set_values(key, value) + self._set_values(key.astype(np.bool_), value) else: self._set_labels(key, value) @@ -892,6 +921,8 @@ def _set_labels(self, key, value): def _set_values(self, key, value): values = self.values + if isinstance(key, Series): + key = key.values values[key] = _index.convert_scalar(values, value) # help out SparseSeries @@ -903,7 +934,7 @@ def __getslice__(self, i, j): if j < 0: j = 0 slobj = slice(i, j) - return self.__getitem__(slobj) + return self._slice(slobj) def __setslice__(self, i, j, value): """Set slice equal to given value(s)""" @@ -914,45 +945,13 @@ def __setslice__(self, i, j, value): slobj = slice(i, j) return self.__setitem__(slobj, value) - def astype(self, dtype): - """ - See numpy.ndarray.astype - """ - casted = com._astype_nansafe(self.values, dtype) - return self._constructor(casted, index=self.index, name=self.name, - dtype=casted.dtype) - 
- def convert_objects(self, convert_dates=True, convert_numeric=True): - """ - Attempt to infer better dtype - Always return a copy - - Parameters - ---------- - convert_dates : boolean, default True - if True, attempt to soft convert_dates, if 'coerce', force - conversion (and non-convertibles get NaT) - convert_numeric : boolean, default True - if True attempt to coerce to numbers (including strings), - non-convertibles get NaN - - Returns - ------- - converted : Series - """ - if self.dtype == np.object_: - return Series(com._possibly_convert_objects(self.values, - convert_dates=convert_dates, convert_numeric=convert_numeric), - index=self.index, name=self.name) - return self.copy() - def repeat(self, reps): """ See ndarray.repeat """ new_index = self.index.repeat(reps) new_values = self.values.repeat(reps) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def reshape(self, newshape, order='C'): """ @@ -1003,7 +1002,7 @@ def get_value(self, label): ------- value : scalar value """ - return self.index._engine.get_value(self, label) + return self.index._engine.get_value(self.values, label) def set_value(self, label, value): """ @@ -1025,7 +1024,7 @@ def set_value(self, label, value): otherwise a new object """ try: - self.index._engine.set_value(self, label, value) + self.index._engine.set_value(self.values, label, value) return self except KeyError: if len(self.index) == 0: @@ -1034,7 +1033,7 @@ def set_value(self, label, value): new_index = self.index.insert(len(self), label) new_values = np.concatenate([self.values, [value]]) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def reset_index(self, level=None, drop=False, name=None, inplace=False): """ @@ -1070,7 +1069,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): # set name if it was passed, otherwise, keep the 
previous name self.name = name or self.name else: - return Series(self.values.copy(), index=new_index, + return self._constructor(self.values.copy(), index=new_index, name=self.name) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' @@ -1084,28 +1083,6 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): return df.reset_index(level=level, drop=drop) - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -1130,14 +1107,6 @@ def __unicode__(self): raise AssertionError() return result - def __repr__(self): - """ - Return a string representation for a particular Series - - Yields Bytestring in Py2, Unicode String in py3. 
- """ - return str(self) - def _tidy_repr(self, max_vals=20): """ @@ -1156,6 +1125,19 @@ def _tidy_repr(self, max_vals=20): return unicode(result) def _repr_footer(self): + + # time series + if self.is_time_series: + if self.index.freq is not None: + freqstr = 'Freq: %s, ' % self.index.freqstr + else: + freqstr = '' + + namestr = "Name: %s, " % str( + self.name) if self.name is not None else "" + return '%s%sLength: %d' % (freqstr, namestr, len(self)) + + # reg series namestr = u"Name: %s, " % com.pprint_thing( self.name) if self.name is not None else "" return u'%sLength: %d, dtype: %s' % (namestr, len(self), @@ -1244,6 +1226,7 @@ def iteritems(self): __truediv__ = _arith_method(operator.truediv, '__truediv__') __floordiv__ = _arith_method(operator.floordiv, '__floordiv__') __pow__ = _arith_method(operator.pow, '__pow__') + __mod__ = _arith_method(operator.mod, '__mod__') __radd__ = _arith_method(_radd_compat, '__add__') __rmul__ = _arith_method(operator.mul, '__mul__') @@ -1251,6 +1234,7 @@ def iteritems(self): __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__') __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__') __rpow__ = _arith_method(lambda x, y: y ** x, '__pow__') + __rmod__ = _arith_method(lambda x, y: y % x, '__mod__') # comparisons __gt__ = _comp_method(operator.gt, '__gt__') @@ -1258,16 +1242,16 @@ def iteritems(self): __lt__ = _comp_method(operator.lt, '__lt__') __le__ = _comp_method(operator.le, '__le__') __eq__ = _comp_method(operator.eq, '__eq__') - __ne__ = _comp_method(operator.ne, '__ne__') + __ne__ = _comp_method(operator.ne, '__ne__', True) # inversion def __neg__(self): arr = operator.neg(self.values) - return Series(arr, self.index, name=self.name) + return self._constructor(arr, self.index, name=self.name) def __invert__(self): arr = operator.inv(self.values) - return Series(arr, self.index, name=self.name) + return self._constructor(arr, self.index, name=self.name) # binary logic __or__ = _bool_method(operator.or_, 
'__or__') @@ -1301,9 +1285,6 @@ def keys(self): "Alias for index" return self.index - # alas, I wish this worked - # values = lib.ValuesProperty() - @property def values(self): """ @@ -1313,27 +1294,20 @@ def values(self): ------- arr : numpy.ndarray """ - return self.view(ndarray) + return self._data.values - def copy(self, order='C'): - """ - Return new Series with copy of underlying values + def get_values(self): + """ same as values (but handles sparseness conversions); is a view """ + return self._data.values - Returns - ------- - cp : Series - """ - return Series(self.values.copy(order), index=self.index, - name=self.name) + def copy(self, order='C'): + new_self = super(Series, self).copy(deep=True) + new_self.name = self.name + return new_self def tolist(self): - """ - Convert Series to a nested list - Overrides numpy.ndarray.tolist - """ - if com.is_datetime64_dtype(self): - return list(self) - return self.values.tolist() + """ Convert Series to a nested list """ + return list(self) def to_dict(self): """ @@ -1400,16 +1374,16 @@ def count(self, level=None): level_index = self.index.levels[level] if len(self) == 0: - return Series(0, index=level_index) + return self._constructor(0, index=level_index) # call cython function max_bin = len(level_index) labels = com._ensure_int64(self.index.labels[level]) counts = lib.count_level_1d(mask.view(pa.uint8), labels, max_bin) - return Series(counts, index=level_index) + return self._constructor(counts, index=level_index) - return notnull(self.values).sum() + return notnull(_values_from_object(self)).sum() def value_counts(self, normalize=False): """ @@ -1483,7 +1457,7 @@ def duplicated(self, take_last=False): """ keys = com._ensure_object(self.values) duplicated = lib.duplicated(keys, take_last=take_last) - return Series(duplicated, index=self.index, name=self.name) + return self._constructor(duplicated, index=self.index, name=self.name) sum = _make_stat_func(nanops.nansum, 'sum', 'sum') mean = 
_make_stat_func(nanops.nanmean, 'mean', 'mean') @@ -1506,7 +1480,7 @@ def mad(self, skipna=True, level=None): def min(self, axis=None, out=None, skipna=True, level=None): if level is not None: return self._agg_by_level('min', level=level, skipna=skipna) - return nanops.nanmin(self.values, skipna=skipna) + return nanops.nanmin(_values_from_object(self), skipna=skipna) @Substitution(name='maximum', shortname='max', na_action=_doc_exclude_na, extras='') @@ -1514,7 +1488,7 @@ def min(self, axis=None, out=None, skipna=True, level=None): def max(self, axis=None, out=None, skipna=True, level=None): if level is not None: return self._agg_by_level('max', level=level, skipna=skipna) - return nanops.nanmax(self.values, skipna=skipna) + return nanops.nanmax(_values_from_object(self), skipna=skipna) @Substitution(name='standard deviation', shortname='stdev', na_action=_doc_exclude_na, extras='') @@ -1527,7 +1501,7 @@ def std(self, axis=None, dtype=None, out=None, ddof=1, skipna=True, if level is not None: return self._agg_by_level('std', level=level, skipna=skipna, ddof=ddof) - return np.sqrt(nanops.nanvar(self.values, skipna=skipna, ddof=ddof)) + return np.sqrt(nanops.nanvar(_values_from_object(self), skipna=skipna, ddof=ddof)) @Substitution(name='variance', shortname='var', na_action=_doc_exclude_na, extras='') @@ -1540,7 +1514,7 @@ def var(self, axis=None, dtype=None, out=None, ddof=1, skipna=True, if level is not None: return self._agg_by_level('var', level=level, skipna=skipna, ddof=ddof) - return nanops.nanvar(self.values, skipna=skipna, ddof=ddof) + return nanops.nanvar(_values_from_object(self), skipna=skipna, ddof=ddof) @Substitution(name='unbiased skewness', shortname='skew', na_action=_doc_exclude_na, extras='') @@ -1549,7 +1523,7 @@ def skew(self, skipna=True, level=None): if level is not None: return self._agg_by_level('skew', level=level, skipna=skipna) - return nanops.nanskew(self.values, skipna=skipna) + return nanops.nanskew(_values_from_object(self), 
skipna=skipna) @Substitution(name='unbiased kurtosis', shortname='kurt', na_action=_doc_exclude_na, extras='') @@ -1558,7 +1532,7 @@ def kurt(self, skipna=True, level=None): if level is not None: return self._agg_by_level('kurt', level=level, skipna=skipna) - return nanops.nankurt(self.values, skipna=skipna) + return nanops.nankurt(_values_from_object(self), skipna=skipna) def _agg_by_level(self, name, level=0, skipna=True, **kwds): grouped = self.groupby(level=level) @@ -1581,7 +1555,7 @@ def idxmin(self, axis=None, out=None, skipna=True): ------- idxmin : Index of minimum of values """ - i = nanops.nanargmin(self.values, skipna=skipna) + i = nanops.nanargmin(_values_from_object(self), skipna=skipna) if i == -1: return pa.NA return self.index[i] @@ -1599,7 +1573,7 @@ def idxmax(self, axis=None, out=None, skipna=True): ------- idxmax : Index of minimum of values """ - i = nanops.nanargmax(self.values, skipna=skipna) + i = nanops.nanargmax(_values_from_object(self), skipna=skipna) if i == -1: return pa.NA return self.index[i] @@ -1619,7 +1593,7 @@ def cumsum(self, axis=0, dtype=None, out=None, skipna=True): ------- cumsum : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1631,7 +1605,7 @@ def cumsum(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cumprod(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1648,7 +1622,7 @@ def cumprod(self, axis=0, dtype=None, out=None, skipna=True): ------- cumprod : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1660,7 +1634,7 @@ def cumprod(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return 
Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cummax(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1677,7 +1651,7 @@ def cummax(self, axis=0, dtype=None, out=None, skipna=True): ------- cummax : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1689,7 +1663,7 @@ def cummax(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) def cummin(self, axis=0, dtype=None, out=None, skipna=True): """ @@ -1706,7 +1680,7 @@ def cummin(self, axis=0, dtype=None, out=None, skipna=True): ------- cummin : Series """ - arr = self.values.copy() + arr = _values_from_object(self).copy() do_mask = skipna and not issubclass(self.dtype.type, np.integer) if do_mask: @@ -1718,16 +1692,16 @@ def cummin(self, axis=0, dtype=None, out=None, skipna=True): if do_mask: np.putmask(result, mask, pa.NA) - return Series(result, index=self.index) + return self._constructor(result, index=self.index, name=self.name) @Appender(pa.Array.round.__doc__) def round(self, decimals=0, out=None): """ """ - result = self.values.round(decimals, out=out) + result = _values_from_object(self).round(decimals, out=out) if out is None: - result = Series(result, index=self.index, name=self.name) + result = self._constructor(result, index=self.index, name=self.name) return result @@ -1751,7 +1725,7 @@ def quantile(self, q=0.5): return _quantile(valid_values, q * 100) def ptp(self, axis=None, out=None): - return self.values.ptp(axis, out) + return _values_from_object(self).ptp(axis, out) def describe(self, percentile_width=50): """ @@ -1786,7 +1760,7 @@ def describe(self, percentile_width=50): elif issubclass(self.dtype.type, np.datetime64): names = ['count', 'unique'] - asint = 
self.dropna().view('i8') + asint = self.dropna().values.view('i8') objcounts = Counter(asint) data = [self.count(), len(objcounts)] if data[1] > 0: @@ -1816,7 +1790,7 @@ def pretty_name(x): lb), self.median(), self.quantile(ub), self.max()] - return Series(data, index=names) + return self._constructor(data, index=names) def corr(self, other, method='pearson', min_periods=None): @@ -1879,8 +1853,8 @@ def diff(self, periods=1): ------- diffed : Series """ - result = com.diff(self.values, periods) - return Series(result, self.index, name=self.name) + result = com.diff(_values_from_object(self), periods) + return self._constructor(result, self.index, name=self.name) def autocorr(self): """ @@ -1928,7 +1902,7 @@ def clip_upper(self, threshold): ------- clipped : Series """ - return pa.where(self > threshold, threshold, self) + return self._constructor(pa.where(self > threshold, threshold, self),index=self.index,name=self.name) def clip_lower(self, threshold): """ @@ -1942,7 +1916,7 @@ def clip_lower(self, threshold): ------- clipped : Series """ - return pa.where(self < threshold, threshold, self) + return self._constructor(pa.where(self < threshold, threshold, self),index=self.index,name=self.name) #------------------------------------------------------------------------------ # Combination @@ -1962,6 +1936,7 @@ def append(self, to_append, verify_integrity=False): appended : Series """ from pandas.tools.merge import concat + if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append else: @@ -2014,7 +1989,7 @@ def _binop(self, other, func, level=None, fill_value=None): result = func(this_vals, other_vals) name = _maybe_match_name(self, other) - return Series(result, index=new_index, name=name) + return self._constructor(result, index=new_index, name=name) add = _flex_method(operator.add, 'add') sub = _flex_method(operator.sub, 'subtract') @@ -2053,7 +2028,7 @@ def combine(self, other, func, fill_value=nan): new_index = self.index new_values = 
func(self.values, other) new_name = self.name - return Series(new_values, index=new_index, name=new_name) + return self._constructor(new_values, index=new_index, name=new_name) def combine_first(self, other): """ @@ -2072,8 +2047,8 @@ def combine_first(self, other): this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) name = _maybe_match_name(self, other) - rs_vals = com._where_compat(isnull(this), other, this) - return Series(rs_vals, index=new_index, name=name) + rs_vals = com._where_compat(isnull(this), other.values, this.values) + return self._constructor(rs_vals, index=new_index, name=name) def update(self, other): """ @@ -2104,9 +2079,10 @@ def sort(self, axis=0, kind='quicksort', order=None): information. 'mergesort' is the only stable algorithm order : ignored """ + sortedSeries = self.order(na_last=True, kind=kind) - true_base = self + true_base = self.values while true_base.base is not None: true_base = true_base.base @@ -2148,7 +2124,7 @@ def sort_index(self, ascending=True): ascending=ascending) new_values = self.values.take(indexer) - return Series(new_values, new_labels, name=self.name) + return self._constructor(new_values, new_labels, name=self.name) def argsort(self, axis=0, kind='quicksort', order=None): """ @@ -2174,10 +2150,10 @@ def argsort(self, axis=0, kind='quicksort', order=None): if mask.any(): result = Series(-1,index=self.index,name=self.name,dtype='int64') notmask = -mask - result.values[notmask] = np.argsort(self.values[notmask], kind=kind) - return result + result[notmask] = np.argsort(values[notmask], kind=kind) + return self._constructor(result, index=self.index, name=self.name) else: - return Series(np.argsort(values, kind=kind), index=self.index, + return self._constructor(np.argsort(values, kind=kind), index=self.index, name=self.name,dtype='int64') def rank(self, method='average', na_option='keep', ascending=True): @@ -2204,7 +2180,7 @@ def rank(self, method='average', na_option='keep', 
ascending=True): from pandas.core.algorithms import rank ranks = rank(self.values, method=method, na_option=na_option, ascending=ascending) - return Series(ranks, index=self.index, name=self.name) + return self._constructor(ranks, index=self.index, name=self.name) def order(self, na_last=True, ascending=True, kind='mergesort'): """ @@ -2256,7 +2232,7 @@ def _try_kind_sort(arr): sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] - return Series(arr[sortedIdx], index=self.index[sortedIdx], + return self._constructor(arr[sortedIdx], index=self.index[sortedIdx], name=self.name) def sortlevel(self, level=0, ascending=True): @@ -2279,7 +2255,7 @@ def sortlevel(self, level=0, ascending=True): new_index, indexer = self.index.sortlevel(level, ascending=ascending) new_values = self.values.take(indexer) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def swaplevel(self, i, j, copy=True): """ @@ -2295,7 +2271,7 @@ def swaplevel(self, i, j, copy=True): swapped : Series """ new_index = self.index.swaplevel(i, j) - return Series(self.values, index=new_index, copy=copy, name=self.name) + return self._constructor(self.values, index=new_index, copy=copy, name=self.name) def reorder_levels(self, order): """ @@ -2403,14 +2379,14 @@ def map_f(values, f): if isinstance(arg, (dict, Series)): if isinstance(arg, dict): - arg = Series(arg) + arg = self._constructor(arg) indexer = arg.index.get_indexer(values) new_values = com.take_1d(arg.values, indexer) - return Series(new_values, index=self.index, name=self.name) + return self._constructor(new_values, index=self.index, name=self.name) else: mapped = map_f(values, arg) - return Series(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index, name=self.name) def apply(self, func, convert_dtype=True, args=(), **kwds): """ @@ -2444,54 +2420,157 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): if 
isinstance(f, np.ufunc): return f(self) - values = self.values + values = _values_from_object(self) if com.is_datetime64_dtype(values.dtype): values = lib.map_infer(values, lib.Timestamp) mapped = lib.map_infer(values, f, convert=convert_dtype) - if isinstance(mapped[0], Series): + if len(mapped) and isinstance(mapped[0], Series): from pandas.core.frame import DataFrame return DataFrame(mapped.tolist(), index=self.index) else: - return Series(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index, name=self.name) - def align(self, other, join='outer', level=None, copy=True, - fill_value=None, method=None, limit=None): + def replace(self, to_replace, value=None, method='pad', inplace=False, + limit=None): """ - Align two Series object with the specified join method + Replace arbitrary values in a Series Parameters ---------- - other : Series - join : {'outer', 'inner', 'left', 'right'}, default 'outer' - level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - copy : boolean, default True - Always return new objects. If copy=False and no reindexing is - required, the same object will be returned (for better performance) - fill_value : object, default None - method : str, default 'pad' + to_replace : list or dict + list of values to be replaced or dict of replacement values + value : anything + if to_replace is a list then value is the replacement value + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + inplace : boolean, default False + If True, fill the Series in place. Note: this will modify any other + views on this Series, for example a column in a DataFrame. 
Returns + a reference to the filled object, which is self if inplace=True limit : int, default None - fill_value, method, inplace, limit are passed to fillna + Maximum size gap to forward or backward fill + + Notes + ----- + replace does not distinguish between NaN and None + + See also + -------- + fillna, reindex, asfreq Returns ------- - (left, right) : (Series, Series) - Aligned Series + replaced : Series """ - join_index, lidx, ridx = self.index.join(other.index, how=join, - level=level, - return_indexers=True) - left = self._reindex_indexer(join_index, lidx, copy) - right = other._reindex_indexer(join_index, ridx, copy) - fill_na = (fill_value is not None) or (method is not None) - if fill_na: - return (left.fillna(fill_value, method=method, limit=limit), - right.fillna(fill_value, method=method, limit=limit)) + if inplace: + result = self + change = self else: - return left, right + result = self.copy() + change = None + + def _rep_one(s, to_rep, v): # replace single value + mask = com.mask_missing(s.values, to_rep) + com._maybe_upcast_putmask(s.values,mask,v,change=change) + + def _rep_dict(rs, to_rep): # replace {[src] -> dest} + + all_src = set() + dd = {} # group by unique destination value + for s, d in to_rep.iteritems(): + dd.setdefault(d, []).append(s) + all_src.add(s) + + if any(d in all_src for d in dd.keys()): + # don't clobber each other at the cost of temporaries + masks = {} + for d, sset in dd.iteritems(): # now replace by each dest + masks[d] = com.mask_missing(rs.values, sset) + + for d, m in masks.iteritems(): + com._maybe_upcast_putmask(rs.values,m,d,change=change) + else: # if no risk of clobbering then simple + for d, sset in dd.iteritems(): + _rep_one(rs, sset, d) + + + + if np.isscalar(to_replace): + to_replace = [to_replace] + + if isinstance(to_replace, dict): + _rep_dict(result, to_replace) + elif isinstance(to_replace, (list, pa.Array, Series)): + + if isinstance(value, (list, pa.Array, Series)): # check same length + vl, rl = 
len(value), len(to_replace) + if vl == rl: + _rep_dict(result, dict(zip(to_replace, value))) + else: + raise ValueError('Got %d to replace but %d values' + % (rl, vl)) + + elif value is not None: # otherwise all replaced with same value + _rep_one(result, to_replace, value) + else: # method + if method is None: # pragma: no cover + raise ValueError('must specify a fill method') + fill_f = _get_fill_func(method) + + mask = com.mask_missing(result.values, to_replace) + fill_f(result.values, limit=limit, mask=mask) + + if not inplace: + result = Series(result.values, index=self.index, + name=self.name) + else: + raise ValueError('Unrecognized to_replace type %s' % + type(to_replace)) + + if not inplace: + return result + + def align(self, other, join='outer', level=None, copy=True, + fill_value=None, method=None, limit=None): + """ + Align two Series object with the specified join method + + Parameters + ---------- + other : Series + join : {'outer', 'inner', 'left', 'right'}, default 'outer' + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + copy : boolean, default True + Always return new objects. 
If copy=False and no reindexing is + required, the same object will be returned (for better performance) + fill_value : object, default None + method : str, default 'pad' + limit : int, default None + fill_value, method, inplace, limit are passed to fillna + + Returns + ------- + (left, right) : (Series, Series) + Aligned Series + """ + join_index, lidx, ridx = self.index.join(other.index, how=join, + level=level, + return_indexers=True) + + left = self._reindex_indexer(join_index, lidx, copy) + right = other._reindex_indexer(join_index, ridx, copy) + fill_na = (fill_value is not None) or (method is not None) + if fill_na: + return (left.fillna(fill_value, method=method, limit=limit), + right.fillna(fill_value, method=method, limit=limit)) + else: + return left, right def _reindex_indexer(self, new_index, indexer, copy): if indexer is not None: @@ -2550,12 +2629,12 @@ def reindex(self, index=None, method=None, level=None, fill_value=pa.NA, return self if len(self.index) == 0: - return Series(nan, index=index, name=self.name) + return self._constructor(nan, index=index, name=self.name) new_index, indexer = self.index.reindex(index, method=method, level=level, limit=limit) new_values = com.take_1d(self.values, indexer, fill_value=fill_value) - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ @@ -2563,30 +2642,6 @@ def reindex_axis(self, labels, axis=0, **kwargs): raise ValueError("cannot reindex series on non-zero axis!") return self.reindex(index=labels,**kwargs) - def reindex_like(self, other, method=None, limit=None, fill_value=pa.NA): - """ - Reindex Series to match index of another Series, optionally with - filling logic - - Parameters - ---------- - other : Series - method : string or None - See Series.reindex docstring - limit : int, default None - Maximum size gap to forward or backward 
fill - - Notes - ----- - Like calling s.reindex(other.index, method=...) - - Returns - ------- - reindexed : Series - """ - return self.reindex(other.index, method=method, limit=limit, - fill_value=fill_value) - def take(self, indices, axis=0, convert=True): """ Analogous to ndarray.take, return Series corresponding to requested @@ -2601,177 +2656,14 @@ def take(self, indices, axis=0, convert=True): ------- taken : Series """ + # check/convert indicies here + if convert: + indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) + indices = com._ensure_platform_int(indices) new_index = self.index.take(indices) new_values = self.values.take(indices) - return Series(new_values, index=new_index, name=self.name) - - truncate = generic.truncate - - def fillna(self, value=None, method=None, inplace=False, - limit=None): - """ - Fill NA/NaN values using the specified method - - Parameters - ---------- - value : any kind (should be same type as array) - Value to use to fill holes (e.g. 0) - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - inplace : boolean, default False - If True, fill the Series in place. Note: this will modify any other - views on this Series, for example a column in a DataFrame. 
Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - - See also - -------- - reindex, asfreq - - Returns - ------- - filled : Series - """ - if not self._can_hold_na: - return self.copy() if not inplace else None - - if value is not None: - if method is not None: - raise ValueError('Cannot specify both a fill value and method') - result = self.copy() if not inplace else self - mask = isnull(self.values) - np.putmask(result, mask, value) - else: - if method is None: # pragma: no cover - raise ValueError('must specify a fill method or value') - - fill_f = _get_fill_func(method) - - if inplace: - values = self.values - else: - values = self.values.copy() - - fill_f(values, limit=limit) - - if inplace: - result = self - else: - result = Series(values, index=self.index, name=self.name) - - if not inplace: - return result - - def ffill(self, inplace=False, limit=None): - return self.fillna(method='ffill', inplace=inplace, limit=limit) - - def bfill(self, inplace=False, limit=None): - return self.fillna(method='bfill', inplace=inplace, limit=limit) - - def replace(self, to_replace, value=None, method='pad', inplace=False, - limit=None): - """ - Replace arbitrary values in a Series - - Parameters - ---------- - to_replace : list or dict - list of values to be replaced or dict of replacement values - value : anything - if to_replace is a list then value is the replacement value - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - inplace : boolean, default False - If True, fill the Series in place. Note: this will modify any other - views on this Series, for example a column in a DataFrame. 
Returns - a reference to the filled object, which is self if inplace=True - limit : int, default None - Maximum size gap to forward or backward fill - - Notes - ----- - replace does not distinguish between NaN and None - - See also - -------- - fillna, reindex, asfreq - - Returns - ------- - replaced : Series - """ - - if inplace: - result = self - change = self - else: - result = self.copy() - change = None - - def _rep_one(s, to_rep, v): # replace single value - mask = com.mask_missing(s.values, to_rep) - com._maybe_upcast_putmask(s.values,mask,v,change=change) - - def _rep_dict(rs, to_rep): # replace {[src] -> dest} - - all_src = set() - dd = {} # group by unique destination value - for s, d in to_rep.iteritems(): - dd.setdefault(d, []).append(s) - all_src.add(s) - - if any(d in all_src for d in dd.keys()): - # don't clobber each other at the cost of temporaries - masks = {} - for d, sset in dd.iteritems(): # now replace by each dest - masks[d] = com.mask_missing(rs.values, sset) - - for d, m in masks.iteritems(): - com._maybe_upcast_putmask(rs.values,m,d,change=change) - else: # if no risk of clobbering then simple - for d, sset in dd.iteritems(): - _rep_one(rs, sset, d) - - if np.isscalar(to_replace): - to_replace = [to_replace] - - if isinstance(to_replace, dict): - _rep_dict(result, to_replace) - elif isinstance(to_replace, (list, pa.Array)): - - if isinstance(value, (list, pa.Array)): # check same length - vl, rl = len(value), len(to_replace) - if vl == rl: - _rep_dict(result, dict(zip(to_replace, value))) - else: - raise ValueError('Got %d to replace but %d values' - % (rl, vl)) - - elif value is not None: # otherwise all replaced with same value - _rep_one(result, to_replace, value) - else: # method - if method is None: # pragma: no cover - raise ValueError('must specify a fill method') - fill_f = _get_fill_func(method) - - mask = com.mask_missing(result, to_replace) - fill_f(result.values, limit=limit, mask=mask) - - if not inplace: - result = 
Series(result.values, index=self.index, - name=self.name) - else: - raise ValueError('Unrecognized to_replace type %s' % - type(to_replace)) - - if not inplace: - return result + return self._constructor(new_values, index=new_index, name=self.name) def isin(self, values): """ @@ -2787,8 +2679,8 @@ def isin(self, values): isin : Series (boolean dtype) """ value_set = set(values) - result = lib.ismember(self.values, value_set) - return Series(result, self.index, name=self.name) + result = lib.ismember(_values_from_object(self), value_set) + return self._constructor(result, self.index, name=self.name) def between(self, left, right, inclusive=True): """ @@ -2971,17 +2863,17 @@ def _get_values(): new_values[:periods] = self.values[-periods:] new_values[periods:] = fill_value - return Series(new_values, index=self.index, name=self.name) + return self._constructor(new_values, index=self.index, name=self.name) elif isinstance(self.index, PeriodIndex): orig_offset = datetools.to_offset(self.index.freq) if orig_offset == offset: - return Series(_get_values(), self.index.shift(periods), + return self._constructor(_get_values(), self.index.shift(periods), name=self.name) msg = ('Given freq %s does not match PeriodIndex freq %s' % (offset.rule_code, orig_offset.rule_code)) raise ValueError(msg) else: - return Series(_get_values(), + return self._constructor(_get_values(), index=self.index.shift(periods, offset), name=self.name) @@ -3029,7 +2921,7 @@ def asof(self, where): locs = self.index.asof_locs(where, notnull(values)) new_values = com.take_1d(values, locs) - return Series(new_values, index=where, name=self.name) + return self._constructor(new_values, index=where, name=self.name) def interpolate(self, method='linear'): """ @@ -3048,7 +2940,7 @@ def interpolate(self, method='linear'): interpolated : Series """ if method == 'time': - if not isinstance(self, TimeSeries): + if not self.is_time_series: raise Exception('time-weighted interpolation only works' 'on TimeSeries') 
method = 'values' @@ -3079,7 +2971,7 @@ def interpolate(self, method='linear'): result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid], values[firstIndex:][valid]) - return Series(result, index=self.index, name=self.name) + return self._constructor(result, index=self.index, name=self.name) def rename(self, mapper, inplace=False): """ @@ -3124,7 +3016,7 @@ def rename(self, mapper, inplace=False): @property def weekday(self): - return Series([d.weekday() for d in self.index], index=self.index) + return self._constructor([d.weekday() for d in self.index], index=self.index) def tz_convert(self, tz, copy=True): """ @@ -3146,7 +3038,7 @@ def tz_convert(self, tz, copy=True): if copy: new_values = new_values.copy() - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) def tz_localize(self, tz, copy=True): """ @@ -3181,27 +3073,78 @@ def tz_localize(self, tz, copy=True): if copy: new_values = new_values.copy() - return Series(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index, name=self.name) @cache_readonly def str(self): from pandas.core.strings import StringMethods return StringMethods(self) + def to_timestamp(self, freq=None, how='start', copy=True): + """ + Cast to datetimeindex of timestamps, at *beginning* of period + + Parameters + ---------- + freq : string, default frequency of PeriodIndex + Desired frequency + how : {'s', 'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. 
end + + Returns + ------- + ts : TimeSeries with DatetimeIndex + """ + new_values = self.values + if copy: + new_values = new_values.copy() + + new_index = self.index.to_timestamp(freq=freq, how=how) + return self._constructor(new_values, index=new_index, name=self.name) + + def to_period(self, freq=None, copy=True): + """ + Convert TimeSeries from DatetimeIndex to PeriodIndex with desired + frequency (inferred from index if not passed) + + Parameters + ---------- + freq : string, default + + Returns + ------- + ts : TimeSeries with PeriodIndex + """ + new_values = self.values + if copy: + new_values = new_values.copy() + + if freq is None: + freq = self.index.freqstr or self.index.inferred_freq + new_index = self.index.to_period(freq=freq) + return self._constructor(new_values, index=new_index, name=self.name) + +Series._setup_axes(['index'], info_axis=0) _INDEX_TYPES = ndarray, Index, list, tuple +# reinstall the SeriesIndexer +Series._create_indexer('ix',_SeriesIndexer) + #------------------------------------------------------------------------------ # Supplementary functions -def remove_na(arr): +def remove_na(series): """ - Return array containing only true/non-NaN values, possibly empty. + Return series containing only true/non-NaN values, possibly empty. 
""" - return arr[notnull(arr)] + return series[notnull(_values_from_object(series))] def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): + if dtype is not None: + dtype = np.dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) @@ -3229,8 +3172,8 @@ def _try_cast(arr, take_fast_path): return subarr # GH #846 - if isinstance(data, pa.Array): - subarr = data + if isinstance(data, (pa.Array, Series)): + subarr = np.array(data, copy=False) if dtype is not None: # possibility of nan -> garbage @@ -3358,6 +3301,9 @@ def _get_fill_func(method): fill_f = com.backfill_1d return fill_f +# backwards compatiblity +TimeSeries = Series + #---------------------------------------------------------------------- # Add plotting methods to Series @@ -3366,90 +3312,3 @@ def _get_fill_func(method): Series.plot = _gfx.plot_series Series.hist = _gfx.hist_series -# Put here, otherwise monkey-patching in methods fails - - -class TimeSeries(Series): - """ - The time series varians of Series, a One-dimensional ndarray with `TimeStamp` - axis labels. - Labels need not be unique but must be any hashable type. The object - supports both integer- and label-based indexing and provides a host of - methods for performing operations involving the index. Statistical - methods from ndarray have been overridden to automatically exclude - missing data (currently represented as NaN) - - Operations between Series (+, -, /, *, **) align values based on their - associated index values-- they need not be the same length. The result - index will be the sorted union of the two indexes. - - Parameters - ---------- - data : array-like, dict, or scalar value - Contains data stored in Series - index : array-like or Index (1d) - Values must be unique and hashable, same length as data. Index - object (or other iterable of same length as data) Will default to - np.arange(len(data)) if not provided. 
If both a dict and index - sequence are used, the index will override the keys found in the - dict. - dtype : numpy.dtype or None - If None, dtype will be inferred copy : boolean, default False Copy - input data - copy : boolean, default False - """ - def _repr_footer(self): - if self.index.freq is not None: - freqstr = 'Freq: %s, ' % self.index.freqstr - else: - freqstr = '' - - namestr = "Name: %s, " % str( - self.name) if self.name is not None else "" - return '%s%sLength: %d, dtype: %s' % (freqstr, namestr, len(self), - com.pprint_thing(self.dtype.name)) - - def to_timestamp(self, freq=None, how='start', copy=True): - """ - Cast to datetimeindex of timestamps, at *beginning* of period - - Parameters - ---------- - freq : string, default frequency of PeriodIndex - Desired frequency - how : {'s', 'e', 'start', 'end'} - Convention for converting period to timestamp; start of period - vs. end - - Returns - ------- - ts : TimeSeries with DatetimeIndex - """ - new_values = self.values - if copy: - new_values = new_values.copy() - - new_index = self.index.to_timestamp(freq=freq, how=how) - return Series(new_values, index=new_index, name=self.name) - - def to_period(self, freq=None, copy=True): - """ - Convert TimeSeries from DatetimeIndex to PeriodIndex with desired - frequency (inferred from index if not passed) - - Parameters - ---------- - freq : string, default - - Returns - ------- - ts : TimeSeries with PeriodIndex - """ - new_values = self.values - if copy: - new_values = new_values.copy() - - if freq is None: - freq = self.index.freqstr or self.index.inferred_freq - new_index = self.index.to_period(freq=freq) - return Series(new_values, index=new_index, name=self.name) diff --git a/pandas/core/sparse.py b/pandas/core/sparse.py index 1405e88a1343a..7b9caaa3a0139 100644 --- a/pandas/core/sparse.py +++ b/pandas/core/sparse.py @@ -5,6 +5,6 @@ # pylint: disable=W0611 -from pandas.sparse.series import SparseSeries, SparseTimeSeries +from pandas.sparse.series import 
SparseSeries from pandas.sparse.frame import SparseDataFrame from pandas.sparse.panel import SparsePanel diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 3521c9ff94b11..6a5588a48bd76 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1,7 +1,7 @@ import numpy as np from itertools import izip -from pandas.core.common import isnull +from pandas.core.common import isnull, _values_from_object from pandas.core.series import Series import re import pandas.lib as lib @@ -90,6 +90,8 @@ def _na_map(f, arr, na_result=np.nan): def _map(f, arr, na_mask=False, na_value=np.nan): + if isinstance(arr, Series): + arr = arr.values if not isinstance(arr, np.ndarray): arr = np.asarray(arr, dtype=object) if na_mask: @@ -293,7 +295,7 @@ def rep(x, r): except TypeError: return unicode.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = lib.vec_binop(arr, repeats, rep) + result = lib.vec_binop(_values_from_object(arr), repeats, rep) return result diff --git a/pandas/index.pyx b/pandas/index.pyx index 2ad5474549ec6..d164d3eec4dbf 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -52,8 +52,9 @@ cdef inline is_definitely_invalid_key(object val): except TypeError: return True + # we have a _data, means we are a NDFrame return (PySlice_Check(val) or cnp.PyArray_Check(val) - or PyList_Check(val)) + or PyList_Check(val) or hasattr(val,'_data')) def get_value_at(ndarray arr, object loc): if arr.descr.type_num == NPY_DATETIME: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b9db30245eb1b..bdcffc2b83baa 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -668,7 +668,7 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, * dc = data_columns if k == selector else None # compute the val - val = value.reindex_axis(v, axis=axis, copy=False) + val = value.reindex_axis(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs) @@ -2423,7 +2423,9 @@ def create_axes(self, axes, obj, 
validate=True, nan_rep=None, data_columns=None, # reindex by our non_index_axes & compute data_columns for a in self.non_index_axes: - obj = obj.reindex_axis(a[1], axis=a[0], copy=False) + labels = _ensure_index(a[1]) + if not labels.equals(obj._get_axis(a[0])): + obj = obj.reindex_axis(labels, axis=a[0]) # figure out data_columns and get out blocks block_obj = self.get_object(obj) @@ -2494,7 +2496,9 @@ def process_axes(self, obj, columns=None): for axis, labels in self.non_index_axes: if columns is not None: labels = Index(labels) & Index(columns) - obj = obj.reindex_axis(labels, axis=axis, copy=False) + labels = _ensure_index(labels) + if not labels.equals(obj._get_axis(axis)): + obj = obj.reindex_axis(labels, axis=axis) # apply the selection filters (but keep in the same order) if self.selection.filter: diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index d49597860cd16..d40237e6a8c3f 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -15,6 +15,7 @@ from pandas import Index from pandas.sparse.tests import test_sparse from pandas.util import py3compat +import pandas class TestPickle(unittest.TestCase): _multiprocess_can_split_ = True @@ -26,14 +27,20 @@ def setUp(self): def compare(self, vf): # py3 compat when reading py2 pickle - try: with open(vf,'rb') as fh: data = pickle.load(fh) - except (ValueError): + except (ValueError), detail: # we are trying to read a py3 pickle in py2..... 
return + + # we have a deprecated klass + except (TypeError), detail: + + from pandas.compat.pickle_compat import load + data = load(vf) + except: if not py3compat.PY3: raise diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 1999789f206be..7603f9e576d83 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1102,7 +1102,8 @@ def test_append_raise(self): # datetime with embedded nans as object df = tm.makeDataFrame() - s = Series(datetime.datetime(2001,1,2),index=df.index,dtype=object) + s = Series(datetime.datetime(2001,1,2),index=df.index) + s = s.astype(object) s[0:5] = np.nan df['invalid'] = s self.assert_(df.dtypes['invalid'] == np.object_) diff --git a/pandas/sparse/api.py b/pandas/sparse/api.py index 230ad15937c92..a2ff9be81ac4b 100644 --- a/pandas/sparse/api.py +++ b/pandas/sparse/api.py @@ -5,3 +5,4 @@ from pandas.sparse.series import SparseSeries, SparseTimeSeries from pandas.sparse.frame import SparseDataFrame from pandas.sparse.panel import SparsePanel + diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 035db279064a0..7ba9bd9bb8e38 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -101,18 +101,31 @@ class SparseArray(np.ndarray): Notes ----- -SparseSeries objects are immutable via the typical Python means. If you +SparseArray objects are immutable via the typical Python means. 
If you must change values, convert to dense, make your changes, then convert back to sparse """ __array_priority__ = 15 + _typ = 'array' + _subtyp = 'sparse_array' sp_index = None fill_value = None - def __new__(cls, data, sparse_index=None, kind='integer', fill_value=None, - copy=False): + def __new__(cls, data, sparse_index=None, index=None, kind='integer', fill_value=None, + dtype=np.float64, copy=False): + if index is not None: + if data is None: + data = np.nan + if not np.isscalar(data): + raise Exception("must only pass scalars with an index ") + values = np.empty(len(index),dtype='float64') + values.fill(data) + data = values + + if dtype is not None: + dtype = np.dtype(dtype) is_sparse_array = isinstance(data, SparseArray) if fill_value is None: if is_sparse_array: @@ -135,14 +148,22 @@ def __new__(cls, data, sparse_index=None, kind='integer', fill_value=None, # Create array, do *not* copy data by default if copy: - subarr = np.array(values, dtype=np.float64, copy=True) + subarr = np.array(values, dtype=dtype, copy=True) else: - subarr = np.asarray(values, dtype=np.float64) + subarr = np.asarray(values, dtype=dtype) + + + # if we have a bool type, make sure that we have a bool fill_value + if (dtype is not None and issubclass(dtype.type,np.bool_)) or (data is not None and lib.is_bool_array(subarr)): + if np.isnan(fill_value) or not fill_value: + fill_value = False + else: + fill_value = bool(fill_value) # Change the class of the array to be the subclass type. 
output = subarr.view(cls) output.sp_index = sparse_index - output.fill_value = np.float64(fill_value) + output.fill_value = fill_value return output @property @@ -182,11 +203,15 @@ def __setstate__(self, state): self.fill_value = fill_value def __len__(self): - return self.sp_index.length + try: + return self.sp_index.length + except: + return 0 def __repr__(self): - return '%s\n%s' % (np.ndarray.__repr__(self), - repr(self.sp_index)) + return '%s\nFill: %s\n%s' % (np.ndarray.__repr__(self), + repr(self.fill_value), + repr(self.sp_index)) # Arithmetic operators @@ -236,6 +261,29 @@ def values(self): def sp_values(self): # caching not an option, leaks memory return self.view(np.ndarray) + + def get_values(self, fill=None): + """ return a dense representation """ + return self.to_dense(fill=fill) + + def to_dense(self, fill=None): + """ + Convert SparseSeries to (dense) Series + """ + values = self.values + + # fill the nans + if fill is None: + fill = self.fill_value + if not np.isnan(fill): + values[np.isnan(values)] = fill + + return values + + def __iter__(self): + for i in xrange(len(self)): + yield self._get_val_at(i) + raise StopIteration def __getitem__(self, key): """ @@ -260,7 +308,7 @@ def _get_val_at(self, loc): if loc < 0: loc += n - if loc >= len(self) or loc < 0: + if loc >= n or loc < 0: raise Exception('Out of bounds access') sp_loc = self.sp_index.lookup(loc) @@ -282,13 +330,21 @@ def take(self, indices, axis=0): indices = np.asarray(indices, dtype=int) n = len(self) - if (indices < 0).any() or (indices >= n).any(): + if (indices >= n).any(): raise Exception('out of bounds access') if self.sp_index.npoints > 0: - locs = np.array([self.sp_index.lookup(loc) for loc in indices]) + locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices ]) result = self.sp_values.take(locs) - result[locs == -1] = self.fill_value + mask = locs == -1 + if mask.any(): + try: + result[mask] = self.fill_value + except (ValueError): + # wrong dtype + 
result = result.astype('float64') + result[mask] = self.fill_value + else: result = np.empty(len(indices)) result.fill(self.fill_value) @@ -296,16 +352,26 @@ def take(self, indices, axis=0): return result def __setitem__(self, key, value): - raise Exception('SparseArray objects are immutable') + #if com.is_integer(key): + # self.values[key] = value + #else: + # raise Exception("SparseArray does not support seting non-scalars via setitem") + raise Exception("SparseArray does not support setting via setitem") def __setslice__(self, i, j, value): - raise Exception('SparseArray objects are immutable') + if i < 0: + i = 0 + if j < 0: + j = 0 + slobj = slice(i, j) - def to_dense(self): - """ - Convert SparseSeries to (dense) Series - """ - return self.values + #if not np.isscalar(value): + # raise Exception("SparseArray does not support seting non-scalars via slices") + + #x = self.values + #x[slobj] = value + #self.values = x + raise Exception("SparseArray does not support seting via slices") def astype(self, dtype=None): """ @@ -326,6 +392,7 @@ def copy(self, deep=True): else: values = self.sp_values return SparseArray(values, sparse_index=self.sp_index, + dtype = self.dtype, fill_value=self.fill_value) def count(self): @@ -407,6 +474,19 @@ def mean(self, axis=None, dtype=None, out=None): return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) +def _maybe_to_dense(obj): + """ try to convert to dense """ + if hasattr(obj,'to_dense'): + return obj.to_dense() + return obj + +def _maybe_to_sparse(array): + if com.is_sparse_series(array): + array = SparseArray(array.values,sparse_index=array.sp_index,fill_value=array.fill_value,copy=True) + if not isinstance(array, SparseArray): + array = com._values_from_object(array) + return array + def make_sparse(arr, kind='block', fill_value=nan): """ Convert ndarray to sparse format @@ -421,7 +501,13 @@ def make_sparse(arr, kind='block', fill_value=nan): ------- (sparse_values, index) : (ndarray, SparseIndex) """ - arr = 
np.asarray(arr) + if hasattr(arr,'values'): + arr = arr.values + else: + if np.isscalar(arr): + arr = [ arr ] + arr = np.asarray(arr) + length = len(arr) if np.isnan(fill_value): diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index ed33be33ac02a..dbc44b961102e 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -8,7 +8,8 @@ from numpy import nan import numpy as np -from pandas.core.common import _pickle_array, _unpickle_array, _try_sort +from pandas.core.common import (isnull, notnull, _pickle_array, + _unpickle_array, _try_sort) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices from pandas.core.series import Series @@ -17,44 +18,13 @@ from pandas.util.decorators import cache_readonly import pandas.core.common as com import pandas.core.datetools as datetools - -from pandas.sparse.series import SparseSeries +from pandas.core.internals import BlockManager, form_blocks +from pandas.core.generic import NDFrame +from pandas.sparse.series import SparseSeries,SparseArray from pandas.util.decorators import Appender import pandas.lib as lib -class _SparseMockBlockManager(object): - - def __init__(self, sp_frame): - self.sp_frame = sp_frame - - def get(self, item): - return self.sp_frame[item].values - - def iget(self, i): - return self.get(self.sp_frame.columns[i]) - - @property - def shape(self): - x, y = self.sp_frame.shape - return y, x - - @property - def axes(self): - return [self.sp_frame.columns, self.sp_frame.index] - - @property - def blocks(self): - """ return our series in the column order """ - return [ self.iget(i) for i, c in enumerate(self.sp_frame.columns) ] - - def get_numeric_data(self): - # does not check, but assuming all numeric for now - return self.sp_frame - - def get_bool_data(self): - raise NotImplementedError - class SparseDataFrame(DataFrame): """ DataFrame containing sparse floating point data in the form of SparseSeries @@ 
-72,29 +42,61 @@ class SparseDataFrame(DataFrame): Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in """ - _columns = None - _series = None - _is_mixed_type = False - _col_klass = SparseSeries - ndim = 2 + _verbose_info = False + _constructor_sliced = SparseSeries + _subtyp = 'sparse_frame' def __init__(self, data=None, index=None, columns=None, - default_kind='block', default_fill_value=None): + default_kind=None, default_fill_value=None, + dtype=None, copy=False): + + # pick up the defaults from the Sparse structures + if isinstance(data, SparseDataFrame): + if index is None: + index = data.index + if columns is None: + columns = data.columns + if default_fill_value is None: + default_fill_value = data.default_fill_value + if default_kind is None: + default_kind = data.default_kind + elif isinstance(data, (SparseSeries,SparseArray)): + if index is None: + index = data.index + if default_fill_value is None: + default_fill_value = data.fill_value + if columns is None and hasattr(data,'name'): + columns = [ data.name ] + if columns is None: + raise Exception("cannot pass a series w/o a name or columns") + data = { columns[0] : data } + if default_fill_value is None: default_fill_value = np.nan + if default_kind is None: + default_kind = 'block' - self.default_kind = default_kind - self.default_fill_value = default_fill_value + self._default_kind = default_kind + self._default_fill_value = default_fill_value if isinstance(data, dict): - sdict, columns, index = self._init_dict(data, index, columns) + mgr = self._init_dict(data, index, columns) + if dtype is not None: + mgr = mgr.astype(dtype) elif isinstance(data, (np.ndarray, list)): - sdict, columns, index = self._init_matrix(data, index, columns) + mgr = self._init_matrix(data, index, columns) + if dtype is not None: + mgr = mgr.astype(dtype) + elif isinstance(data, SparseDataFrame): + mgr = self._init_mgr(data._data, dict(index=index, columns=columns), dtype=dtype, 
copy=copy) elif isinstance(data, DataFrame): - sdict, columns, index = self._init_dict(data, data.index, - data.columns) + mgr = self._init_dict(data, data.index, data.columns) + if dtype is not None: + mgr = mgr.astype(dtype) + elif isinstance(data, BlockManager): + mgr = self._init_mgr(data, axes = dict(index=index, columns=columns), dtype=dtype, copy=copy) elif data is None: - sdict = {} + data = {} if index is None: index = Index([]) @@ -105,39 +107,33 @@ def __init__(self, data=None, index=None, columns=None, columns = Index([]) else: for c in columns: - sdict[c] = SparseSeries(np.nan, index=index, - kind=self.default_kind, - fill_value=self.default_fill_value) - - self._series = sdict - self.columns = columns - self.index = index - - def _from_axes(self, data, axes): - columns, index = axes - return self._constructor(data, index=index, columns=columns) - - @cache_readonly - def _data(self): - return _SparseMockBlockManager(self) - - def _consolidate_inplace(self): - # do nothing when DataFrame calls this method - pass - - def convert_objects(self, convert_dates=True): - # XXX - return self + data[c] = SparseArray(np.nan, + index=index, + kind=self._default_kind, + fill_value=self._default_fill_value) + mgr = dict_to_manager(data, columns, index) + if dtype is not None: + mgr = mgr.astype(dtype) + + NDFrame.__init__(self, mgr) @property def _constructor(self): - def wrapper(data, index=None, columns=None, copy=False): - sf = SparseDataFrame(data, index=index, columns=columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) - if copy: - sf = sf.copy() - return sf + def wrapper(data, index=None, columns=None, default_fill_value=None, kind=None, fill_value=None, copy=False): + result = SparseDataFrame(data, index=index, columns=columns, + default_fill_value=fill_value, + default_kind=kind, + copy=copy) + + # fill if requested + if fill_value is not None and not isnull(fill_value): + result.fillna(fill_value,inplace=True) + + # set 
the default_fill_value + #if default_fill_value is not None: + # result._default_fill_value = default_fill_value + return result + return wrapper def _init_dict(self, data, index, columns, dtype=None): @@ -151,11 +147,10 @@ def _init_dict(self, data, index, columns, dtype=None): if index is None: index = extract_index(data.values()) - sp_maker = lambda x: SparseSeries(x, index=index, - kind=self.default_kind, - fill_value=self.default_fill_value, - copy=True) - + sp_maker = lambda x: SparseArray(x, + kind=self._default_kind, + fill_value=self._default_fill_value, + copy=True) sdict = {} for k, v in data.iteritems(): if isinstance(v, Series): @@ -164,7 +159,9 @@ def _init_dict(self, data, index, columns, dtype=None): v = v.reindex(index) if not isinstance(v, SparseSeries): - v = sp_maker(v) + v = sp_maker(v.values) + elif isinstance(v, SparseArray): + v = sp_maker(v.values) else: if isinstance(v, dict): v = [v.get(i, nan) for i in index] @@ -180,7 +177,7 @@ def _init_dict(self, data, index, columns, dtype=None): if c not in sdict: sdict[c] = sp_maker(nan_vec) - return sdict, columns, index + return dict_to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): data = _prep_ndarray(data, copy=False) @@ -202,19 +199,19 @@ def _init_matrix(self, data, index, columns, dtype=None): def __array_wrap__(self, result): return SparseDataFrame(result, index=self.index, columns=self.columns, - default_kind=self.default_kind, - default_fill_value=self.default_fill_value) + default_kind=self._default_kind, + default_fill_value=self._default_fill_value) def __getstate__(self): - series = dict((k, (v.sp_index, v.sp_values)) - for k, v in self.iteritems()) - columns = self.columns - index = self.index - - return (series, columns, index, self.default_fill_value, - self.default_kind) - - def __setstate__(self, state): + # pickling + return dict(_typ = self._typ, + _subtyp = self._subtyp, + _data = self._data, + _default_fill_value = 
self._default_fill_value, + _default_kind = self._default_kind) + + def _unpickle_sparse_frame_compat(self, state): + """ original pickle format """ series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover @@ -232,11 +229,9 @@ def __setstate__(self, state): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) - self._series = series_dict - self.index = index - self.columns = columns - self.default_fill_value = fv - self.default_kind = kind + self._data = dict_to_manager(series_dict, columns, index) + self._default_fill_value = fv + self._default_kind = kind def to_dense(self): """ @@ -249,13 +244,6 @@ def to_dense(self): data = dict((k, v.to_dense()) for k, v in self.iteritems()) return DataFrame(data, index=self.index) - def get_dtype_counts(self): - from collections import defaultdict - d = defaultdict(int) - for k, v in self.iteritems(): - d[v.dtype.name] += 1 - return Series(d) - def astype(self, dtype): raise NotImplementedError @@ -263,10 +251,18 @@ def copy(self, deep=True): """ Make a copy of this SparseDataFrame """ - series = dict((k, v.copy()) for k, v in self.iteritems()) - return SparseDataFrame(series, index=self.index, columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + result = super(SparseDataFrame, self).copy(deep=deep) + result._default_fill_value = self._default_fill_value + result._default_kind = self._default_kind + return result + + @property + def default_fill_value(self): + return self._default_fill_value + + @property + def default_kind(self): + return self._default_kind @property def density(self): @@ -279,143 +275,71 @@ def density(self): tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) + def fillna(self, value=None, method=None, axis=0, inplace=False, + limit=None, downcast=None): + new_self = super(SparseDataFrame, self).fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, 
downcast=downcast) + if not inplace: + self = new_self + + # set the fill value if we are filling as a scalar with nothing special going on + if value is not None and value == value and method is None and limit is None: + self._default_fill_value = value + + if not inplace: + return self + #---------------------------------------------------------------------- # Support different internal representation of SparseDataFrame - def _set_item(self, key, value): - sp_maker = lambda x: SparseSeries(x, index=self.index, - fill_value=self.default_fill_value, - kind=self.default_kind) - if hasattr(value, '__iter__'): + def _sanitize_column(self, key, value): + sp_maker = lambda x, index=None: SparseArray(x, + index=index, + fill_value=self._default_fill_value, + kind=self._default_kind) + if isinstance(value, SparseSeries): + clean = value.reindex(self.index).as_sparse_array(fill_value=self._default_fill_value, + kind=self._default_kind) + + elif isinstance(value, SparseArray): + if len(value) != len(self.index): + raise AssertionError('Length of values does not match ' + 'length of index') + clean = value + + elif hasattr(value, '__iter__'): if isinstance(value, Series): - clean_series = value.reindex(self.index) + clean = value.reindex(self.index) if not isinstance(value, SparseSeries): - clean_series = sp_maker(clean_series) + clean = sp_maker(clean) else: - clean_series = sp_maker(value) + if len(value) != len(self.index): + raise AssertionError('Length of values does not match ' + 'length of index') + clean = sp_maker(value) - self._series[key] = clean_series # Scalar else: - self._series[key] = sp_maker(value) - - if key not in self.columns: - self._insert_column(key) - - def _insert_column(self, key): - self.columns = self.columns.insert(len(self.columns), key) - - def __delitem__(self, key): - """ - Delete column from DataFrame - """ - loc = self.columns.get_loc(key) - del self._series[key] - self._delete_column_index(loc) - - def _delete_column_index(self, loc): - 
if loc == len(self.columns) - 1: - new_columns = self.columns[:loc] - else: - new_columns = Index(np.concatenate((self.columns[:loc], - self.columns[loc + 1:]))) - self.columns = new_columns - - _index = None + clean = sp_maker(value,self.index) - def _set_index(self, index): - self._index = _ensure_index(index) - for v in self._series.values(): - v.index = self._index - - def _get_index(self): - return self._index - - def _get_columns(self): - return self._columns - - def _set_columns(self, cols): - if len(cols) != len(self._series): - raise Exception('Columns length %d did not match data %d!' % - (len(cols), len(self._series))) - - cols = _ensure_index(cols) - - # rename the _series if needed - existing = getattr(self,'_columns',None) - if existing is not None and len(existing) == len(cols): - - new_series = {} - for i, col in enumerate(existing): - new_col = cols[i] - if new_col in new_series: # pragma: no cover - raise Exception('Non-unique mapping!') - new_series[new_col] = self._series.get(col) - - self._series = new_series - - self._columns = cols - - index = property(fget=_get_index, fset=_set_index) - columns = property(fget=_get_columns, fset=_set_columns) + # always return a SparseArray! 
+ return clean def __getitem__(self, key): """ Retrieve column or slice from DataFrame """ - try: - # unsure about how kludgy this is - s = self._series[key] - s.name = key - return s - except (TypeError, KeyError): - if isinstance(key, slice): - date_rng = self.index[key] - return self.reindex(date_rng) - elif isinstance(key, (np.ndarray, list)): - return self._getitem_array(key) - else: # pragma: no cover - raise - - def icol(self, i): - """ - Retrieve the i-th column or columns of the DataFrame by location - - Parameters - ---------- - i : int, slice, or sequence of integers - - Notes - ----- - If slice passed, the resulting data will be a view - - Returns - ------- - column : Series (int) or DataFrame (slice, sequence) - """ - if isinstance(i, slice): - # need to return view - lab_slice = slice(label[0], label[-1]) - return self.ix[:, lab_slice] + if isinstance(key, slice): + date_rng = self.index[key] + return self.reindex(date_rng) + elif isinstance(key, (np.ndarray, list, Series)): + return self._getitem_array(key) else: - label = self.columns[i] - if isinstance(label, Index): - if self.columns.inferred_type == 'integer': - # XXX re: #2228 - return self.reindex(columns=label) - else: - return self.ix[:, i] - - return self[label] - # values = self._data.iget(i) - # return self._col_klass.from_array( - # values, index=self.index, name=label, - # fill_value= self.default_fill_value) + return self._get_item_cache(key) @Appender(DataFrame.get_value.__doc__, indents=0) def get_value(self, index, col): - s = self._series[col] - return s.get_value(index) + return self._get_item_cache(col).get_value(index) def set_value(self, index, col, value): """ @@ -438,8 +362,8 @@ def set_value(self, index, col, value): frame : DataFrame """ dense = self.to_dense().set_value(index, col, value) - return dense.to_sparse(kind=self.default_kind, - fill_value=self.default_fill_value) + return dense.to_sparse(kind=self._default_kind, + fill_value=self._default_fill_value) def 
_slice(self, slobj, axis=0, raise_on_error=False): if axis == 0: @@ -455,24 +379,6 @@ def _slice(self, slobj, axis=0, raise_on_error=False): return self.reindex(index=new_index, columns=new_columns) - def as_matrix(self, columns=None): - """ - Convert the frame to its Numpy-array matrix representation - - Columns are presented in sorted order unless a specific list - of columns is provided. - """ - if columns is None: - columns = self.columns - - if len(columns) == 0: - return np.zeros((len(self.index), 0), dtype=float) - - return np.array([self.icol(i).values - for i in range(len(self.columns))]).T - - values = property(as_matrix) - def xs(self, key, axis=0, copy=False): """ Returns a row (cross-section) from the SparseDataFrame as a Series @@ -491,9 +397,8 @@ def xs(self, key, axis=0, copy=False): return data i = self.index.get_loc(key) - series = self._series - values = [series[k][i] for k in self.columns] - return Series(values, index=self.columns) + data = self.take([i]).get_values()[0] + return Series(data, index=self.columns) #---------------------------------------------------------------------- # Arithmetic-related methods @@ -510,6 +415,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): return SparseDataFrame(index=new_index) new_data = {} + new_fill_value = None if fill_value is not None: # TODO: be a bit more intelligent here for col in new_columns: @@ -520,12 +426,25 @@ def _combine_frame(self, other, func, fill_value=None, level=None): result = result.to_sparse(fill_value=this[col].fill_value) new_data[col] = result else: + for col in new_columns: if col in this and col in other: new_data[col] = func(this[col], other[col]) - return self._constructor(data=new_data, index=new_index, - columns=new_columns) + # if the fill values are the same use them? 
or use a valid one + other_fill_value = getattr(other,'default_fill_value',np.nan) + if self.default_fill_value == other_fill_value: + new_fill_value = self.default_fill_value + elif np.isnan(self.default_fill_value) and not np.isnan(other_fill_value): + new_fill_value = other_fill_value + elif not np.isnan(self.default_fill_value) and np.isnan(other_fill_value): + new_fill_value = self.default_fill_value + + return self._constructor(data=new_data, + index=new_index, + columns=new_columns, + default_fill_value=new_fill_value, + fill_value=new_fill_value) def _combine_match_index(self, other, func, fill_value=None): new_data = {} @@ -544,8 +463,18 @@ def _combine_match_index(self, other, func, fill_value=None): for col, series in this.iteritems(): new_data[col] = func(series.values, other.values) - return self._constructor(new_data, index=new_index, - columns=self.columns) + # fill_value is a function of our operator + if isnull(other.fill_value) or isnull(self.default_fill_value): + fill_value = np.nan + else: + fill_value = func(np.float64(self.default_fill_value), + np.float64(other.fill_value)) + + return self._constructor(new_data, + index=new_index, + columns=self.columns, + default_fill_value=fill_value, + fill_value=self.default_fill_value) def _combine_match_columns(self, other, func, fill_value): # patched version of DataFrame._combine_match_columns to account for @@ -567,16 +496,22 @@ def _combine_match_columns(self, other, func, fill_value): for col in intersection: new_data[col] = func(self[col], float(other[col])) - return self._constructor(new_data, index=self.index, - columns=union) + return self._constructor(new_data, + index=self.index, + columns=union, + default_fill_value=self.default_fill_value, + fill_value=self.default_fill_value) def _combine_const(self, other, func): new_data = {} for col, series in self.iteritems(): new_data[col] = func(series, other) - return self._constructor(data=new_data, index=self.index, - columns=self.columns) + 
return self._constructor(data=new_data, + index=self.index, + columns=self.columns, + default_fill_value=self.default_fill_value, + fill_value=self.default_fill_value) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None): @@ -599,6 +534,9 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, new_series = {} for col, series in self.iteritems(): + if mask.all(): + continue + values = series.values new = values.take(indexer) @@ -608,7 +546,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, new_series[col] = new return SparseDataFrame(new_series, index=index, columns=self.columns, - default_fill_value=self.default_fill_value) + default_fill_value=self._default_fill_value) def _reindex_columns(self, columns, copy, level, fill_value, limit=None): if level is not None: @@ -623,10 +561,13 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None): # TODO: fill value handling sdict = dict((k, v) for k, v in self.iteritems() if k in columns) return SparseDataFrame(sdict, index=self.index, columns=columns, - default_fill_value=self.default_fill_value) + default_fill_value=self._default_fill_value) + + def _reindex_with_indexers(self, reindexers, method=None, copy=False, fill_value=np.nan): + + index, row_indexer = reindexers.get(0,(None,None)) + columns, col_indexer = reindexers.get(1,(None, None)) - def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, - copy, fill_value): if columns is None: columns = self.columns @@ -635,73 +576,19 @@ def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer, if col not in self: continue if row_indexer is not None: - new_arrays[col] = com.take_1d(self[col].values, row_indexer, + new_arrays[col] = com.take_1d(self[col].get_values(), row_indexer, fill_value=fill_value) else: new_arrays[col] = self[col] return self._constructor(new_arrays, index=index, columns=columns) - def _rename_index_inplace(self, mapper): - 
self.index = [mapper(x) for x in self.index] - - def _rename_columns_inplace(self, mapper): - new_series = {} - new_columns = [] - - for col in self.columns: - new_col = mapper(col) - if new_col in new_series: # pragma: no cover - raise Exception('Non-unique mapping!') - new_series[new_col] = self[col] - new_columns.append(new_col) - - self.columns = new_columns - self._series = new_series - - def take(self, indices, axis=0, convert=True): - """ - Analogous to ndarray.take, return SparseDataFrame corresponding to - requested indices along an axis - - Parameters - ---------- - indices : list / array of ints - axis : {0, 1} - convert : convert indices for negative values, check bounds, default True - mainly useful for an user routine calling - - Returns - ------- - taken : SparseDataFrame - """ - - indices = com._ensure_platform_int(indices) - - # check/convert indicies here - if convert: - indices = _maybe_convert_indices(indices, len(self._get_axis(axis))) - - new_values = self.values.take(indices, axis=axis) - if axis == 0: - new_columns = self.columns - new_index = self.index.take(indices) - else: - new_columns = self.columns.take(indices) - new_index = self.index - return self._constructor(new_values, index=new_index, - columns=new_columns) - - def add_prefix(self, prefix): - f = (('%s' % prefix) + '%s').__mod__ - return self.rename(columns=f) - - def add_suffix(self, suffix): - f = ('%s' + ('%s' % suffix)).__mod__ - return self.rename(columns=f) - def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): + if isinstance(other, Series): + assert(other.name is not None) + other = SparseDataFrame({other.name: other}, + default_fill_value=self._default_fill_value) if on is not None: raise NotImplementedError else: @@ -713,7 +600,7 @@ def _join_index(self, other, how, lsuffix, rsuffix): raise AssertionError() other = SparseDataFrame({other.name: other}, - default_fill_value=self.default_fill_value) + 
default_fill_value=self._default_fill_value) join_index = self.index.join(other.index, how=how) @@ -722,11 +609,8 @@ def _join_index(self, other, how, lsuffix, rsuffix): this, other = this._maybe_rename_join(other, lsuffix, rsuffix) - result_series = this._series - other_series = other._series - result_series.update(other_series) - - return self._constructor(result_series, index=join_index) + from pandas import concat + return concat([this,other],axis=1,verify_integrity=True) def _maybe_rename_join(self, other, lsuffix, rsuffix): intersection = self.columns.intersection(other.columns) @@ -758,8 +642,8 @@ def transpose(self): """ return SparseDataFrame(self.values.T, index=self.columns, columns=self.index, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + default_fill_value=self._default_fill_value, + default_kind=self._default_kind) T = property(transpose) @Appender(DataFrame.count.__doc__) @@ -781,32 +665,7 @@ def cumsum(self, axis=0): """ return self.apply(lambda x: x.cumsum(), axis=axis) - def shift(self, periods, freq=None, **kwds): - """ - Analogous to DataFrame.shift - """ - from pandas.core.series import _resolve_offset - - offset = _resolve_offset(freq, kwds) - - new_series = {} - if offset is None: - new_index = self.index - for col, s in self.iteritems(): - new_series[col] = s.shift(periods) - else: - new_index = self.index.shift(periods, offset) - for col, s in self.iteritems(): - new_series[col] = SparseSeries(s.sp_values, index=new_index, - sparse_index=s.sp_index, - fill_value=s.fill_value) - - return SparseDataFrame(new_series, index=new_index, - columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) - - def apply(self, func, axis=0, broadcast=False): + def apply(self, func, axis=0, broadcast=False, reduce=False): """ Analogous to DataFrame.apply, for SparseDataFrame @@ -834,11 +693,11 @@ def apply(self, func, axis=0, broadcast=False): new_series[k] = applied return 
SparseDataFrame(new_series, index=self.index, columns=self.columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + default_fill_value=self._default_fill_value, + default_kind=self._default_kind) else: if not broadcast: - return self._apply_standard(func, axis) + return self._apply_standard(func, axis, reduce=reduce) else: return self._apply_broadcast(func, axis) @@ -859,19 +718,19 @@ def applymap(self, func): """ return self.apply(lambda x: map(func, x)) - @Appender(DataFrame.fillna.__doc__) - def fillna(self, value=None, method=None, inplace=False, limit=None): - new_series = {} - for k, v in self.iterkv(): - new_series[k] = v.fillna(value=value, method=method, limit=limit) - if inplace: - self._series = new_series - return self - else: - return self._constructor(new_series, index=self.index, - columns=self.columns) +def dict_to_manager(sdict, columns, index): + """ create and return the block manager from a dict of series, columns, index """ + + # from BlockManager perspective + axes = [_ensure_index(columns), _ensure_index(index)] + + # segregates dtypes and forms blocks matching to columns + blocks = form_blocks([ sdict[c] for c in columns ], columns, axes) + # consolidate for now + mgr = BlockManager(blocks, axes) + return mgr.consolidate() def stack_sparse_frame(frame): """ diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 0b2842155b299..9bc0586ab27b1 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -58,9 +58,12 @@ class SparsePanel(Panel): ----- """ ndim = 3 + _typ = 'panel' + _subtyp = 'sparse_panel' def __init__(self, frames, items=None, major_axis=None, minor_axis=None, - default_fill_value=np.nan, default_kind='block'): + default_fill_value=np.nan, default_kind='block', + copy=False): if isinstance(frames, np.ndarray): new_frames = {} for item, vals in zip(items, frames): @@ -74,6 +77,7 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None, if not 
(isinstance(frames, dict)): raise AssertionError() + self.default_fill_value = fill_value = default_fill_value self.default_kind = kind = default_kind @@ -128,6 +132,9 @@ def to_dense(self): return Panel(self.values, self.items, self.major_axis, self.minor_axis) + def as_matrix(self): + return self.values + @property def values(self): # return dense values @@ -226,6 +233,7 @@ def __setstate__(self, state): self._minor_axis = _ensure_index(com._unpickle_array(minor)) self._frames = frames + def copy(self): """ Make a (shallow) copy of the sparse panel diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index bd01845a295b6..208f4a1275e8c 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -10,14 +10,17 @@ import operator -from pandas.core.common import isnull +from pandas.core.common import isnull, _values_from_object from pandas.core.index import Index, _ensure_index -from pandas.core.series import Series, TimeSeries, _maybe_match_name +from pandas.core.series import Series, _maybe_match_name from pandas.core.frame import DataFrame +from pandas.core.internals import SingleBlockManager +from pandas.core import generic import pandas.core.common as com import pandas.core.datetools as datetools +import pandas.index as _index -from pandas.util import py3compat +from pandas.util import py3compat, rwproperty from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray) from pandas._sparse import BlockIndex, IntIndex @@ -66,14 +69,9 @@ def _sparse_series_op(left, right, op, name): new_name = _maybe_match_name(left, right) result = _sparse_array_op(left, right, op, name) - result = result.view(SparseSeries) - result.index = new_index - result.name = new_name + return SparseSeries(result, index=new_index, name=new_name) - return result - - -class SparseSeries(SparseArray, Series): +class SparseSeries(Series): """Data structure for labeled, sparse floating point data Parameters @@ -91,111 +89,157 @@ class SparseSeries(SparseArray, 
Series): must change values, convert to dense, make your changes, then convert back to sparse """ - __array_priority__ = 15 + _subtyp = 'sparse_series' - sp_index = None - fill_value = None + def __init__(self, data, index=None, sparse_index=None, kind='block', + fill_value=None, name=None, dtype=None, copy=False, + fastpath=False): - def __new__(cls, data, index=None, sparse_index=None, kind='block', - fill_value=None, name=None, copy=False): + # we are called internally, so short-circuit + if fastpath: - is_sparse_array = isinstance(data, SparseArray) - if fill_value is None: - if is_sparse_array: - fill_value = data.fill_value - else: - fill_value = nan - - if is_sparse_array: - if isinstance(data, SparseSeries) and index is None: - index = data.index - elif index is not None: - if not (len(index) == len(data)): - raise AssertionError() - - sparse_index = data.sp_index - values = np.asarray(data) - elif isinstance(data, (Series, dict)): - if index is None: - index = data.index - - data = Series(data) - values, sparse_index = make_sparse(data, kind=kind, - fill_value=fill_value) - elif isinstance(data, (tuple, list, np.ndarray)): - # array-like - if sparse_index is None: - values, sparse_index = make_sparse(data, kind=kind, - fill_value=fill_value) - else: - values = data - if not (len(values) == sparse_index.npoints): - raise AssertionError() + # data is an ndarray, index is defined + data = SingleBlockManager(data, index, fastpath=True) + if copy: + data = data.copy() else: - if index is None: - raise Exception('must pass index!') - length = len(index) + is_sparse_array = isinstance(data, SparseArray) + if fill_value is None: + if is_sparse_array: + fill_value = data.fill_value + else: + fill_value = nan + + if is_sparse_array: + if isinstance(data, SparseSeries) and index is None: + index = data.index + elif index is not None: + assert(len(index) == len(data)) + + sparse_index = data.sp_index + data = np.asarray(data) + + elif isinstance(data, SparseSeries): + 
if index is None: + index = data.index + + # extract the SingleBlockManager + data = data._data + + elif isinstance(data, (Series, dict)): + if index is None: + index = data.index + + data = Series(data) + data, sparse_index = make_sparse(data, kind=kind, + fill_value=fill_value) + + elif isinstance(data, (tuple, list, np.ndarray)): + # array-like + if sparse_index is None: + data, sparse_index = make_sparse(data, kind=kind, + fill_value=fill_value) + else: + assert(len(data) == sparse_index.npoints) - if data == fill_value or (isnull(data) - and isnull(fill_value)): - if kind == 'block': - sparse_index = BlockIndex(length, [], []) + elif isinstance(data, SingleBlockManager): + if dtype is not None: + data = data.astype(dtype) + if index is None: + index = data.index else: - sparse_index = IntIndex(length, []) - values = np.array([]) + data = data.reindex(index,copy=False) + else: - if kind == 'block': - locs, lens = ([0], [length]) if length else ([], []) - sparse_index = BlockIndex(length, locs, lens) + if index is None: + raise Exception('must pass index!') + + length = len(index) + + if data == fill_value or (isnull(data) + and isnull(fill_value)): + if kind == 'block': + sparse_index = BlockIndex(length, [], []) + else: + sparse_index = IntIndex(length, []) + data = np.array([]) + else: - sparse_index = IntIndex(length, index) - values = np.empty(length) - values.fill(data) + if kind == 'block': + locs, lens = ([0], [length]) if length else ([], []) + sparse_index = BlockIndex(length, locs, lens) + else: + sparse_index = IntIndex(length, index) + v = data + data = np.empty(length) + data.fill(v) + + if index is None: + index = com._default_index(sparse_index.length) + index = _ensure_index(index) + + # create/copy the manager + if isinstance(data, SingleBlockManager): + + if copy: + data = data.copy() + else: - if index is None: - index = com._default_index(sparse_index.length) - index = _ensure_index(index) + # create a sparse array + if not isinstance(data, 
SparseArray): + data = SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype, copy=copy) + + data = SingleBlockManager(data, index) - # Create array, do *not* copy data by default - if copy: - subarr = np.array(values, dtype=np.float64, copy=True) - else: - subarr = np.asarray(values, dtype=np.float64) + generic.NDFrame.__init__(self, data) + + self.index = index + self.name = name + + @property + def values(self): + """ return the array """ + return self._data._values + + def get_values(self): + """ same as values """ + return self._data._values.to_dense().view() + + @property + def block(self): + return self._data._block + + @rwproperty.getproperty + def fill_value(self): + return self.block.fill_value + + @rwproperty.setproperty + def fill_value(self, v): + self.block.fill_value = v - if index.is_all_dates: - cls = SparseTimeSeries + @property + def sp_index(self): + return self.block.sp_index - # Change the class of the array to be the subclass type. - output = subarr.view(cls) - output.sp_index = sparse_index - output.fill_value = np.float64(fill_value) - output.index = index - output.name = name - return output + @property + def sp_values(self): + return self.values.sp_values - def _make_time_series(self): - # oh boy #2139 - self.__class__ = SparseTimeSeries + @property + def npoints(self): + return self.sp_index.npoints @classmethod - def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None): + def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): """ Simplified alternate constructor """ - return SparseSeries(arr, index=index, name=name, copy=copy, fill_value=fill_value) - - def __init__(self, data, index=None, sparse_index=None, kind='block', - fill_value=None, name=None, copy=False): - pass + return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath) @property def _constructor(self): - def make_sp_series(data, index=None, name=None): - 
return SparseSeries(data, index=index, fill_value=self.fill_value, - kind=self.kind, name=name) - - return make_sp_series + return SparseSeries @property def kind(self): @@ -204,42 +248,21 @@ def kind(self): elif isinstance(self.sp_index, IntIndex): return 'integer' - def __array_finalize__(self, obj): - """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. - """ - self._index = getattr(obj, '_index', None) - self.name = getattr(obj, 'name', None) - self.sp_index = getattr(obj, 'sp_index', None) - self.fill_value = getattr(obj, 'fill_value', None) - - def __reduce__(self): - """Necessary for making this object picklable""" - object_state = list(ndarray.__reduce__(self)) + def as_sparse_array(self, kind=None, fill_value=None, copy=False): + """ return my self as a sparse array, do not copy by default """ - subclass_state = (self.index, self.fill_value, self.sp_index, - self.name) - object_state[2] = (object_state[2], subclass_state) - return tuple(object_state) - - def __setstate__(self, state): - """Necessary for making this object picklable""" - nd_state, own_state = state - ndarray.__setstate__(self, nd_state) - - index, fill_value, sp_index = own_state[:3] - name = None - if len(own_state) > 3: - name = own_state[3] - - self.sp_index = sp_index - self.fill_value = fill_value - self.index = index - self.name = name + if fill_value is None: + fill_value = self.fill_value + if kind is None: + kind = self.kind + return SparseArray(self.values, + sparse_index=self.sp_index, + fill_value=fill_value, + kind=kind, + copy=copy) def __len__(self): - return self.sp_index.length + return len(self.block) def __repr__(self): series_rep = Series.__repr__(self) @@ -255,6 +278,14 @@ def __repr__(self): __floordiv__ = _sparse_op_wrap(operator.floordiv, 'floordiv') __pow__ = _sparse_op_wrap(operator.pow, 'pow') + # Inplace operators + __iadd__ = __add__ + __isub__ = __sub__ + __imul__ = __mul__ + __itruediv__ = __truediv__ + __ifloordiv__ = 
__floordiv__ + __ipow__ = __pow__ + # reverse operators __radd__ = _sparse_op_wrap(operator.add, '__radd__') __rsub__ = _sparse_op_wrap(lambda x, y: y - x, '__rsub__') @@ -268,6 +299,73 @@ def __repr__(self): __div__ = _sparse_op_wrap(operator.div, 'div') __rdiv__ = _sparse_op_wrap(lambda x, y: y / x, '__rdiv__') + + def __array_wrap__(self, result): + """ + Gets called prior to a ufunc (and after) + """ + return self._constructor(result, + index=self.index, + sparse_index=self.sp_index, + fill_value=self.fill_value, + copy=False) + + def __array_finalize__(self, obj): + """ + Gets called after any ufunc or other array operations, necessary + to pass on the index. + """ + self.name = getattr(obj, 'name', None) + self.fill_value = getattr(obj, 'fill_value', None) + + def __getstate__(self): + # pickling + return dict(_typ = self._typ, + _subtyp = self._subtyp, + _data = self._data, + fill_value = self.fill_value, + name = self.name) + + + + def _unpickle_series_compat(self, state): + + nd_state, own_state = state + + # recreate the ndarray + data = np.empty(nd_state[1],dtype=nd_state[2]) + np.ndarray.__setstate__(data, nd_state) + + index, fill_value, sp_index = own_state[:3] + name = None + if len(own_state) > 3: + name = own_state[3] + + # create a sparse array + if not isinstance(data, SparseArray): + data = SparseArray(data, sparse_index=sp_index, fill_value=fill_value, copy=False) + + # recreate + data = SingleBlockManager(data, index, fastpath=True) + generic.NDFrame.__init__(self, data) + + self._set_axis(0,index) + self.name = name + + def __iter__(self): + """ forward to the array """ + return iter(self.values) + + def _set_subtyp(self, is_all_dates): + if is_all_dates: + object.__setattr__(self,'_subtyp','sparse_time_series') + else: + object.__setattr__(self,'_subtyp','sparse_series') + + def _get_val_at(self, loc): + """ forward to the array """ + return self.block.values._get_val_at(loc) + def __getitem__(self, key): """ @@ -287,10 +385,14 @@ def 
__getitem__(self, key): # is there a case where this would NOT be an ndarray? # need to find an example, I took out the case for now + key = _values_from_object(key) dataSlice = self.values[key] new_index = Index(self.index.view(ndarray)[key]) return self._constructor(dataSlice, index=new_index, name=self.name) + def _set_with_engine(self, key, value): + return self.set_value(key, value) + def abs(self): """ Return an object with absolute value taken. Only applicable to objects @@ -364,8 +466,31 @@ def set_value(self, label, value): ------- series : SparseSeries """ - dense = self.to_dense().set_value(label, value) - return dense.to_sparse(kind=self.kind, fill_value=self.fill_value) + values = self.to_dense() + + # if the label doesn't exist, we will create a new object here + # and possibily change the index + new_values = values.set_value(label, value) + if new_values is not None: + values = new_values + new_index = values.index + values = SparseArray(values, fill_value=self.fill_value, kind=self.kind) + self._data = SingleBlockManager(values, new_index) + self._index = new_index + + def _set_values(self, key, value): + + # this might be inefficient as we have to recreate the sparse array + # rather than setting individual elements, but have to convert + # the passed slice/boolean that's in dense space into a sparse indexer + # not sure how to do that! 
+ if isinstance(key, Series): + key = key.values + + values = self.values.to_dense() + values[key] = _index.convert_scalar(values, value) + values = SparseArray(values, fill_value=self.fill_value, kind=self.kind) + self._data = SingleBlockManager(values, self.index) def to_dense(self, sparse_only=False): """ @@ -376,35 +501,26 @@ def to_dense(self, sparse_only=False): index = self.index.take(int_index.indices) return Series(self.sp_values, index=index, name=self.name) else: - return Series(self.values, index=self.index, name=self.name) + return Series(self.values.to_dense(), index=self.index, name=self.name) @property def density(self): r = float(self.sp_index.npoints) / float(self.sp_index.length) return r - def astype(self, dtype=None): - """ - - """ - if dtype is not None and dtype not in (np.float_, float): - raise Exception('Can only support floating point data') - - return self.copy() - def copy(self, deep=True): """ Make a copy of the SparseSeries. Only the actual sparse values need to be copied """ + new_data = self._data if deep: - values = self.sp_values.copy() - else: - values = self.sp_values - return SparseSeries(values, index=self.index, - sparse_index=self.sp_index, - fill_value=self.fill_value, name=self.name) - + new_data = self._data.copy() + + return self._constructor(new_data, index=self.index, + sparse_index=self.sp_index, + fill_value=self.fill_value, name=self.name) + def reindex(self, index=None, method=None, copy=True, limit=None): """ Conform SparseSeries to new Index @@ -422,19 +538,8 @@ def reindex(self, index=None, method=None, copy=True, limit=None): return self.copy() else: return self - - if len(self.index) == 0: - # FIXME: inelegant / slow - values = np.empty(len(new_index), dtype=np.float64) - values.fill(nan) - return SparseSeries(values, index=new_index, - fill_value=self.fill_value) - - new_index, fill_vec = self.index.reindex(index, method=method, - limit=limit) - new_values = com.take_1d(self.values, fill_vec) - return 
SparseSeries(new_values, index=new_index, - fill_value=self.fill_value, name=self.name) + + return self._constructor(self._data.reindex(new_index,method=method,limit=limit,copy=copy),index=new_index,name=self.name) def sparse_reindex(self, new_index): """ @@ -451,26 +556,25 @@ def sparse_reindex(self, new_index): if not (isinstance(new_index, splib.SparseIndex)): raise AssertionError() - new_values = self.sp_index.to_int_index().reindex(self.sp_values, - self.fill_value, - new_index) - return SparseSeries(new_values, index=self.index, - sparse_index=new_index, - fill_value=self.fill_value) - - @Appender(Series.fillna.__doc__) - def fillna(self, value=None, method=None, inplace=False, limit=None): - dense = self.to_dense() - filled = dense.fillna(value=value, method=method, limit=limit) - result = filled.to_sparse(kind=self.kind, - fill_value=self.fill_value) + block = self.block.sparse_reindex(new_index) + new_data = SingleBlockManager(block, block.ref_items) + return self._constructor(new_data, index=self.index, + sparse_index=new_index, + fill_value=self.fill_value) - if inplace: - self.sp_values[:] = result.values - return self + def _reindex_indexer(self, new_index, indexer, copy): + if indexer is not None: + new_values = com.take_1d(self.values.values, indexer) else: + if copy: + result = self.copy() + else: + result = self return result + # be subclass-friendly + return self._constructor(new_values, new_index, name=self.name) + def take(self, indices, axis=0, convert=True): """ Sparse-compatible version of ndarray.take @@ -479,7 +583,7 @@ def take(self, indices, axis=0, convert=True): ------- taken : ndarray """ - new_values = SparseArray.take(self, indices) + new_values = SparseArray.take(self.values, indices) new_index = self.index.take(indices) return self._constructor(new_values, index=new_index) @@ -487,22 +591,14 @@ def cumsum(self, axis=0, dtype=None, out=None): """ Cumulative sum of values. 
Preserves locations of NaN values - Extra parameters are to preserve ndarray interface. - Returns ------- cumsum : Series or SparseSeries """ - result = SparseArray.cumsum(self) - if isinstance(result, SparseArray): - result = self._attach_meta(result) - return result - - def _attach_meta(self, sparse_arr): - sparse_series = sparse_arr.view(SparseSeries) - sparse_series.index = self.index - sparse_series.name = self.name - return sparse_series + new_array = SparseArray.cumsum(self.values) + if isinstance(new_array, SparseArray): + return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index, name=self.name) + return Series(new_array, index=self.index, name=self.name) def dropna(self): """ @@ -513,6 +609,7 @@ def dropna(self): if isnull(self.fill_value): return dense_valid else: + dense_valid=dense_valid[dense_valid!=self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value) def shift(self, periods, freq=None, **kwds): @@ -534,10 +631,10 @@ def shift(self, periods, freq=None, **kwds): return self.copy() if offset is not None: - return SparseSeries(self.sp_values, - sparse_index=self.sp_index, - index=self.index.shift(periods, offset), - fill_value=self.fill_value) + return self._constructor(self.sp_values, + sparse_index=self.sp_index, + index=self.index.shift(periods, offset), + fill_value=self.fill_value) int_index = self.sp_index.to_int_index() new_indices = int_index.indices + periods @@ -549,10 +646,10 @@ def shift(self, periods, freq=None, **kwds): if isinstance(self.sp_index, BlockIndex): new_sp_index = new_sp_index.to_block_index() - return SparseSeries(self.sp_values[start:end].copy(), - index=self.index, - sparse_index=new_sp_index, - fill_value=self.fill_value) + return self._constructor(self.sp_values[start:end].copy(), + index=self.index, + sparse_index=new_sp_index, + fill_value=self.fill_value) def combine_first(self, other): """ @@ -573,25 +670,5 @@ def combine_first(self, other): dense_combined = 
self.to_dense().combine_first(other) return dense_combined.to_sparse(fill_value=self.fill_value) - -class SparseTimeSeries(SparseSeries, TimeSeries): - """Data structure for labeled, sparse floating point data, with `TimeStamp` - index labels - - Parameters - ---------- - data : {array-like, Series, SparseSeries, dict} - kind : {'block', 'integer'} - fill_value : float - Defaults to NaN (code for missing) - sparse_index : {BlockIndex, IntIndex}, optional - Only if you have one. Mainly used internally - - Notes - ----- - SparseSeries objects are immutable via the typical Python means. If you - must change values, convert to dense, make your changes, then convert back - to sparse - """ - - pass +# backwards compatiblity +SparseTimeSeries = SparseSeries diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index c18e0173b4589..f25e1c35eaa2f 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -151,10 +151,27 @@ def setUp(self): self.ziseries2 = SparseSeries(arr, index=index, kind='integer', fill_value=0) + def test_iteration_and_str(self): + [ x for x in self.bseries ] + str(self.bseries) + def test_construct_DataFrame_with_sp_series(self): # it works! 
df = DataFrame({'col': self.bseries}) + # printing & access + df.iloc[:1] + df['col'] + df.dtypes + str(df) + + assert_sp_series_equal(df['col'],self.bseries) + + # blocking + expected = Series({ 'col' : 'float64:sparse' }) + result = df.ftypes + assert_series_equal(expected,result) + def test_series_density(self): # GH2803 ts = Series(np.random.randn(10)) @@ -208,7 +225,7 @@ def test_constructor(self): self.assert_(isinstance(self.iseries.sp_index, IntIndex)) self.assertEquals(self.zbseries.fill_value, 0) - assert_equal(self.zbseries.values, self.bseries.to_dense().fillna(0)) + assert_equal(self.zbseries.values.values, self.bseries.to_dense().fillna(0).values) # pass SparseSeries s2 = SparseSeries(self.bseries) @@ -230,7 +247,7 @@ def test_constructor(self): # pass dict? # don't copy the data by default - values = np.ones(len(self.bseries.sp_values)) + values = np.ones(self.bseries.npoints) sp = SparseSeries(values, sparse_index=self.bseries.sp_index) sp.sp_values[:5] = 97 self.assert_(values[0] == 97) @@ -257,10 +274,10 @@ def test_constructor_ndarray(self): def test_constructor_nonnan(self): arr = [0, 0, 0, nan, nan] sp_series = SparseSeries(arr, fill_value=0) - assert_equal(sp_series.values, arr) + assert_equal(sp_series.values.values, arr) def test_copy_astype(self): - cop = self.bseries.astype(np.float_) + cop = self.bseries.astype(np.float64) self.assert_(cop is not self.bseries) self.assert_(cop.sp_index is self.bseries.sp_index) self.assert_(cop.dtype == np.float64) @@ -271,7 +288,7 @@ def test_copy_astype(self): assert_sp_series_equal(cop2, self.iseries) # test that data is copied - cop.sp_values[:5] = 97 + cop[:5] = 97 self.assert_(cop.sp_values[0] == 97) self.assert_(self.bseries.sp_values[0] != 97) @@ -351,15 +368,14 @@ def test_get_get_value(self): assert_almost_equal(self.bseries.get_value(10), self.bseries[10]) def test_set_value(self): + idx = self.btseries.index[7] - res = self.btseries.set_value(idx, 0) - self.assert_(res is not self.btseries) - 
self.assertEqual(res[idx], 0) + self.btseries.set_value(idx, 0) + self.assertEqual(self.btseries[idx], 0) - res = self.iseries.set_value('foobar', 0) - self.assert_(res is not self.iseries) - self.assert_(res.index[-1] == 'foobar') - self.assertEqual(res['foobar'], 0) + self.iseries.set_value('foobar', 0) + self.assert_(self.iseries.index[-1] == 'foobar') + self.assertEqual(self.iseries['foobar'], 0) def test_getitem_slice(self): idx = self.bseries.index @@ -386,7 +402,7 @@ def _compare(idx): dense_result = dense.take(idx).values sparse_result = sp.take(idx) self.assert_(isinstance(sparse_result, SparseSeries)) - assert_almost_equal(dense_result, sparse_result.values) + assert_almost_equal(dense_result, sparse_result.values.values) _compare([1., 2., 3., 4., 5., 0.]) _compare([7, 2, 9, 0, 4]) @@ -394,7 +410,6 @@ def _compare(idx): self._check_all(_compare_with_dense) - self.assertRaises(Exception, self.bseries.take, [-1, 0]) self.assertRaises(Exception, self.bseries.take, [0, len(self.bseries) + 1]) @@ -403,11 +418,12 @@ def _compare(idx): assert_almost_equal(sp.take([0, 1, 2, 3, 4]), np.repeat(nan, 5)) def test_setitem(self): - self.assertRaises(Exception, self.bseries.__setitem__, 5, 7.) - self.assertRaises(Exception, self.iseries.__setitem__, 5, 7.) + self.bseries[5] = 7. + self.assert_(self.bseries[5] == 7.) def test_setslice(self): - self.assertRaises(Exception, self.bseries.__setslice__, 5, 10, 7.) + self.bseries[5:10] = 7. 
+ assert_series_equal(self.bseries[5:10].to_dense(),Series(7.,index=range(5,10),name=self.bseries.name)) def test_operators(self): def _check_op(a, b, op): @@ -464,12 +480,20 @@ def test_operators_corner2(self): assert_sp_series_equal(result, 3 - self.zbseries) def test_binary_operators(self): - def _check_inplace_op(op): + + ##### skipping for now ##### + raise nose.SkipTest + + def _check_inplace_op(iop, op): tmp = self.bseries.copy() - self.assertRaises(NotImplementedError, op, tmp, self.bseries) - inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv', 'ipow'] + + expected = op(tmp,self.bseries) + iop(tmp,self.bseries) + assert_sp_series_equal(tmp,expected) + + inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow'] for op in inplace_ops: - _check_inplace_op(getattr(operator, op)) + _check_inplace_op(getattr(operator, "i%s" % op), getattr(operator, op)) def test_reindex(self): def _compare_with_series(sps, new_index): @@ -605,9 +629,12 @@ def test_dropna(self): fill_value=0) sp_valid = sp.valid() - assert_almost_equal(sp_valid.values, - sp.to_dense().valid().values) - self.assert_(sp_valid.index.equals(sp.to_dense().valid().index)) + + expected = sp.to_dense().valid() + expected = expected[expected!=0] + + assert_almost_equal(sp_valid.values, expected.values) + self.assert_(sp_valid.index.equals(expected.index)) self.assertEquals(len(sp_valid.sp_values), 2) result = self.bseries.dropna() @@ -710,6 +737,7 @@ class TestSparseDataFrame(TestCase, test_frame.SafeForSparse): _multiprocess_can_split_ = True def setUp(self): + self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C': np.arange(10), @@ -782,12 +810,13 @@ def test_constructor(self): # init dict with different index idx = self.frame.index[:5] - cons = SparseDataFrame(self.frame._series, index=idx, + cons = SparseDataFrame(self.frame, index=idx, columns=self.frame.columns, default_fill_value=self.frame.default_fill_value, - 
default_kind=self.frame.default_kind) + default_kind=self.frame.default_kind, + copy = True) reindexed = self.frame.reindex(idx) - assert_sp_frame_equal(cons, reindexed) + assert_sp_frame_equal(cons, reindexed, exact_indices=False) # assert level parameter breaks reindex self.assertRaises(Exception, self.frame.reindex, idx, level=0) @@ -1082,8 +1111,8 @@ def _check_frame(frame): # insert SparseSeries differently-indexed to_insert = frame['A'][::2] frame['E'] = to_insert - assert_series_equal(frame['E'].to_dense(), - to_insert.to_dense().reindex(frame.index)) + expected = to_insert.to_dense().reindex(frame.index).fillna(to_insert.fill_value) + assert_series_equal(frame['E'].to_dense(),expected) # insert Series frame['F'] = frame['A'].to_dense() @@ -1093,8 +1122,9 @@ def _check_frame(frame): # insert Series differently-indexed to_insert = frame['A'].to_dense()[::2] frame['G'] = to_insert - assert_series_equal(frame['G'].to_dense(), - to_insert.reindex(frame.index)) + expected = to_insert.reindex(frame.index).fillna(frame.default_fill_value) + assert_series_equal(frame['G'].to_dense(),expected) + # insert ndarray frame['H'] = np.random.randn(N) @@ -1124,11 +1154,14 @@ def test_setitem_corner(self): assert_sp_series_equal(self.frame['a'], self.frame['B']) def test_setitem_array(self): - arr = self.frame['B'].view(SparseArray) + arr = self.frame['B'] self.frame['E'] = arr assert_sp_series_equal(self.frame['E'], self.frame['B']) - self.assertRaises(Exception, self.frame.__setitem__, 'F', arr[:-1]) + + self.frame['F'] = arr[:-1] + index = self.frame.index[:-1] + assert_sp_series_equal(self.frame['E'].reindex(index), self.frame['F'].reindex(index)) def test_delitem(self): A = self.frame['A'] @@ -1160,12 +1193,12 @@ def test_append(self): b = self.frame[5:] appended = a.append(b) - assert_sp_frame_equal(appended, self.frame) + assert_sp_frame_equal(appended, self.frame, exact_indices=False) a = self.frame.ix[:5, :3] b = self.frame.ix[5:] appended = a.append(b) - 
assert_sp_frame_equal(appended.ix[:, :3], self.frame.ix[:, :3]) + assert_sp_frame_equal(appended.ix[:, :3], self.frame.ix[:, :3], exact_indices=False) def test_apply(self): applied = self.frame.apply(np.sqrt) @@ -1176,10 +1209,6 @@ def test_apply(self): self.assert_(applied['A'].fill_value == np.sqrt(2)) # agg / broadcast - applied = self.frame.apply(np.sum) - assert_series_equal(applied, - self.frame.to_dense().apply(np.sum)) - broadcasted = self.frame.apply(np.sum, broadcast=True) self.assert_(isinstance(broadcasted, SparseDataFrame)) assert_frame_equal(broadcasted.to_dense(), @@ -1187,6 +1216,11 @@ def test_apply(self): self.assert_(self.empty.apply(np.sqrt) is self.empty) + from pandas.core import nanops + applied = self.frame.apply(np.sum) + assert_series_equal(applied, + self.frame.to_dense().apply(nanops.nansum)) + def test_apply_nonuq(self): df_orig = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c']) @@ -1213,12 +1247,12 @@ def test_fillna(self): df = self.zframe.reindex(range(5)) result = df.fillna(0) expected = df.to_dense().fillna(0).to_sparse(fill_value=0) - assert_sp_frame_equal(result, expected) + assert_sp_frame_equal(result, expected, exact_indices=False) result = df.copy() result.fillna(0, inplace=True) expected = df.to_dense().fillna(0).to_sparse(fill_value=0) - assert_sp_frame_equal(result, expected) + assert_sp_frame_equal(result, expected, exact_indices=False) result = df.copy() result = df['A'] @@ -1236,13 +1270,15 @@ def test_corr(self): def test_describe(self): self.frame['foo'] = np.nan + self.frame.get_dtype_counts() + str(self.frame) desc = self.frame.describe() def test_join(self): left = self.frame.ix[:, ['A', 'B']] right = self.frame.ix[:, ['C', 'D']] joined = left.join(right) - assert_sp_frame_equal(joined, self.frame) + assert_sp_frame_equal(joined, self.frame, exact_indices=False) right = self.frame.ix[:, ['B', 'D']] self.assertRaises(Exception, left.join, right) @@ -1262,7 +1298,7 @@ def _check_frame(frame): 
dense_result) sparse_result2 = sparse_result.reindex(index) - dense_result2 = dense_result.reindex(index) + dense_result2 = dense_result.reindex(index).fillna(frame.default_fill_value) assert_frame_equal(sparse_result2.to_dense(), dense_result2) # propagate CORRECT fill value @@ -1359,7 +1395,6 @@ def _check(frame): def test_shift(self): def _check(frame): shifted = frame.shift(0) - self.assert_(shifted is not frame) assert_sp_frame_equal(shifted, frame) f = lambda s: s.shift(1) @@ -1453,7 +1488,7 @@ def _dense_series_compare(s, f): def _dense_frame_compare(frame, f): result = f(frame) assert(isinstance(frame, SparseDataFrame)) - dense_result = f(frame.to_dense()) + dense_result = f(frame.to_dense()).fillna(frame.default_fill_value) assert_frame_equal(result.to_dense(), dense_result) diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 7d13aa8ce6765..d04f47ea55368 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -331,7 +331,7 @@ def is_period(object o): def is_period_array(ndarray[object] values): cdef int i, n = len(values) - from pandas import Period + from pandas.tseries.period import Period if n == 0: return False diff --git a/pandas/src/properties.pyx b/pandas/src/properties.pyx index 53bb561ef9110..5e55810d7a2f2 100644 --- a/pandas/src/properties.pyx +++ b/pandas/src/properties.pyx @@ -43,26 +43,6 @@ cdef class AxisProperty(object): def __set__(self, obj, value): obj._set_axis(self.axis, value) -cdef class SeriesIndex(object): - cdef: - object _check_type - - def __init__(self): - from pandas.core.index import _ensure_index - self._check_type = _ensure_index - - def __get__(self, obj, type): - return obj._index - - def __set__(self, obj, value): - if len(obj) != len(value): - raise AssertionError('Index length did not match values') - obj._index = val = self._check_type(value) - if hasattr(val, 'tz'): - # hack for #2139 - obj._make_time_series() - - cdef class ValuesProperty(object): def __get__(self, obj, type): diff 
--git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx index d173ed8d8e1b7..4d18bc71c1aff 100644 --- a/pandas/src/reduce.pyx +++ b/pandas/src/reduce.pyx @@ -9,8 +9,7 @@ cdef class Reducer: ''' cdef: Py_ssize_t increment, chunksize, nresults - object arr, dummy, f, labels - bint can_set_name + object arr, dummy, f, labels, typ, index def __init__(self, object arr, object f, axis=1, dummy=None, labels=None): @@ -33,49 +32,84 @@ cdef class Reducer: self.f = f self.arr = arr - self.dummy = self._check_dummy(dummy) + self.typ = None self.labels = labels + self.dummy, index = self._check_dummy(dummy) + + if axis == 0: + self.labels = index + self.index = labels + else: + self.labels = labels + self.index = index def _check_dummy(self, dummy=None): + cdef object index + if dummy is None: dummy = np.empty(self.chunksize, dtype=self.arr.dtype) - self.can_set_name = 0 + index = None else: if dummy.dtype != self.arr.dtype: raise ValueError('Dummy array must be same dtype') if len(dummy) != self.chunksize: raise ValueError('Dummy array must be length %d' % self.chunksize) - self.can_set_name = type(dummy) != np.ndarray - return dummy + # we passed a series-like + if hasattr(dummy,'values'): + + self.typ = type(dummy) + index = getattr(dummy,'index',None) + dummy = dummy.values + + return dummy, index def get_result(self): cdef: char* dummy_buf ndarray arr, result, chunk - Py_ssize_t i + Py_ssize_t i, incr flatiter it - object res - bint set_label = 0 - ndarray labels + object res, tchunk, name, labels, index, typ arr = self.arr chunk = self.dummy - dummy_buf = chunk.data chunk.data = arr.data - - set_label = self.labels is not None and self.can_set_name - if set_label: - labels = self.labels + labels = self.labels + index = self.index + typ = self.typ + incr = self.increment try: for i in range(self.nresults): - if set_label: - chunk.name = util.get_value_at(labels, i) + # need to make sure that we pass an actual object to the function + # and not just an ndarray + if typ is 
not None: + try: + if labels is not None: + name = labels[i] + + # recreate with the index if supplied + if index is not None: + tchunk = typ(chunk, + index = index, + name = name) + else: + tchunk = typ(chunk, name=name) + + except: + tchunk = chunk + typ = None + else: + tchunk = chunk + + res = self.f(tchunk) + + if hasattr(res,'values'): + res = res.values - res = self.f(chunk) if i == 0: result = self._get_result_array(res) it = <flatiter> PyArray_IterNew(result) @@ -117,19 +151,24 @@ cdef class SeriesBinGrouper: bint passed_dummy cdef public: - object arr, index, dummy, f, bins + object arr, index, dummy_arr, dummy_index, values, f, bins, typ, ityp, name def __init__(self, object series, object f, object bins, object dummy): n = len(series) self.bins = bins self.f = f - if not series.flags.c_contiguous: - series = series.copy('C') - self.arr = series + + values = series.values + if not values.flags.c_contiguous: + values = values.copy('C') + self.arr = values self.index = series.index + self.typ = type(series) + self.ityp = type(series.index) + self.name = getattr(series,'name',None) - self.dummy = self._check_dummy(dummy) + self.dummy_arr, self.dummy_index = self._check_dummy(dummy) self.passed_dummy = dummy is not None # kludge for #1688 @@ -140,14 +179,17 @@ cdef class SeriesBinGrouper: def _check_dummy(self, dummy=None): if dummy is None: - dummy = np.empty(0, dtype=self.arr.dtype) + values = np.empty(0, dtype=self.arr.dtype) + index = None else: if dummy.dtype != self.arr.dtype: raise ValueError('Dummy array must be same dtype') - if not dummy.flags.contiguous: - dummy = dummy.copy() + values = dummy.values + if not values.flags.contiguous: + values = values.copy() + index = dummy.index - return dummy + return values, index def get_result(self): cdef: @@ -155,9 +197,9 @@ cdef class SeriesBinGrouper: ndarray[int64_t] counts Py_ssize_t i, n, group_size object res, chunk - bint initialized = 0 + bint initialized = 0, needs_typ = 1, try_typ = 0 Slider 
vslider, islider - object gin + object gin, typ, ityp, name counts = np.zeros(self.ngroups, dtype=np.int64) @@ -169,14 +211,17 @@ cdef class SeriesBinGrouper: else: counts[i] = self.bins[i] - self.bins[i-1] - chunk = self.dummy + chunk = self.dummy_arr group_size = 0 n = len(self.arr) + typ = self.typ + ityp = self.ityp + name = self.name - vslider = Slider(self.arr, self.dummy) - islider = Slider(self.index, self.dummy.index) + vslider = Slider(self.arr, self.dummy_arr) + islider = Slider(self.index, self.dummy_index) - gin = self.dummy.index._engine + gin = self.dummy_index._engine try: for i in range(self.ngroups): @@ -185,7 +230,28 @@ cdef class SeriesBinGrouper: islider.set_length(group_size) vslider.set_length(group_size) - res = self.f(chunk) + # see if we need to create the object proper + if not try_typ: + try: + chunk.name = name + res = self.f(chunk) + needs_typ = 0 + except: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + needs_typ = 1 + + try_typ = 0 + else: + if needs_typ: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + else: + chunk.name = name + res = self.f(chunk) + + if hasattr(res,'values'): + res = res.values if not initialized: result = self._get_result_array(res) @@ -212,7 +278,7 @@ cdef class SeriesBinGrouper: def _get_result_array(self, object res): try: assert(not isinstance(res, np.ndarray)) - assert(not (isinstance(res, list) and len(res) == len(self.dummy))) + assert(not (isinstance(res, list) and len(res) == len(self.dummy_arr))) result = np.empty(self.ngroups, dtype='O') except Exception: @@ -230,7 +296,7 @@ cdef class SeriesGrouper: bint passed_dummy cdef public: - object arr, index, dummy, f, labels + object arr, index, dummy_arr, dummy_index, f, labels, values, typ, ityp, name def __init__(self, object series, object f, object labels, Py_ssize_t ngroups, object dummy): @@ -238,25 +304,33 @@ cdef class SeriesGrouper: self.labels = labels self.f = f - if not 
series.flags.c_contiguous: - series = series.copy('C') - self.arr = series + + values = series.values + if not values.flags.c_contiguous: + values = values.copy('C') + self.arr = values self.index = series.index + self.typ = type(series) + self.ityp = type(series.index) + self.name = getattr(series,'name',None) - self.dummy = self._check_dummy(dummy) + self.dummy_arr, self.dummy_index = self._check_dummy(dummy) self.passed_dummy = dummy is not None self.ngroups = ngroups def _check_dummy(self, dummy=None): if dummy is None: - dummy = np.empty(0, dtype=self.arr.dtype) + values = np.empty(0, dtype=self.arr.dtype) + index = None else: if dummy.dtype != self.arr.dtype: raise ValueError('Dummy array must be same dtype') - if not dummy.flags.contiguous: - dummy = dummy.copy() + values = dummy.values + if not values.flags.contiguous: + values = values.copy() + index = dummy.index - return dummy + return values, index def get_result(self): cdef: @@ -264,20 +338,23 @@ cdef class SeriesGrouper: ndarray[int64_t] labels, counts Py_ssize_t i, n, group_size, lab object res, chunk - bint initialized = 0 + bint initialized = 0, needs_typ = 1, try_typ = 0 Slider vslider, islider - object gin + object gin, typ, ityp, name labels = self.labels counts = np.zeros(self.ngroups, dtype=np.int64) - chunk = self.dummy + chunk = self.dummy_arr group_size = 0 n = len(self.arr) + typ = self.typ + ityp = self.ityp + name = self.name - vslider = Slider(self.arr, self.dummy) - islider = Slider(self.index, self.dummy.index) + vslider = Slider(self.arr, self.dummy_arr) + islider = Slider(self.index, self.dummy_index) - gin = self.dummy.index._engine + gin = self.dummy_index._engine try: for i in range(n): group_size += 1 @@ -294,7 +371,28 @@ cdef class SeriesGrouper: islider.set_length(group_size) vslider.set_length(group_size) - res = self.f(chunk) + # see if we need to create the object proper + if not try_typ: + try: + chunk.name = name + res = self.f(chunk) + needs_typ = 0 + except: + res = 
self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + needs_typ = 1 + + try_typ = 0 + else: + if needs_typ: + res = self.f(typ(vslider.buf, index=islider.buf, + name=name, fastpath=True)) + else: + chunk.name = name + res = self.f(chunk) + + if hasattr(res,'values'): + res = res.values if not initialized: result = self._get_result_array(res) @@ -324,7 +422,7 @@ cdef class SeriesGrouper: def _get_result_array(self, object res): try: assert(not isinstance(res, np.ndarray)) - assert(not (isinstance(res, list) and len(res) == len(self.dummy))) + assert(not (isinstance(res, list) and len(res) == len(self.dummy_arr))) result = np.empty(self.ngroups, dtype='O') except Exception: diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index b104c70da9494..c3f4c8b3cd604 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -12,6 +12,7 @@ from pandas.core.api import DataFrame, Series, Panel, notnull import pandas.algos as algos import pandas.core.common as com +from pandas.core.common import _values_from_object from pandas.util.decorators import Substitution, Appender @@ -191,11 +192,11 @@ def _get_corr(a, b): def _flex_binary_moment(arg1, arg2, f): - if not (isinstance(arg1,(np.ndarray, DataFrame)) and - isinstance(arg1,(np.ndarray, DataFrame))): + if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and + isinstance(arg1,(np.ndarray, Series, DataFrame))): raise ValueError("arguments to moment function must be of type ndarray/DataFrame") - if isinstance(arg1, np.ndarray) and isinstance(arg2, np.ndarray): + if isinstance(arg1, (np.ndarray,Series)) and isinstance(arg2, (np.ndarray,Series)): X, Y = _prep_binary(arg1, arg2) return f(X, Y) elif isinstance(arg1, DataFrame): diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 13eeb03e15328..e7d9c5e2a62dd 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -64,14 +64,14 @@ def __init__(self, y, x, intercept=True, weights=None, nw_lags=None, if self._weights is not 
None: self._x_trans = self._x.mul(np.sqrt(self._weights), axis=0) self._y_trans = self._y * np.sqrt(self._weights) - self.sm_ols = sm.WLS(self._y.values, - self._x.values, + self.sm_ols = sm.WLS(self._y.get_values(), + self._x.get_values(), weights=self._weights.values).fit() else: self._x_trans = self._x self._y_trans = self._y - self.sm_ols = sm.OLS(self._y.values, - self._x.values).fit() + self.sm_ols = sm.OLS(self._y.get_values(), + self._x.get_values()).fit() def _prepare_data(self): """ @@ -96,6 +96,9 @@ def _prepare_data(self): filt_rhs['intercept'] = 1. pre_filt_rhs['intercept'] = 1. + if hasattr(filt_weights,'to_dense'): + filt_weights = filt_weights.to_dense() + return (filt_lhs, filt_rhs, filt_weights, pre_filt_rhs, index, valid) @@ -1300,8 +1303,11 @@ def _filter_data(lhs, rhs, weights=None): filt_lhs = combined.pop('__y__') filt_rhs = combined - return (filt_lhs, filt_rhs, filt_weights, - pre_filt_rhs, index, valid) + if hasattr(filt_weights,'to_dense'): + filt_weights = filt_weights.to_dense() + + return (filt_lhs.to_dense(), filt_rhs.to_dense(), filt_weights, + pre_filt_rhs.to_dense(), index, valid) def _combine_rhs(rhs): diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index 17a45409c1ab5..f8a81e28bb1c1 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -97,9 +97,10 @@ def testOLSWithDatasets_scotland(self): def testWLS(self): # WLS centered SS changed (fixed) in 0.5.0 - if sm.version.version < '0.5.0': - raise nose.SkipTest - + v = sm.version.version.split('.') + if int(v[0]) >= 0 and int(v[1]) <= 5: + if int(v[2]) < 1: + raise nose.SkipTest print( "Make sure you're using statsmodels 0.5.0.dev-cec4f26 or later.") X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D']) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6bba9f6d32efc..2c824d2592d33 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -481,7 +481,7 @@ def 
test_setitem_boolean_column(self): mask = self.frame['A'] > 0 self.frame.ix[mask, 'B'] = 0 - expected.values[mask, 1] = 0 + expected.values[mask.values, 1] = 0 assert_frame_equal(self.frame, expected) @@ -1027,6 +1027,7 @@ def test_getitem_fancy_1d(self): assert_series_equal(xs, exp) def test_setitem_fancy_1d(self): + # case 1: set cross-section for indices frame = self.frame.copy() expected = self.frame.copy() @@ -1128,13 +1129,13 @@ def test_setitem_fancy_boolean(self): mask = frame['A'] > 0 frame.ix[mask] = 0. - expected.values[mask] = 0. + expected.values[mask.values] = 0. assert_frame_equal(frame, expected) frame = self.frame.copy() expected = self.frame.copy() frame.ix[mask, ['A', 'B']] = 0. - expected.values[mask, :2] = 0. + expected.values[mask.values, :2] = 0. assert_frame_equal(frame, expected) def test_getitem_fancy_ints(self): @@ -2866,8 +2867,8 @@ def test_constructor_with_datetimes(self): index=np.arange(10)) result = df.get_dtype_counts() expected = Series({'int64': 1, datetime64name: 2, objectname : 2}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) # check with ndarray construction ndim==0 (e.g. 
we are passing a ndim 0 ndarray with a dtype specified) @@ -2886,16 +2887,16 @@ def test_constructor_with_datetimes(self): expected['float64'] = 1 expected[floatname] = 1 - result.sort() + result.sort_index() expected = Series(expected) - expected.sort() + expected.sort_index() assert_series_equal(result, expected) # check with ndarray construction ndim>0 df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname), intname : np.array([1]*10,dtype=intname)}, index=np.arange(10)) result = df.get_dtype_counts() - result.sort() + result.sort_index() assert_series_equal(result, expected) # GH 2809 @@ -2906,8 +2907,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetime_s':datetime_s}) result = df.get_dtype_counts() expected = Series({ datetime64name : 1 }) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) # GH 2810 @@ -2917,8 +2918,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetimes': datetimes, 'dates':dates}) result = df.get_dtype_counts() expected = Series({ datetime64name : 1, objectname : 1 }) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) def test_constructor_for_list_with_dtypes(self): @@ -2979,8 +2980,8 @@ def test_constructor_for_list_with_dtypes(self): 'e' : [1.,2,4.,7]}) result = df.get_dtype_counts() expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) def test_timedeltas(self): @@ -5365,10 +5366,13 @@ def test_as_matrix_duplicates(self): self.assertTrue(np.array_equal(result, expected)) - def test_as_blocks(self): + def test_ftypes(self): frame = self.mixed_float - mat = frame.blocks - self.assert_(set([ x.name for x in frame.dtypes.values ]) == set(mat.keys())) + expected = Series(dict(A = 
'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense')) + expected.sort() + result = frame.ftypes + result.sort() + assert_series_equal(result,expected) def test_values(self): self.frame.values[:, 0] = 5. @@ -6484,6 +6488,11 @@ def test_reindex(self): newFrame = self.frame.reindex(list(self.ts1.index)) self.assert_(newFrame.index.equals(self.ts1.index)) + # copy with no axes + result = self.frame.reindex() + assert_frame_equal(result,self.frame) + self.assert_((result is self.frame) == False) + def test_reindex_name_remains(self): s = Series(random.rand(10)) df = DataFrame(s, index=np.arange(len(s))) @@ -6571,6 +6580,7 @@ def test_reindex_fill_value(self): assert_frame_equal(result, expected) def test_align(self): + af, bf = self.frame.align(self.frame) self.assert_(af._data is not self.frame._data) @@ -6744,14 +6754,13 @@ def _check_get(df, cond, check_dtypes = True): rs = df.where(cond, other1) rs2 = df.where(cond.values, other1) for k, v in rs.iteritems(): - assert_series_equal(v, np.where(cond[k], df[k], other1[k])) + assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index)) assert_frame_equal(rs, rs2) # dtypes if check_dtypes: self.assert_((rs.dtypes == df.dtypes).all() == True) - # check getting for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]: cond = df > 0 @@ -7314,8 +7323,8 @@ def test_apply_yield_list(self): def test_apply_reduce_Series(self): self.frame.ix[::2, 'A'] = np.nan + expected = self.frame.mean(1) result = self.frame.apply(np.mean, axis=1) - expected = self.frame.mean(1) assert_series_equal(result, expected) def test_apply_differently_indexed(self): @@ -7450,11 +7459,20 @@ def test_applymap(self): def test_filter(self): # items - filtered = self.frame.filter(['A', 'B', 'E']) self.assertEqual(len(filtered.columns), 2) self.assert_('E' not in filtered) + filtered = self.frame.filter(['A', 'B', 'E'], axis='columns') + self.assertEqual(len(filtered.columns), 2) + 
self.assert_('E' not in filtered) + + # other axis + idx = self.frame.index[0:4] + filtered = self.frame.filter(idx, axis='index') + expected = self.frame.reindex(index=idx) + assert_frame_equal(filtered,expected) + # like fcopy = self.frame.copy() fcopy['AA'] = 1 @@ -8020,8 +8038,8 @@ def test_get_numeric_data(self): index=np.arange(10)) result = df.get_dtype_counts() expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1}) - result.sort() - expected.sort() + result.sort_index() + expected.sort_index() assert_series_equal(result, expected) df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', @@ -8264,7 +8282,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, if has_skipna: def skipna_wrapper(x): - nona = x.dropna().values + nona = x.dropna() if len(nona) == 0: return np.nan return alternative(nona) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 65a3d3b1c8a20..77089d8b52409 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1191,7 +1191,6 @@ def test_get_loc_level(self): def test_slice_locs(self): df = tm.makeTimeDataFrame() stacked = df.stack() - idx = stacked.index slob = slice(*idx.slice_locs(df.index[5], df.index[15])) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index eec5f5632d36b..b9ea3d45c8ac6 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -5,6 +5,7 @@ import numpy as np from pandas import Index, MultiIndex, DataFrame, Series +from pandas.sparse.array import SparseArray from pandas.core.internals import * import pandas.core.internals as internals import pandas.util.testing as tm @@ -23,7 +24,7 @@ def assert_block_equal(left, right): def get_float_mat(n, k, dtype): return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0) -TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] +TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2'] N = 10 @@ -43,7 +44,6 @@ def 
get_obj_ex(cols=['b', 'd']): mat[:, 1] = 'bar' return make_block(mat.T, cols, TEST_COLS) - def get_bool_ex(cols=['f']): mat = np.ones((N, 1), dtype=bool) return make_block(mat.T, cols, TEST_COLS) @@ -58,6 +58,14 @@ def get_dt_ex(cols=['h']): mat = randn(N, 1).astype(int).astype('M8[ns]') return make_block(mat.T, cols, TEST_COLS) +def get_sparse_ex1(): + sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + return make_block(sa1, ['s1'], TEST_COLS) + +def get_sparse_ex2(): + sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0) + return make_block(sa2, ['s2'], TEST_COLS) + def create_blockmanager(blocks): l = [] for b in blocks: @@ -66,9 +74,19 @@ def create_blockmanager(blocks): for b in blocks: b.ref_items = items - index_sz = blocks[0].values.shape[1] + index_sz = blocks[0].shape[1] return BlockManager(blocks, [items, np.arange(index_sz)]) +def create_singleblockmanager(blocks): + l = [] + for b in blocks: + l.extend(b.items) + items = Index(l) + for b in blocks: + b.ref_items = items + + return SingleBlockManager(blocks, [items]) + class TestBlock(unittest.TestCase): _multiprocess_can_split_ = True @@ -326,8 +344,27 @@ def test_set_change_dtype(self): def test_copy(self): shallow = self.mgr.copy(deep=False) - for cp_blk, blk in zip(shallow.blocks, self.mgr.blocks): - self.assert_(cp_blk.values is blk.values) + # we don't guaranteee block ordering + for blk in self.mgr.blocks: + found = False + for cp_blk in shallow.blocks: + if cp_blk.values is blk.values: + found = True + break + self.assert_(found == True) + + def test_sparse(self): + mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2()]) + + # what to test here? + self.assert_(mgr.as_matrix().dtype == np.float64) + + def test_sparse_mixed(self): + mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2(),get_float_ex()]) + self.assert_(len(mgr.blocks) == 3) + self.assert_(isinstance(mgr,BlockManager)) + + # what to test here? 
def test_as_matrix_float(self): @@ -502,15 +539,15 @@ def test_get_numeric_data(self): assert_frame_equal(xp, rs) xp = DataFrame({'bool': bool_ser}) - rs = DataFrame(df._data.get_numeric_data(type_list=bool)) + rs = DataFrame(df._data.get_bool_data()) assert_frame_equal(xp, rs) - rs = DataFrame(df._data.get_numeric_data(type_list=bool)) + rs = DataFrame(df._data.get_bool_data()) df.ix[0, 'bool'] = not df.ix[0, 'bool'] self.assertEqual(rs.ix[0, 'bool'], df.ix[0, 'bool']) - rs = DataFrame(df._data.get_numeric_data(type_list=bool, copy=True)) + rs = DataFrame(df._data.get_bool_data(copy=True)) df.ix[0, 'bool'] = not df.ix[0, 'bool'] self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool']) diff --git a/pandas/tests/test_ndframe.py b/pandas/tests/test_ndframe.py index d5d50359b67e8..edafeb64af98e 100644 --- a/pandas/tests/test_ndframe.py +++ b/pandas/tests/test_ndframe.py @@ -14,21 +14,6 @@ def setUp(self): tdf = t.makeTimeDataFrame() self.ndf = NDFrame(tdf._data) - def test_constructor(self): - # with cast - ndf = NDFrame(self.ndf._data, dtype=np.int64) - self.assert_(ndf.values.dtype == np.int64) - - def test_ndim(self): - self.assertEquals(self.ndf.ndim, 2) - - def test_astype(self): - casted = self.ndf.astype(int) - self.assert_(casted.values.dtype == np.int_) - - casted = self.ndf.astype(np.int32) - self.assert_(casted.values.dtype == np.int32) - def test_squeeze(self): # noop for s in [ t.makeFloatSeries(), t.makeStringSeries(), t.makeObjectSeries() ]: diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 437f8b7279824..017ef22b2ebff 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1003,11 +1003,14 @@ def test_reindex(self): major=self.panel.major_axis, minor=self.panel.minor_axis) - assert(result.items is self.panel.items) - assert(result.major_axis is self.panel.major_axis) - assert(result.minor_axis is self.panel.minor_axis) + self.assert_(result.items is self.panel.items) + self.assert_(result.major_axis is 
self.panel.major_axis) + self.assert_(result.minor_axis is self.panel.minor_axis) - self.assertRaises(Exception, self.panel.reindex) + # this ok + result = self.panel.reindex() + assert_panel_equal(result,self.panel) + self.assert_((result is self.panel) == False) # with filling smaller_major = self.panel.major_axis[::5] @@ -1021,7 +1024,8 @@ def test_reindex(self): # don't necessarily copy result = self.panel.reindex(major=self.panel.major_axis, copy=False) - self.assert_(result is self.panel) + assert_panel_equal(result,self.panel) + self.assert_((result is self.panel) == False) def test_reindex_like(self): # reindex_like @@ -1132,8 +1136,10 @@ def test_swapaxes(self): result = self.panel.swapaxes(0, 1) self.assert_(result.items is self.panel.major_axis) - # this should not work - self.assertRaises(Exception, self.panel.swapaxes, 'items', 'items') + # this works, but return a copy + result = self.panel.swapaxes('items', 'items') + assert_panel_equal(self.panel,result) + self.assert_(id(self.panel) != id(result)) def test_transpose(self): result = self.panel.transpose('minor', 'major', 'items') @@ -1726,15 +1732,18 @@ def test_pivot(self): def test_monotonic(): pos = np.array([1, 2, 3, 5]) - assert panelm._monotonic(pos) + def _monotonic(arr): + return not (arr[1:] < arr[:-1]).any() + + assert _monotonic(pos) neg = np.array([1, 2, 3, 4, 3]) - assert not panelm._monotonic(neg) + assert not _monotonic(neg) neg2 = np.array([5, 1, 2, 3, 4, 5]) - assert not panelm._monotonic(neg2) + assert not _monotonic(neg2) def test_panel_index(): diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 5981640b4159c..cb9940ab41b08 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -769,7 +769,10 @@ def test_reindex(self): assert(result.major_axis is self.panel4d.major_axis) assert(result.minor_axis is self.panel4d.minor_axis) - self.assertRaises(Exception, self.panel4d.reindex) + # don't necessarily copy + result = 
self.panel4d.reindex() + assert_panel4d_equal(result,self.panel4d) + self.assert_((result is self.panel4d) == False) # with filling smaller_major = self.panel4d.major_axis[::5] @@ -784,7 +787,8 @@ def test_reindex(self): # don't necessarily copy result = self.panel4d.reindex( major=self.panel4d.major_axis, copy=False) - self.assert_(result is self.panel4d) + assert_panel4d_equal(result,self.panel4d) + self.assert_((result is self.panel4d) == False) def test_reindex_like(self): # reindex_like @@ -880,8 +884,10 @@ def test_swapaxes(self): result = self.panel4d.swapaxes(0, 1) self.assert_(result.labels is self.panel4d.items) - # this should also work - self.assertRaises(Exception, self.panel4d.swapaxes, 'items', 'items') + # this works, but return a copy + result = self.panel4d.swapaxes('items', 'items') + assert_panel4d_equal(self.panel4d,result) + self.assert_(id(self.panel4d) != id(result)) def test_to_frame(self): raise nose.SkipTest diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py index 5675cfec58678..d055706e2525c 100644 --- a/pandas/tests/test_panelnd.py +++ b/pandas/tests/test_panelnd.py @@ -29,11 +29,11 @@ def test_4d_construction(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - axis_slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -43,11 +43,11 @@ def test_4d_construction_alt(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', 'minor_axis'], - axis_slices={'items': 
'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer='Panel', - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -58,14 +58,14 @@ def test_4d_construction_error(self): self.assertRaises(Exception, panelnd.create_nd_panel_factory, klass_name='Panel4D', - axis_orders=['labels', 'items', 'major_axis', - 'minor_axis'], - axis_slices={'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels', 'items', 'major_axis', + 'minor_axis'], + slices={'items': 'items', + 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer='foo', - axis_aliases={'major': 'major_axis', - 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', + 'minor': 'minor_axis'}, stat_axis=2) def test_5d_construction(self): @@ -73,11 +73,11 @@ def test_5d_construction(self): # create a 4D Panel4D = panelnd.create_nd_panel_factory( klass_name='Panel4D', - axis_orders=['labels1', 'items', 'major_axis', 'minor_axis'], - axis_slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, + orders=['labels1', 'items', 'major_axis', 'minor_axis'], + slices={'items': 'items', 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) @@ -85,13 +85,13 @@ def test_5d_construction(self): # create a 5D Panel5D = panelnd.create_nd_panel_factory( klass_name='Panel5D', - axis_orders=['cool1', 'labels1', 'items', 'major_axis', - 'minor_axis'], - axis_slices={'labels1': 'labels1', 'items': 'items', - 'major_axis': 'major_axis', - 
'minor_axis': 'minor_axis'}, + orders=['cool1', 'labels1', 'items', 'major_axis', + 'minor_axis'], + slices={'labels1': 'labels1', 'items': 'items', + 'major_axis': 'major_axis', + 'minor_axis': 'minor_axis'}, slicer=Panel4D, - axis_aliases={'major': 'major_axis', 'minor': 'minor_axis'}, + aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2) p5d = Panel5D(dict(C1=p4d)) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 20ff6e95b436c..e791f8292caec 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -11,7 +11,7 @@ import numpy as np import numpy.ma as ma -from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull, +from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range, date_range) from pandas.core.index import MultiIndex from pandas.tseries.index import Timestamp, DatetimeIndex @@ -280,27 +280,27 @@ def setUp(self): def test_constructor(self): # Recognize TimeSeries - self.assert_(isinstance(self.ts, TimeSeries)) + self.assert_(self.ts.is_time_series == True) # Pass in Series derived = Series(self.ts) - self.assert_(isinstance(derived, TimeSeries)) + self.assert_(derived.is_time_series == True) self.assert_(tm.equalContents(derived.index, self.ts.index)) # Ensure new index is not created self.assertEquals(id(self.ts.index), id(derived.index)) - # Pass in scalar - scalar = Series(0.5) - self.assert_(isinstance(scalar, float)) + # Pass in scalar (now disabled) + #scalar = Series(0.5) + #self.assert_(isinstance(scalar, float)) # Mixed type Series mixed = Series(['hello', np.NaN], index=[0, 1]) self.assert_(mixed.dtype == np.object_) self.assert_(mixed[1] is np.NaN) - self.assert_(not isinstance(self.empty, TimeSeries)) - self.assert_(not isinstance(Series({}), TimeSeries)) + self.assert_(not self.empty.is_time_series) + self.assert_(not Series({}).is_time_series) self.assertRaises(Exception, Series, np.random.randn(3, 3), index=np.arange(3)) @@ -603,7 +603,7 @@ def 
test_setindex(self): # wrong length series = self.series.copy() - self.assertRaises(AssertionError, setattr, series, 'index', + self.assertRaises(Exception, setattr, series, 'index', np.arange(len(series) - 1)) # works @@ -742,6 +742,7 @@ def test_getitem_generator(self): def test_getitem_boolean_object(self): # using column from DataFrame + s = self.series mask = s > s.median() omask = mask.astype(object) @@ -752,11 +753,12 @@ def test_getitem_boolean_object(self): assert_series_equal(result, expected) # setitem + s2 = s.copy() cop = s.copy() cop[omask] = 5 - s[mask] = 5 - assert_series_equal(cop, s) - + s2[mask] = 5 + assert_series_equal(cop, s2) + # nans raise exception omask[5:10] = np.nan self.assertRaises(Exception, s.__getitem__, omask) @@ -765,11 +767,18 @@ def test_getitem_boolean_object(self): def test_getitem_setitem_boolean_corner(self): ts = self.ts mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median() + + # these used to raise...?? + self.assertRaises(Exception, ts.__getitem__, mask_shifted) self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1) + #ts[mask_shifted] + #ts[mask_shifted] = 1 self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted) self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1) + #ts.ix[mask_shifted] + #ts.ix[mask_shifted] = 2 def test_getitem_setitem_slice_integers(self): s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) @@ -1106,8 +1115,18 @@ def test_where(self): assert(s.shape == rs.shape) assert(rs is not s) - rs = s.where(cond[:3], -s) - assert_series_equal(rs, s.abs()[:3].append(s[3:])) + # test alignment + cond = Series([True,False,False,True,False],index=s.index) + s2 = -(s.abs()) + + expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index) + rs = s2.where(cond[:3]) + assert_series_equal(rs, expected) + + expected = s2.abs() + expected.ix[0] = s2[0] + rs = s2.where(cond[:3], -s2) + assert_series_equal(rs, expected) self.assertRaises(ValueError, s.where, 1) 
self.assertRaises(ValueError, s.where, cond[:3].values, -s) @@ -1169,7 +1188,7 @@ def test_where(self): s = Series(np.arange(10)) mask = s > 5 - self.assertRaises(ValueError, s.__setitem__, mask, ([0]*5,)) + self.assertRaises(ValueError, s.__setitem__, mask, [0]*5) def test_where_inplace(self): s = Series(np.random.randn(5)) @@ -1453,7 +1472,7 @@ def test_median(self): self._check_stat_op('median', np.median) # test with integers, test failure - int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10)) + int_ts = Series(np.ones(10, dtype=int), index=range(10)) self.assertAlmostEqual(np.median(int_ts), int_ts.median()) def test_prod(self): @@ -1560,7 +1579,11 @@ def test_cummax(self): self.assert_(np.array_equal(result, expected)) def test_npdiff(self): + raise nose.SkipTest + + # no longer works as the return type of np.diff is now nd.array s = Series(np.arange(5)) + r = np.diff(s) assert_series_equal(Series([nan, 0, 0, 0, nan]), r) @@ -2574,6 +2597,10 @@ def test_unique(self): expected = np.array([1, 2, 3, None], dtype=object) self.assert_(np.array_equal(result, expected)) + def test_dropna_empty(self): + s = Series([]) + self.assert_(len(s.dropna()) == 0) + def test_drop_duplicates(self): s = Series([1, 2, 3, 3]) @@ -2654,7 +2681,8 @@ def test_rank(self): mask = np.isnan(self.ts) filled = self.ts.fillna(np.inf) - exp = rankdata(filled) + # rankdata returns a ndarray + exp = Series(rankdata(filled),index=filled.index) exp[mask] = np.nan assert_almost_equal(ranks, exp) @@ -3680,19 +3708,19 @@ def test_preserveRefs(self): self.assertFalse(np.isnan(self.ts[10])) def test_ne(self): - ts = TimeSeries([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) expected = [True, True, False, True, True] self.assert_(tm.equalContents(ts.index != 5, expected)) self.assert_(tm.equalContents(~(ts.index == 5), expected)) def test_pad_nan(self): - x = TimeSeries([np.nan, 1., np.nan, 3., np.nan], - ['z', 'a', 'b', 'c', 'd'], 
dtype=float) + x = Series([np.nan, 1., np.nan, 3., np.nan], + ['z', 'a', 'b', 'c', 'd'], dtype=float) x.fillna(method='pad', inplace=True) - expected = TimeSeries([np.nan, 1.0, 1.0, 3.0, 3.0], - ['z', 'a', 'b', 'c', 'd'], dtype=float) + expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], + ['z', 'a', 'b', 'c', 'd'], dtype=float) assert_series_equal(x[1:], expected[1:]) self.assert_(np.isnan(x[0]), np.isnan(expected[0])) @@ -4134,12 +4162,12 @@ def test_set_index_makes_timeseries(self): s = Series(range(10)) s.index = idx - self.assertTrue(isinstance(s, TimeSeries)) + self.assertTrue(s.is_time_series == True) def test_timeseries_coercion(self): idx = tm.makeDateIndex(10000) ser = Series(np.random.randn(len(idx)), idx.astype(object)) - self.assert_(isinstance(ser, TimeSeries)) + self.assert_(ser.is_time_series == True) self.assert_(isinstance(ser.index, DatetimeIndex)) def test_replace(self): diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 6d224ffcb7b05..16fec0591a4f6 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -17,7 +17,6 @@ make_block, _consolidate) from pandas.util.decorators import cache_readonly, Appender, Substitution -from pandas.sparse.frame import SparseDataFrame import pandas.core.common as com import pandas.lib as lib @@ -304,8 +303,8 @@ def _get_merge_keys(self): left_drop = [] left, right = self.left, self.right - is_lkey = lambda x: isinstance(x, np.ndarray) and len(x) == len(left) - is_rkey = lambda x: isinstance(x, np.ndarray) and len(x) == len(right) + is_lkey = lambda x: isinstance(x, (np.ndarray, Series)) and len(x) == len(left) + is_rkey = lambda x: isinstance(x, (np.ndarray, Series)) and len(x) == len(right) # ugh, spaghetti re #733 if _any(self.left_on) and _any(self.right_on): @@ -668,7 +667,7 @@ def _prepare_blocks(self): join_blocks = unit.get_upcasted_blocks() type_map = {} for blk in join_blocks: - type_map.setdefault(blk.dtype, []).append(blk) + type_map.setdefault(blk.ftype, []).append(blk) 
blockmaps.append((unit, type_map)) return blockmaps @@ -710,11 +709,11 @@ def _merge_blocks(self, merge_chunks): funit, fblock = merge_chunks[0] fidx = funit.indexer - out_shape = list(fblock.values.shape) + out_shape = list(fblock.get_values().shape) n = len(fidx) if fidx is not None else out_shape[self.axis] - out_shape[0] = sum(len(blk) for unit, blk in merge_chunks) + out_shape[0] = sum(blk.get_merge_length() for unit, blk in merge_chunks) out_shape[self.axis] = n # Should use Fortran order?? @@ -724,7 +723,7 @@ def _merge_blocks(self, merge_chunks): sofar = 0 for unit, blk in merge_chunks: out_chunk = out[sofar: sofar + len(blk)] - com.take_nd(blk.values, unit.indexer, self.axis, out=out_chunk) + com.take_nd(blk.get_values(), unit.indexer, self.axis, out=out_chunk) sofar += len(blk) # does not sort @@ -881,8 +880,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, class _Concatenator(object): """ - Orchestrates a concatenation operation for BlockManagers, with little hacks - to support sparse data structures, etc. 
+ Orchestrates a concatenation operation for BlockManagers """ def __init__(self, objs, axis=0, join='outer', join_axes=None, @@ -950,8 +948,9 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, def get_result(self): if self._is_series and self.axis == 0: - new_data = com._concat_compat([x.values for x in self.objs]) + new_data = com._concat_compat([x.get_values() for x in self.objs]) name = com._consensus_name_attr(self.objs) + new_data = self._post_merge(new_data) return Series(new_data, index=self.new_axes[0], name=name) elif self._is_series: data = dict(itertools.izip(xrange(len(self.objs)), self.objs)) @@ -962,31 +961,43 @@ def get_result(self): return tmpdf else: new_data = self._get_concatenated_data() + new_data = self._post_merge(new_data) return self.objs[0]._from_axes(new_data, self.new_axes) + def _post_merge(self, data): + if isinstance(data, BlockManager): + data = data.post_merge(self.objs) + return data + def _get_fresh_axis(self): return Index(np.arange(len(self._get_concat_axis()))) def _prepare_blocks(self): reindexed_data = self._get_reindexed_data() + # we are consolidating as we go, so just add the blocks, no-need for dtype mapping blockmaps = [] for data in reindexed_data: data = data.consolidate() - - type_map = dict((blk.dtype, blk) for blk in data.blocks) - blockmaps.append(type_map) + blockmaps.append(data.get_block_map(typ='dict')) return blockmaps, reindexed_data def _get_concatenated_data(self): + blockmaps, rdata = self._prepare_blocks() + try: # need to conform to same other (joined) axes for block join - blockmaps, rdata = self._prepare_blocks() + kinds = _get_all_block_kinds(blockmaps) new_blocks = [] for kind in kinds: - klass_blocks = [mapping.get(kind) for mapping in blockmaps] + klass_blocks = [] + for mapping in blockmaps: + l = mapping.get(kind) + if l is None: + l = [ None ] + klass_blocks.extend(l) stacked_block = self._concat_blocks(klass_blocks) new_blocks.append(stacked_block) @@ -997,7 +1008,7 @@ def 
_get_concatenated_data(self): blk.ref_items = self.new_axes[0] new_data = BlockManager(new_blocks, self.new_axes) - except Exception: # EAFP + except (Exception), detail: # EAFP # should not be possible to fail here for the expected reason with # axis = 0 if self.axis == 0: # pragma: no cover @@ -1013,22 +1024,20 @@ def _get_reindexed_data(self): # HACK: ugh reindexed_data = [] - if isinstance(self.objs[0], SparseDataFrame): - pass - else: - axes_to_reindex = list(enumerate(self.new_axes)) - axes_to_reindex.pop(self.axis) + axes_to_reindex = list(enumerate(self.new_axes)) + axes_to_reindex.pop(self.axis) - for obj in self.objs: - data = obj._data - for i, ax in axes_to_reindex: - data = data.reindex_axis(ax, axis=i, copy=False) - reindexed_data.append(data) + for obj in self.objs: + data = obj._data.prepare_for_merge() + for i, ax in axes_to_reindex: + data = data.reindex_axis(ax, axis=i, copy=False) + reindexed_data.append(data) return reindexed_data def _concat_blocks(self, blocks): - values_list = [b.values for b in blocks if b is not None] + + values_list = [b.get_values() for b in blocks if b is not None] concat_values = com._concat_compat(values_list, axis=self.axis) if self.axis > 0: @@ -1058,13 +1067,11 @@ def _concat_single_item(self, objs, item): all_values = [] dtypes = set() - # le sigh - if isinstance(self.objs[0], SparseDataFrame): - objs = [x._data for x in self.objs] - for data, orig in zip(objs, self.objs): if item in orig: values = data.get(item) + if hasattr(values,'to_dense'): + values = values.to_dense() dtypes.add(values.dtype) all_values.append(values) else: diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 8d5ba7af0d92b..238183ef6e9ff 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -210,6 +210,8 @@ def _convert_by(by): elif (np.isscalar(by) or isinstance(by, np.ndarray) or hasattr(by, '__call__')): by = [by] + elif isinstance(by, Series): + by = [by] else: by = list(by) return by diff --git 
a/pandas/tseries/index.py b/pandas/tseries/index.py index d9625a3d5e549..3066671f3e9bf 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -6,7 +6,7 @@ import numpy as np -from pandas.core.common import isnull +from pandas.core.common import isnull, _values_from_object, _maybe_box from pandas.core.index import Index, Int64Index from pandas.tseries.frequencies import ( infer_freq, to_offset, get_period_alias, @@ -514,8 +514,6 @@ def __repr__(self): return summary - __str__ = __repr__ - def __reduce__(self): """Necessary for making this object picklable""" object_state = list(np.ndarray.__reduce__(self)) @@ -1134,12 +1132,10 @@ def get_value(self, series, key): know what you're doing """ if isinstance(key, datetime): - # needed to localize naive datetimes - stamp = Timestamp(key, tz=self.tz) - return self._engine.get_value(series, stamp) + return self.get_value_maybe_box(series, key) try: - return Index.get_value(self, series, key) + return _maybe_box(self, Index.get_value(self, series, key), series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1152,11 +1148,19 @@ def get_value(self, series, key): return series.take(locs) try: - stamp = Timestamp(key, tz=self.tz) - return self._engine.get_value(series, stamp) - except (KeyError, ValueError): + return self.get_value_maybe_box(series, key) + except (TypeError, ValueError, KeyError): raise KeyError(key) + def get_value_maybe_box(self, series, key): + # needed to localize naive datetimes + if self.tz is not None: + key = Timestamp(key, tz=self.tz) + elif not isinstance(key, Timestamp): + key = Timestamp(key) + values = self._engine.get_value(_values_from_object(series), key) + return _maybe_box(self, values, series, key) + def get_loc(self, key): """ Get integer location for requested label @@ -1277,6 +1281,8 @@ def __getitem__(self, key): return self._simple_new(result, self.name, new_offset, self.tz) + _getitem_slice = __getitem__ + # Try to run function on index first, and then on 
elements of index # Especially important for group-by functionality def map(self, f): diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index c1af7ba5cccc2..99d07f19f39d1 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -11,10 +11,10 @@ from pandas.tseries.tools import parse_time_string import pandas.tseries.frequencies as _freq_mod +from pandas.core import base import pandas.core.common as com -from pandas.core.common import isnull +from pandas.core.common import isnull, _maybe_box, _values_from_object from pandas.util import py3compat - from pandas.lib import Timestamp import pandas.lib as lib import pandas.tslib as tslib @@ -40,7 +40,7 @@ def f(self): return property(f) -class Period(object): +class Period(base.PandasObject): """ Represents an period of time @@ -272,28 +272,6 @@ def __repr__(self): return "Period('%s', '%s')" % (formatted, freqstr) - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -911,8 +889,9 @@ def get_value(self, series, key): Fast lookup of value from 1-dimensional ndarray. 
Only use this if you know what you're doing """ + s = _values_from_object(series) try: - return super(PeriodIndex, self).get_value(series, key) + return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key) except (KeyError, IndexError): try: asdt, parsed, reso = parse_time_string(key, self.freq) @@ -934,15 +913,15 @@ def get_value(self, series, key): key = slice(pos[0], pos[1] + 1) return series[key] else: - key = Period(asdt, freq=self.freq) - return self._engine.get_value(series, key.ordinal) + key = Period(asdt, freq=self.freq).ordinal + return _maybe_box(self, self._engine.get_value(s, key), series, key) except TypeError: pass except KeyError: pass - key = Period(key, self.freq) - return self._engine.get_value(series, key.ordinal) + key = Period(key, self.freq).ordinal + return _maybe_box(self, self._engine.get_value(s, key), series, key) def get_loc(self, key): """ @@ -1079,6 +1058,8 @@ def __getitem__(self, key): return PeriodIndex(result, name=self.name, freq=self.freq) + _getitem_slice = __getitem__ + def _format_with_header(self, header, **kwargs): return header + self._format_native_types(**kwargs) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 02a3030f69519..03f5ef3de0554 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -563,20 +563,21 @@ def test_resample_median_bug_1688(self): tm.assert_frame_equal(result, exp) def test_how_lambda_functions(self): - ts = _simple_ts('1/1/2000', '4/1/2000') + ts = _simple_ts('1/1/2000', '4/1/2000') + result = ts.resample('M', how=lambda x: x.mean()) exp = ts.resample('M', how='mean') tm.assert_series_equal(result, exp) - + self.assertRaises(Exception, ts.resample, 'M', - how=[lambda x: x.mean(), lambda x: x.std()]) - + how=[lambda x: x.mean(), lambda x: x.std(ddof=1)]) + result = ts.resample('M', how={'foo': lambda x: x.mean(), - 'bar': lambda x: x.std()}) + 'bar': lambda x: x.std(ddof=1)}) foo_exp = 
ts.resample('M', how='mean') bar_exp = ts.resample('M', how='std') - + tm.assert_series_equal(result['foo'], foo_exp) tm.assert_series_equal(result['bar'], bar_exp) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index c83d4ba131a42..5000a3c26055a 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1832,7 +1832,10 @@ def test_time(self): self.assert_((result == expected).all()) -class TestLegacySupport(unittest.TestCase): +# infortunately, too much has changed to handle these legacy pickles +# class TestLegacySupport(unittest.TestCase): +class LegacySupport(object): + _multiprocess_can_split_ = True @classmethod @@ -2584,15 +2587,7 @@ def test_set_none_nan(self): def test_intercept_astype_object(self): # this test no longer makes sense as series is by default already M8[ns] - - # Work around NumPy 1.6 bugs - #result = self.series.astype(object) - #result2 = self.series.astype('O') - - expected = Series(self.series, dtype=object) - - #assert_series_equal(result, expected) - #assert_series_equal(result2, expected) + expected = self.series.astype('object') df = DataFrame({'a': self.series, 'b': np.random.randn(len(self.series))}) diff --git a/pandas/util/rwproperty.py b/pandas/util/rwproperty.py new file mode 100644 index 0000000000000..2d0dada68cc0e --- /dev/null +++ b/pandas/util/rwproperty.py @@ -0,0 +1,75 @@ +# Read & write properties +# +# Copyright (c) 2006 by Philipp "philiKON" von Weitershausen +# philikon@philikon.de +# +# Freely distributable under the terms of the Zope Public License, v2.1. 
+# +# See rwproperty.txt for detailed explanations +# +import sys + +__all__ = ['getproperty', 'setproperty', 'delproperty'] + +class rwproperty(object): + + def __new__(cls, func): + name = func.__name__ + + # ugly, but common hack + frame = sys._getframe(1) + locals = frame.f_locals + + if name not in locals: + return cls.createProperty(func) + + oldprop = locals[name] + if isinstance(oldprop, property): + return cls.enhanceProperty(oldprop, func) + + raise TypeError("read & write properties cannot be mixed with " + "other attributes except regular property objects.") + + # this might not be particularly elegant, but it's easy on the eyes + + @staticmethod + def createProperty(func): + raise NotImplementedError + + @staticmethod + def enhanceProperty(oldprop, func): + raise NotImplementedError + +class getproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(func, oldprop.fset, oldprop.fdel) + +class setproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(None, func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(oldprop.fget, func, oldprop.fdel) + +class delproperty(rwproperty): + + @staticmethod + def createProperty(func): + return property(None, None, func) + + @staticmethod + def enhanceProperty(oldprop, func): + return property(oldprop.fget, oldprop.fset, func) + +if __name__ == "__main__": + import doctest + doctest.testfile('rwproperty.txt') diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 7745450e5c03b..ce3fbd26376c5 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -40,8 +40,8 @@ # reindex both axes setup = common_setup + """ -df = DataFrame(randn(1000, 1000)) -idx = np.arange(400, 700) +df = DataFrame(randn(10000, 10000)) +idx = np.arange(4000, 7000) """ frame_reindex_axis0 = Benchmark('df.reindex(idx)', setup) diff --git 
a/vb_suite/sparse.py b/vb_suite/sparse.py index bfee959ab982f..1cb0f9233f7e9 100644 --- a/vb_suite/sparse.py +++ b/vb_suite/sparse.py @@ -11,8 +11,8 @@ K = 50 N = 50000 -rng = np.asarray(DateRange('1/1/2000', periods=N, - offset=datetools.Minute())) +rng = np.asarray(date_range('1/1/2000', periods=N, + freq='T')) # rng2 = np.asarray(rng).astype('M8[ns]').astype('i8')
Major refactor primarily to make Series inherit from NDFrame Preserves pickle compat very few tests were changed (and only for compat on return objects) a few performance enhancements, a couple of regressions (see bottom) _obviously this is a large change in terms of the codebase, but it brings more consistency between series/frame/panel (not all of this is there yet, but future changes are much easier)_ _Series is now like Frame in that it has a BlockManager (called SingleBlockManager), which holds a block (of any type we support). This introduced some overhead in doing certain operations, which I spent a lot of time optimizing away, further optimizations will come from cythonizing the core/internals, which should be straightforward at this point_ Highlites below: - Refactor of PandasObject to become new generic Pandas base class - moved methods - **str**,**bytes**,**repr**,save,load - affects all NDFrame hierarchy, Index hierarchy, Period (Timestamp not included) - Refactor of series.py/frame.py/panel.py to move common code to generic.py - added _setup_axes to created generic NDFrame structures - moved methods - from_axes,_wrap_array,axes,ix,shape,empty,swapaxes,transpose,pop - **iter**,keys,**contains**,**len**,**neg**,**invert** - convert_objects,as_blocks,as_matrix,values - **getstate**,**setstate** (though compat remains in frame/panel) - **getattr**,**setattr** - _indexed_same,reindex_like,reindex,align,where,mask - filter (also added axis argument to selectively filter on a different axis) - reindex,reindex_axis (which was the biggest change to make generic) - truncate (moved to become part of NDFrame) These are API changes which make Panel more consistent with DataFrame - swapaxes on a Panel with the same axes specified now return a copy - support attribute access for setting - filter supports same api as original DataFrame filter - Reindex called with no arguments will now return a copy of the input object - Series now inherits from `NDFrame` rather than 
directly from `ndarray`. There are several minor changes that affect the API. - numpy functions that do not support the array interface will now return `ndarrays` rather than series, e.g. `np.diff` and `np.where` - `Series(0.5)` would previously return the scalar `0.5`, this is not longer supported - several methods from frame/series have moved to `NDFrame` (convert_objects,where,mask) - `TimeSeries` is now an alias for `Series`. the property `is_time_series` can be used to distinguish (if desired) - Refactor of Sparse objects to use BlockManager - Created a new block type in internals, SparseBlock, which can hold multi-dtypes and is non-consolidatable. SparseSeries and SparseDataFrame now inherit more methods from there hierarchy (Series/DataFrame), and no longer inherit from SparseArray (which instead is the object of the SparseBlock) - Sparse suite now supports integration with non-sparse data. Non-float sparse data is supportable (partially implemented) - Operations on sparse structures within DataFrames should preserve sparseness, merging type operations will convert to dense (and back to sparse), so might be somewhat inefficient - enable setitem on SparseSeries for boolean/integer/slices - SparsePanels implementation is unchanged (e.g. not using BlockManager, needs work) - added `ftypes` method to Series/DataFame, similar to `dtypes`, but indicates if the underlying is sparse/dense (as well as the dtype) Perf changed a bit primarily in groupby where a Series has to be reconstructed in order to be passed to the function (in some cases). the biggest regression is actually a special case and can trivially be changed to be MUCH faster (as this requires a Series for a sum operation), in any event the cython versions are quite fast. 
``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_reindex_both_axes | 18.8640 | 36.5677 | 0.5159 | frame_reindex_both_axes_ix | 18.7081 | 35.0070 | 0.5344 | groupby_apply_dict_return | 31.2620 | 43.8453 | 0.7130 | indexing_panel_subset | 0.4027 | 0.5000 | 0.8053 | series_constructor_ndarray | 0.0173 | 0.0113 | 1.5352 | frame_iteritems | 3.4393 | 1.8907 | 1.8191 | frame_fancy_lookup | 3.4020 | 1.4997 | 2.2685 | sparse_frame_constructor | 12.0993 | 4.8280 | 2.5061 | groupby_multi_python | 204.1063 | 44.7550 | 4.5605 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Target [dfb119d] : PERF: made constructions of SparseFrame have less redundant steps Base [67ad556] : Merge pull request #3459 from jreback/GH3455 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3463
2013-04-25T18:40:30Z
2013-04-29T14:37:13Z
null
2014-07-10T19:18:54Z
BUG: GH3455 Duplicate indexes with getitem will return items in the correct order
diff --git a/RELEASE.rst b/RELEASE.rst index 20167e1918540..e57c6c565e2cf 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -51,12 +51,15 @@ pandas 0.12.0 columns (GH3437_) - ``.loc`` was not raising when passed an integer list (GH3449_) - Unordered time series selection was misbehaving when using label slicing (GH3448_) + - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 +.. _GH3455: https://github.com/pydata/pandas/issues/3455 +.. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 70fe378ad3c07..7562d20363027 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -563,26 +563,34 @@ def _convert_to_indexer(self, obj, axis=0): check = labels.levels[0].get_indexer(objarr) else: level = None - # XXX + + # unique index if labels.is_unique: indexer = check = labels.get_indexer(objarr) + + # non-unique (dups) else: - mask = np.zeros(len(labels), dtype=bool) + indexer = [] + check = np.arange(len(labels)) lvalues = labels.values for x in objarr: # ugh to_or = lib.map_infer(lvalues, x.__eq__) if not to_or.any(): raise KeyError('%s not in index' % str(x)) - mask |= to_or - indexer = check = mask.nonzero()[0] + # add the indicies (as we want to take) + indexer.extend(check[to_or]) + + indexer = Index(indexer) + mask = check == -1 if mask.any(): raise KeyError('%s not in index' % objarr[mask]) - + return indexer + else: return labels.get_loc(obj) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6106509d530f4..6bba9f6d32efc 100644 --- 
a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4621,7 +4621,6 @@ def test_to_csv_from_csv(self): xp.columns = map(int,xp.columns) assert_frame_equal(xp,rs) - @slow def test_to_csv_moar(self): from pandas.util.testing import makeCustomDataframe as mkdf @@ -4935,6 +4934,21 @@ def test_to_csv_dups_cols(self): with ensure_clean() as filename: self.assertRaises(Exception, df.to_csv, filename) + # GH3457 + from pandas.util.testing import makeCustomDataframe as mkdf + + N=10 + df= mkdf(N, 3) + df.columns = ['a','a','b'] + + with ensure_clean() as filename: + df.to_csv(filename) + + # read_csv will rename the dups columns + result = read_csv(filename,index_col=0) + result = result.rename(columns={ 'a.1' : 'a' }) + assert_frame_equal(result,df) + def test_to_csv_chunking(self): aa=DataFrame({'A':range(100000)}) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 002282a21162d..86cd0ef524b35 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -761,6 +761,16 @@ def test_setitem_iloc(self): expected = DataFrame(np.array([0,101,102,3,104,105,6,7,8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) assert_frame_equal(df,expected) + def test_dups_fancy_indexing(self): + + # GH 3455 + from pandas.util.testing import makeCustomDataframe as mkdf + df= mkdf(10, 3) + df.columns = ['a','a','b'] + cols = ['b','a'] + result = df[['b','a']].columns + expected = Index(['b','a','a']) + self.assert_(result.equals(expected)) if __name__ == '__main__': import nose
BUG: GH3457 to_csv writing duplicate columns incorrectly closes #3455, #3457
https://api.github.com/repos/pandas-dev/pandas/pulls/3459
2013-04-25T14:17:46Z
2013-04-25T14:38:06Z
2013-04-25T14:38:06Z
2014-06-28T10:24:30Z
BUG: to_csv handles cols= reordering,dupe cols GH3454
diff --git a/RELEASE.rst b/RELEASE.rst index 6f55b7cd4490f..aac34c6cf8a5e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -48,7 +48,8 @@ pandas 0.12.0 - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel support. Should provide python3 support (for reading) which has been lacking. (GH3164_) - - Fix to_csv issue when having a large number of rows and ``NaT`` in some + - Addressed handling of dupe columns in df.to_csv new and old (GH3454_, GH3457_) + - Fix to_csv issue when having a large number of rows and ``NaT`` in some columns (GH3437_) - ``.loc`` was not raising when passed an integer list (GH3449_) - Unordered time series selection was misbehaving when using label slicing (GH3448_) @@ -57,6 +58,8 @@ pandas 0.12.0 .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 +.. _GH3454: https://github.com/pydata/pandas/issues/3454 +.. _GH3457: https://github.com/pydata/pandas/issues/3457 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 .. 
_GH3455: https://github.com/pydata/pandas/issues/3455 diff --git a/pandas/core/format.py b/pandas/core/format.py index 22a1f99c6e2d9..7226bd14e5576 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -772,6 +772,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None, self.engine = engine # remove for 0.12 self.obj = obj + self.path_or_buf = path_or_buf self.sep = sep self.na_rep = na_rep @@ -789,13 +790,27 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None, self.line_terminator = line_terminator - if cols is None: - cols = obj.columns + #GH3457 + if not self.obj.columns.is_unique and engine == 'python': + msg= "columns.is_unique == False not supported with engine='python'" + raise NotImplementedError(msg) + if cols is not None: + if isinstance(cols,Index): + cols = cols.to_native_types(na_rep=na_rep,float_format=float_format) + else: + cols=list(cols) + self.obj = self.obj.loc[:,cols] + + # update columns to include possible multiplicity of dupes + # and make sure sure cols is just a list of labels + cols = self.obj.columns if isinstance(cols,Index): cols = cols.to_native_types(na_rep=na_rep,float_format=float_format) else: cols=list(cols) + + # save it self.cols = cols # preallocate data 2d list @@ -804,7 +819,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None, self.data =[None] * ncols if self.obj.columns.is_unique: - self.colname_map = dict((k,i) for i,k in enumerate(obj.columns)) + self.colname_map = dict((k,i) for i,k in enumerate(self.obj.columns)) else: ks = [set(x.items) for x in self.blocks] u = len(reduce(lambda a,x: a.union(x),ks,set())) @@ -1024,7 +1039,9 @@ def _save_chunk(self, start_i, end_i): # self.data is a preallocated list self.data[self.colname_map[k]] = d[j] else: - for i in range(len(self.cols)): + # self.obj should contain a proper view of the dataframes + # with the specified ordering of cols if cols was specified + for i in 
range(len(self.obj.columns)): self.data[i] = self.obj.icol(i).values[slicer].tolist() ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6bba9f6d32efc..530128a100d0b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -28,6 +28,7 @@ from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, + makeCustomDataframe as mkdf, ensure_clean) from pandas.util import py3compat from pandas.util.compat import OrderedDict @@ -4621,9 +4622,59 @@ def test_to_csv_from_csv(self): xp.columns = map(int,xp.columns) assert_frame_equal(xp,rs) + def test_to_csv_cols_reordering(self): + # GH3454 + import pandas as pd + + def _check_df(df,cols=None): + with ensure_clean() as path: + df.to_csv(path,cols = cols,engine='python') + rs_p = pd.read_csv(path,index_col=0) + df.to_csv(path,cols = cols,chunksize=chunksize) + rs_c = pd.read_csv(path,index_col=0) + + if cols: + df = df[cols] + assert (rs_c.columns==rs_p.columns).all() + assert_frame_equal(df,rs_c,check_names=False) + + chunksize=5 + N = int(chunksize*2.5) + + df= mkdf(N, 3) + cs = df.columns + cols = [cs[2],cs[0]] + _check_df(df,cols) + + def test_to_csv_legacy_raises_on_dupe_cols(self): + df= mkdf(10, 3) + df.columns = ['a','a','b'] + with ensure_clean() as path: + self.assertRaises(NotImplementedError,df.to_csv,path,engine='python') + + def test_to_csv_new_dupe_cols(self): + import pandas as pd + def _check_df(df,cols=None): + with ensure_clean() as path: + df.to_csv(path,cols = cols,chunksize=chunksize) + rs_c = pd.read_csv(path,index_col=0) + rs_c.columns = df.columns + assert_frame_equal(df,rs_c,check_names=False) + + chunksize=5 + N = int(chunksize*2.5) + + # dupe cols + df= mkdf(N, 3) + df.columns = ['a','a','b'] + _check_df(df,None) + + # dupe cols with selection + cols = ['b','a'] + _check_df(df,cols) + @slow def test_to_csv_moar(self): - from 
pandas.util.testing import makeCustomDataframe as mkdf path = '__tmp_to_csv_moar__' def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
#3454, #3457, #3455 @jreback, comments?
https://api.github.com/repos/pandas-dev/pandas/pulls/3458
2013-04-25T12:53:19Z
2013-04-25T16:09:51Z
2013-04-25T16:09:51Z
2014-06-18T19:05:44Z
BUG: GH3448 Unordered time series selection was misbehaving when using label slicing
diff --git a/RELEASE.rst b/RELEASE.rst index ae98884f0f683..20167e1918540 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -50,16 +50,17 @@ pandas 0.12.0 - Fix to_csv issue when having a large number of rows and ``NaT`` in some columns (GH3437_) - ``.loc`` was not raising when passed an integer list (GH3449_) + - Unordered time series selection was misbehaving when using label slicing (GH3448_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 +.. _GH3448: https://github.com/pydata/pandas/issues/3448 .. _GH3449: https://github.com/pydata/pandas/issues/3449 - pandas 0.11.0 ============= diff --git a/pandas/core/index.py b/pandas/core/index.py index 9eafcd996ed4f..5ffd211c86d27 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1178,7 +1178,13 @@ def slice_indexer(self, start=None, end=None, step=None): This function assumes that the data is sorted, so use at your own peril """ start_slice, end_slice = self.slice_locs(start, end) - return slice(start_slice, end_slice, step) + + # return a slice + if np.isscalar(start_slice) and np.isscalar(end_slice): + return slice(start_slice, end_slice, step) + + # loc indexers + return Index(start_slice) & Index(end_slice) def slice_locs(self, start=None, end=None): """ diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 6f8d9edcb5e4a..d9625a3d5e549 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1069,7 +1069,9 @@ def intersection(self, other): left_chunk = left.values[lslice] return self._view_like(left_chunk) - def _partial_date_slice(self, reso, parsed): + def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): + + is_monotonic = self.is_monotonic if reso == 'year': t1 = Timestamp(datetime(parsed.year, 1, 1), tz=self.tz) @@ 
-1083,20 +1085,20 @@ def _partial_date_slice(self, reso, parsed): d = tslib.monthrange(parsed.year, qe)[1] # at end of month t1 = Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz) t2 = Timestamp(datetime(parsed.year, qe, d), tz=self.tz) - elif reso == 'day' and self._resolution < Resolution.RESO_DAY: + elif (reso == 'day' and (self._resolution < Resolution.RESO_DAY or not is_monotonic)): st = datetime(parsed.year, parsed.month, parsed.day) t1 = Timestamp(st, tz=self.tz) t2 = st + offsets.Day() t2 = Timestamp(Timestamp(t2, tz=self.tz).value - 1) - elif (reso == 'hour' and - self._resolution < Resolution.RESO_HR): + elif (reso == 'hour' and ( + self._resolution < Resolution.RESO_HR or not is_monotonic)): st = datetime(parsed.year, parsed.month, parsed.day, hour=parsed.hour) t1 = Timestamp(st, tz=self.tz) t2 = Timestamp(Timestamp(st + offsets.Hour(), tz=self.tz).value - 1) - elif (reso == 'minute' and - self._resolution < Resolution.RESO_MIN): + elif (reso == 'minute' and ( + self._resolution < Resolution.RESO_MIN or not is_monotonic)): st = datetime(parsed.year, parsed.month, parsed.day, hour=parsed.hour, minute=parsed.minute) t1 = Timestamp(st, tz=self.tz) @@ -1108,15 +1110,18 @@ def _partial_date_slice(self, reso, parsed): stamps = self.asi8 - if self.is_monotonic: + if is_monotonic: # a monotonic (sorted) series can be sliced - left = stamps.searchsorted(t1.value, side='left') - right = stamps.searchsorted(t2.value, side='right') + left = stamps.searchsorted(t1.value, side='left') if use_lhs else None + right = stamps.searchsorted(t2.value, side='right') if use_rhs else None return slice(left, right) + lhs_mask = (stamps>=t1.value) if use_lhs else True + rhs_mask = (stamps<=t2.value) if use_rhs else True + # try to find a the dates - return ((stamps>=t1.value) & (stamps<=t2.value)).nonzero()[0] + return (lhs_mask & rhs_mask).nonzero()[0] def _possibly_promote(self, other): if other.inferred_type == 'date': @@ -1182,11 +1187,11 @@ def get_loc(self, 
key): except (KeyError, ValueError): raise KeyError(key) - def _get_string_slice(self, key): + def _get_string_slice(self, key, use_lhs=True, use_rhs=True): freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) _, parsed, reso = parse_time_string(key, freq) - loc = self._partial_date_slice(reso, parsed) + loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs) return loc def slice_indexer(self, start=None, end=None, step=None): @@ -1208,20 +1213,40 @@ def slice_locs(self, start=None, end=None): Index.slice_locs, customized to handle partial ISO-8601 string slicing """ if isinstance(start, basestring) or isinstance(end, basestring): - try: - if start: - start_loc = self._get_string_slice(start).start - else: - start_loc = 0 - if end: - end_loc = self._get_string_slice(end).stop - else: - end_loc = len(self) + if self.is_monotonic: + try: + if start: + start_loc = self._get_string_slice(start).start + else: + start_loc = 0 + + if end: + end_loc = self._get_string_slice(end).stop + else: + end_loc = len(self) + + return start_loc, end_loc + except KeyError: + pass - return start_loc, end_loc - except KeyError: - pass + else: + # can't use a slice indexer because we are not sorted! 
+ # so create an indexer directly + try: + if start: + start_loc = self._get_string_slice(start,use_rhs=False) + else: + start_loc = np.arange(len(self)) + + if end: + end_loc = self._get_string_slice(end,use_lhs=False) + else: + end_loc = np.arange(len(self)) + + return start_loc, end_loc + except KeyError: + pass if isinstance(start, time) or isinstance(end, time): raise KeyError('Cannot use slice_locs with time slice keys') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index f0ade216f9772..c83d4ba131a42 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -183,6 +183,23 @@ def test_indexing_unordered(self): result = ts2[t] self.assertTrue(expected == result) + # GH 3448 (ranges) + def compare(slobj): + result = ts2[slobj].copy() + result = result.sort_index() + expected = ts[slobj] + assert_series_equal(result,expected) + + compare(slice('2011-01-01','2011-01-15')) + compare(slice('2010-12-30','2011-01-15')) + compare(slice('2011-01-01','2011-01-16')) + + # partial ranges + compare(slice('2011-01-01','2011-01-6')) + compare(slice('2011-01-06','2011-01-8')) + compare(slice('2011-01-06','2011-01-12')) + + # single values result = ts2['2011'].sort_index() expected = ts['2011'] assert_series_equal(result,expected)
closes #3448
https://api.github.com/repos/pandas-dev/pandas/pulls/3452
2013-04-25T01:10:17Z
2013-04-25T12:29:36Z
2013-04-25T12:29:36Z
2014-07-16T08:06:20Z
BUG: GH3449 .loc was not raising when passed an integer list
diff --git a/RELEASE.rst b/RELEASE.rst index 2c47c043dd84d..ae98884f0f683 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -49,12 +49,14 @@ pandas 0.12.0 lacking. (GH3164_) - Fix to_csv issue when having a large number of rows and ``NaT`` in some columns (GH3437_) + - ``.loc`` was not raising when passed an integer list (GH3449_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3038: https://github.com/pydata/pandas/issues/3038 .. _GH3437: https://github.com/pydata/pandas/issues/3437 +.. _GH3449: https://github.com/pydata/pandas/issues/3449 diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 882f4f5559a92..70fe378ad3c07 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -253,13 +253,13 @@ def _getitem_tuple(self, tup): except IndexingError: pass + # no multi-index, so validate all of the indexers + self._has_valid_tuple(tup) + # ugly hack for GH #836 if self._multi_take_opportunity(tup): return self._multi_take(tup) - # no multi-index, so validate all of the indexers - self._has_valid_tuple(tup) - # no shortcut needed retval = self.obj for i, key in enumerate(tup): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index bc717a0fbf6d1..002282a21162d 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -483,6 +483,16 @@ def test_loc_getitem_int_slice(self): expected = df[10] assert_frame_equal(result,expected) + def test_loc_to_fail(self): + + # GH3449 + df = DataFrame(np.random.random((3, 3)), + index=['a', 'b', 'c'], + columns=['e', 'f', 'g']) + + # raise a KeyError? + self.assertRaises(KeyError, df.loc.__getitem__, tuple([[1, 2], [1, 2]])) + def test_loc_getitem_label_slice(self): # label slices (with ints)
closes #3449
https://api.github.com/repos/pandas-dev/pandas/pulls/3451
2013-04-25T00:05:20Z
2013-04-25T01:55:28Z
2013-04-25T01:55:28Z
2014-06-22T03:25:32Z
BUG: index.map fast path raises recursion warnings on index.time GH3419
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 0d29da83dbd8a..6f8d9edcb5e4a 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1299,7 +1299,10 @@ def time(self): """ Returns array of datetime.time. The time of the day """ - return self.map(lambda t: t.time()) + # can't call self.map() which tries to treat func as ufunc + # and causes recursion warnings on python 2.6 + return _algos.arrmap_object(self.asobject, lambda x:x.time()) + def normalize(self): """
fyi @hayd #3419
https://api.github.com/repos/pandas-dev/pandas/pulls/3450
2013-04-24T22:39:37Z
2013-04-24T22:39:44Z
2013-04-24T22:39:44Z
2014-07-16T08:06:17Z
BUG: Removing a nonexistent store raises a KeyError (GH3346)
diff --git a/RELEASE.rst b/RELEASE.rst index e8287912efb3e..f0e54c0e74166 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -40,7 +40,8 @@ pandas 0.12.0 **API Changes** - - + - When removing an object from a store, **store.remove(key)**, raises + **KeyError** if **key** is not a valid store object. **Bug Fixes** diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index be11732d7b3a2..b9db30245eb1b 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -554,6 +554,10 @@ def remove(self, key, where=None, start=None, stop=None): ------- number of rows removed (or None if not a Table) + Exceptions + ---------- + raises KeyError if key is not a valid store + """ try: s = self.get_storer(key) @@ -569,7 +573,7 @@ def remove(self, key, where=None, start=None, stop=None): return None if s is None: - return None + raise KeyError('No object named %s in the file' % key) # remove the node if where is None: diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 75fe0eefe771e..1999789f206be 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -64,6 +64,14 @@ def ensure_clean(path, mode='a', complevel=None, complib=None, tables.parameters.MAX_BLOSC_THREADS = 1 tables.parameters.MAX_THREADS = 1 +def _maybe_remove(store, key): + """For tests using tables, try removing the table to be sure there is + no content from previous tests using the same table name.""" + try: + store.remove(key) + except: + pass + class TestHDFStore(unittest.TestCase): @@ -196,7 +204,7 @@ def test_versioning(self): store['a'] = tm.makeTimeSeries() store['b'] = tm.makeDataFrame() df = tm.makeTimeDataFrame() - store.remove('df1') + _maybe_remove(store, 'df1') store.append('df1', df[:10]) store.append('df1', df[10:]) self.assert_(store.root.a._v_attrs.pandas_version == '0.10.1') @@ -204,7 +212,7 @@ def test_versioning(self): self.assert_(store.root.df1._v_attrs.pandas_version == '0.10.1') # write a file and wipe its versioning - 
store.remove('df2') + _maybe_remove(store, 'df2') store.append('df2', df) # this is an error because its table_type is appendable, but no version @@ -287,7 +295,7 @@ def test_put(self): # node does not currently exist, test _is_table_type returns False in # this case - # store.remove('f') + # _maybe_remove(store, 'f') # self.assertRaises(ValueError, store.put, 'f', df[10:], append=True) # can't put to a table (use append instead) @@ -369,7 +377,7 @@ def test_put_mixed_type(self): df = df.consolidate().convert_objects() with ensure_clean(self.path) as store: - store.remove('df') + _maybe_remove(store, 'df') warnings.filterwarnings('ignore', category=PerformanceWarning) store.put('df',df) expected = store.get('df') @@ -380,24 +388,24 @@ def test_append(self): with ensure_clean(self.path) as store: df = tm.makeTimeDataFrame() - store.remove('df1') + _maybe_remove(store, 'df1') store.append('df1', df[:10]) store.append('df1', df[10:]) tm.assert_frame_equal(store['df1'], df) - store.remove('df2') + _maybe_remove(store, 'df2') store.put('df2', df[:10], table=True) store.append('df2', df[10:]) tm.assert_frame_equal(store['df2'], df) - store.remove('df3') + _maybe_remove(store, 'df3') store.append('/df3', df[:10]) store.append('/df3', df[10:]) tm.assert_frame_equal(store['df3'], df) # this is allowed by almost always don't want to do it warnings.filterwarnings('ignore', category=tables.NaturalNameWarning) - store.remove('/df3 foo') + _maybe_remove(store, '/df3 foo') store.append('/df3 foo', df[:10]) store.append('/df3 foo', df[10:]) tm.assert_frame_equal(store['df3 foo'], df) @@ -405,20 +413,20 @@ def test_append(self): # panel wp = tm.makePanel() - store.remove('wp1') + _maybe_remove(store, 'wp1') store.append('wp1', wp.ix[:, :10, :]) store.append('wp1', wp.ix[:, 10:, :]) tm.assert_panel_equal(store['wp1'], wp) # ndim p4d = tm.makePanel4D() - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :]) store.append('p4d', p4d.ix[:, :, 10:, 
:]) tm.assert_panel4d_equal(store['p4d'], p4d) # test using axis labels - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=[ 'items', 'major_axis', 'minor_axis']) store.append('p4d', p4d.ix[:, :, 10:, :], axes=[ @@ -429,13 +437,13 @@ def test_append(self): p4d2 = p4d.copy() p4d2['l4'] = p4d['l1'] p4d2['l5'] = p4d['l1'] - store.remove('p4d2') + _maybe_remove(store, 'p4d2') store.append( 'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis']) tm.assert_panel4d_equal(store['p4d2'], p4d2) # test using differt order of items on the non-index axes - store.remove('wp1') + _maybe_remove(store, 'wp1') wp_append1 = wp.ix[:, :10, :] store.append('wp1', wp_append1) wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1]) @@ -446,7 +454,7 @@ def test_append(self): df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]]) df['mixed_column'] = 'testing' df.ix[2, 'mixed_column'] = np.nan - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df) tm.assert_frame_equal(store['df'], df) @@ -459,7 +467,7 @@ def test_append_some_nans(self): 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) }, index=np.arange(20)) # some nans - store.remove('df1') + _maybe_remove(store, 'df1') df.ix[0:15,['A1','B','D','E']] = np.nan store.append('df1', df[:10]) store.append('df1', df[10:]) @@ -468,7 +476,7 @@ def test_append_some_nans(self): # first column df1 = df.copy() df1.ix[:,'A1'] = np.nan - store.remove('df1') + _maybe_remove(store, 'df1') store.append('df1', df1[:10]) store.append('df1', df1[10:]) tm.assert_frame_equal(store['df1'], df1) @@ -476,7 +484,7 @@ def test_append_some_nans(self): # 2nd column df2 = df.copy() df2.ix[:,'A2'] = np.nan - store.remove('df2') + _maybe_remove(store, 'df2') store.append('df2', df2[:10]) store.append('df2', df2[10:]) tm.assert_frame_equal(store['df2'], df2) @@ -484,7 +492,7 @@ def test_append_some_nans(self): # datetimes df3 = df.copy() df3.ix[:,'E'] = 
np.nan - store.remove('df3') + _maybe_remove(store, 'df3') store.append('df3', df3[:10]) store.append('df3', df3[10:]) tm.assert_frame_equal(store['df3'], df3) @@ -496,7 +504,7 @@ def test_append_some_nans(self): 'A2' : np.random.randn(20)}, index=np.arange(20)) - store.remove('df4') + _maybe_remove(store, 'df4') df.ix[0:15,:] = np.nan store.append('df4', df[:10]) store.append('df4', df[10:]) @@ -509,7 +517,7 @@ def test_append_some_nans(self): 'B' : 'foo', 'C' : 'bar'}, index=np.arange(20)) - store.remove('df5') + _maybe_remove(store, 'df5') df.ix[0:15,:] = np.nan store.append('df5', df[:10]) store.append('df5', df[10:]) @@ -522,7 +530,7 @@ def test_append_some_nans(self): 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) }, index=np.arange(20)) - store.remove('df6') + _maybe_remove(store, 'df6') df.ix[0:15,:] = np.nan store.append('df6', df[:10]) store.append('df6', df[10:]) @@ -534,7 +542,7 @@ def test_append_frame_column_oriented(self): with ensure_clean(self.path) as store: # column oriented df = tm.makeTimeDataFrame() - store.remove('df1') + _maybe_remove(store, 'df1') store.append('df1', df.ix[:, :2], axes=['columns']) store.append('df1', df.ix[:, 2:]) tm.assert_frame_equal(store['df1'], df) @@ -568,14 +576,14 @@ def check_indexers(key, indexers): # append then change (will take existing schema) indexers = ['items', 'major_axis', 'minor_axis'] - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers) store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store.select('p4d'), p4d) check_indexers('p4d', indexers) # same as above, but try to append with differnt axes - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers) store.append('p4d', p4d.ix[:, :, 10:, :], axes=[ 'labels', 'items', 'major_axis']) @@ -583,13 +591,13 @@ def check_indexers(key, indexers): check_indexers('p4d', indexers) # pass 
incorrect number of axes - store.remove('p4d') + _maybe_remove(store, 'p4d') self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[ :, :, :10, :], axes=['major_axis', 'minor_axis']) # different than default indexables #1 indexers = ['labels', 'major_axis', 'minor_axis'] - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers) store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store['p4d'], p4d) @@ -597,7 +605,7 @@ def check_indexers(key, indexers): # different than default indexables #2 indexers = ['major_axis', 'labels', 'minor_axis'] - store.remove('p4d') + _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers) store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store['p4d'], p4d) @@ -680,7 +688,7 @@ def check_col(key,name,size): self.assertRaises(ValueError, store.append, 'df_new', df_new) # with nans - store.remove('df') + _maybe_remove(store, 'df') df = tm.makeTimeDataFrame() df['string'] = 'foo' df.ix[1:4, 'string'] = np.nan @@ -700,33 +708,33 @@ def check_col(key,name,size): df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10)) # a min_itemsize that creates a data_column - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, min_itemsize={'A' : 200 }) check_col('df', 'A', 200) self.assert_(store.get_storer('df').data_columns == ['A']) # a min_itemsize that creates a data_column2 - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 }) check_col('df', 'A', 200) self.assert_(store.get_storer('df').data_columns == ['B','A']) # a min_itemsize that creates a data_column2 - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 }) check_col('df', 'B', 200) check_col('df', 'values_block_0', 200) self.assert_(store.get_storer('df').data_columns == ['B']) # infer the .typ on subsequent appends - 
store.remove('df') + _maybe_remove(store, 'df') store.append('df', df[:5], min_itemsize=200) store.append('df', df[5:], min_itemsize=200) tm.assert_frame_equal(store['df'], df) # invalid min_itemsize keys df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A']) - store.remove('df') + _maybe_remove(store, 'df') self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20}) def test_append_with_data_columns(self): @@ -734,7 +742,7 @@ def test_append_with_data_columns(self): with ensure_clean(self.path) as store: df = tm.makeTimeDataFrame() df.loc[:,'B'].iloc[0] = 1. - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df[:2], data_columns=['B']) store.append('df', df[2:]) tm.assert_frame_equal(store['df'], df) @@ -760,7 +768,7 @@ def test_append_with_data_columns(self): df_new['string'] = 'foo' df_new['string'][1:4] = np.nan df_new['string'][5:6] = 'bar' - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df_new, data_columns=['string']) result = store.select('df', [Term('string', '=', 'foo')]) expected = df_new[df_new.string == 'foo'] @@ -771,15 +779,15 @@ def check_col(key,name,size): self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size) with ensure_clean(self.path) as store: - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df_new, data_columns=['string'], min_itemsize={'string': 30}) check_col('df', 'string', 30) - store.remove('df') + _maybe_remove(store, 'df') store.append( 'df', df_new, data_columns=['string'], min_itemsize=30) check_col('df', 'string', 30) - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df_new, data_columns=['string'], min_itemsize={'values': 30}) check_col('df', 'string', 30) @@ -788,7 +796,7 @@ def check_col(key,name,size): df_new['string2'] = 'foobarbah' df_new['string_block1'] = 'foobarbah1' df_new['string_block2'] = 'foobarbah2' - store.remove('df') + _maybe_remove(store, 'df') 
store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50}) check_col('df', 'string', 30) check_col('df', 'string2', 40) @@ -805,7 +813,7 @@ def check_col(key,name,size): df_new['string2'] = 'foo' df_new['string2'][2:5] = np.nan df_new['string2'][7:8] = 'bar' - store.remove('df') + _maybe_remove(store, 'df') store.append( 'df', df_new, data_columns=['A', 'B', 'string', 'string2']) result = store.select('df', [Term('string', '=', 'foo'), Term( @@ -832,7 +840,7 @@ def check_col(key,name,size): df_dc = df_dc.convert_objects() df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan - store.remove('df_dc') + _maybe_remove(store, 'df_dc') store.append('df_dc', df_dc, data_columns=['B', 'C', 'string', 'string2', 'datetime']) result = store.select('df_dc', [Term('B>0')]) @@ -901,7 +909,7 @@ def col(t,column): assert(col('f2', 'string2').is_indexed is False) # try to index a non-table - store.remove('f2') + _maybe_remove(store, 'f2') store.put('f2', df) self.assertRaises(TypeError, store.create_table_index, 'f2') @@ -1254,23 +1262,26 @@ def test_remove(self): df = tm.makeDataFrame() store['a'] = ts store['b'] = df - store.remove('a') + _maybe_remove(store, 'a') self.assertEquals(len(store), 1) tm.assert_frame_equal(df, store['b']) - store.remove('b') + _maybe_remove(store, 'b') self.assertEquals(len(store), 0) + # nonexistence + self.assertRaises(KeyError, store.remove, 'a_nonexistent_store') + # pathing store['a'] = ts store['b/foo'] = df - store.remove('foo') - store.remove('b/foo') + _maybe_remove(store, 'foo') + _maybe_remove(store, 'b/foo') self.assertEquals(len(store), 1) store['a'] = ts store['b/foo'] = df - store.remove('b') + _maybe_remove(store, 'b') self.assertEquals(len(store), 1) # __delitem__ @@ -1286,7 +1297,7 @@ def test_remove_where(self): # non-existance crit1 = Term('index', '>', 'foo') - store.remove('a', where=[crit1]) + self.assertRaises(KeyError, store.remove, 'a', [crit1]) # try to remove non-table 
(with crit) # non-table ok (where = None) @@ -1298,7 +1309,7 @@ def test_remove_where(self): tm.assert_panel_equal(rs, expected) # empty where - store.remove('wp') + _maybe_remove(store, 'wp') store.put('wp', wp, table=True) # deleted number (entire table) @@ -1306,7 +1317,7 @@ def test_remove_where(self): assert(n == 120) # non - empty where - store.remove('wp') + _maybe_remove(store, 'wp') store.put('wp', wp, table=True) self.assertRaises(ValueError, store.remove, 'wp', ['foo']) @@ -1807,12 +1818,12 @@ def test_select(self): with ensure_clean(self.path) as store: # put/select ok - store.remove('wp') + _maybe_remove(store, 'wp') store.put('wp', wp, table=True) store.select('wp') # non-table ok (where = None) - store.remove('wp') + _maybe_remove(store, 'wp') store.put('wp2', wp, table=False) store.select('wp2') @@ -1821,7 +1832,7 @@ def test_select(self): np.random.randn(100, 100, 100), items=['Item%03d' % i for i in xrange(100)], major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in xrange(100)]) - store.remove('wp') + _maybe_remove(store, 'wp') store.append('wp', wp) items = ['Item%03d' % i for i in xrange(80)] result = store.select('wp', Term('items', items)) @@ -1834,7 +1845,7 @@ def test_select(self): # select with columns= df = tm.makeTimeDataFrame() - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df) result = store.select('df', columns=['A', 'B']) expected = df.reindex(columns=['A', 'B']) @@ -1846,21 +1857,21 @@ def test_select(self): tm.assert_frame_equal(expected, result) # with a data column - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns=['A']) result = store.select('df', ['A > 0'], columns=['A', 'B']) expected = df[df.A > 0].reindex(columns=['A', 'B']) tm.assert_frame_equal(expected, result) # all a data columns - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns=True) result = store.select('df', ['A > 0'], columns=['A', 'B']) expected 
= df[df.A > 0].reindex(columns=['A', 'B']) tm.assert_frame_equal(expected, result) # with a data column, but different columns - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns=['A']) result = store.select('df', ['A > 0'], columns=['C', 'D']) expected = df[df.A > 0].reindex(columns=['C', 'D']) @@ -1872,7 +1883,7 @@ def test_select_dtypes(self): # with a Timestamp data column (GH #2637) df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300))) - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns=['ts', 'A']) result = store.select('df', [Term('ts', '>=', Timestamp('2012-02-01'))]) expected = df[df.ts >= Timestamp('2012-02-01')] @@ -1883,7 +1894,7 @@ def test_select_dtypes(self): df['object'] = 'foo' df.ix[4:5,'object'] = 'bar' df['bool'] = df['A'] > 0 - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns = True) expected = df[df.bool == True].reindex(columns=['A','bool']) @@ -1898,7 +1909,7 @@ def test_select_dtypes(self): # integer index df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) - store.remove('df_int') + _maybe_remove(store, 'df_int') store.append('df_int', df) result = store.select( 'df_int', [Term("index<10"), Term("columns", "=", ["A"])]) @@ -1908,7 +1919,7 @@ def test_select_dtypes(self): # float index df = DataFrame(dict(A=np.random.rand( 20), B=np.random.rand(20), index=np.arange(20, dtype='f8'))) - store.remove('df_float') + _maybe_remove(store, 'df_float') store.append('df_float', df) result = store.select( 'df_float', [Term("index<10.0"), Term("columns", "=", ["A"])]) @@ -1923,7 +1934,7 @@ def test_select_with_many_inputs(self): A=np.random.randn(300), B=range(300), users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)])) - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df, data_columns=['ts', 'A', 'B', 'users']) # regular select @@ -1961,7 +1972,7 @@ def 
test_select_iterator(self): with ensure_clean(self.path) as store: df = tm.makeTimeDataFrame(500) - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df) expected = store.select('df') @@ -2120,7 +2131,7 @@ def test_read_column(self): df = tm.makeTimeDataFrame() with ensure_clean(self.path) as store: - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df) # error @@ -2159,7 +2170,7 @@ def test_coordinates(self): with ensure_clean(self.path) as store: - store.remove('df') + _maybe_remove(store, 'df') store.append('df', df) # all @@ -2167,7 +2178,7 @@ def test_coordinates(self): assert((c.values == np.arange(len(df.index))).all() == True) # get coordinates back & test vs frame - store.remove('df') + _maybe_remove(store, 'df') df = DataFrame(dict(A=range(5), B=range(5))) store.append('df', df) @@ -2184,8 +2195,8 @@ def test_coordinates(self): tm.assert_frame_equal(result, expected) # multiple tables - store.remove('df1') - store.remove('df2') + _maybe_remove(store, 'df1') + _maybe_remove(store, 'df2') df1 = tm.makeTimeDataFrame() df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) store.append('df1', df1, data_columns=['A', 'B'])
https://api.github.com/repos/pandas-dev/pandas/pulls/3447
2013-04-24T18:17:18Z
2013-04-25T14:25:09Z
2013-04-25T14:25:09Z
2014-07-16T08:06:11Z
DOC: Adding Panel docs to API, and shortening line length in panel.py
diff --git a/doc/source/api.rst b/doc/source/api.rst index 6eef018d13418..eb65e6087c66c 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -535,10 +535,159 @@ Serialization / IO / Conversion .. _api.panel: Panel ------ +------ + +Attributes and underlying data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Axes** + + * **items**: axis 0; each item corresponds to a DataFrame contained inside + * **major_axis**: axis 1; the index (rows) of each of the DataFrames + * **minor_axis**: axis 2; the columns of each of the DataFrames + +.. autosummary:: + :toctree: generated/ + + Panel.values + Panel.axes + Panel.ndim + Panel.shape + +Conversion / Constructors +~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.__init__ + Panel.astype + Panel.copy + +Getting and setting +~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.get_value + Panel.set_value + +Indexing, iteration, slicing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ -.. _api.panel.stats: + Panel.ix + Panel.__iter__ + Panel.iteritems + Panel.pop + Panel.xs + Panel.major_xs + Panel.minor_xs + +Binary operator functions +~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.add + Panel.div + Panel.mul + Panel.sub + +Function application, GroupBy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.apply + Panel.groupby Computations / Descriptive Stats ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.abs + Panel.count + Panel.cummax + Panel.cummin + Panel.cumprod + Panel.cumsum + Panel.max + Panel.mean + Panel.median + Panel.min + Panel.pct_change + Panel.prod + Panel.skew + Panel.sum + Panel.std + Panel.var + +Reindexing / Selection / Label manipulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. 
autosummary:: + :toctree: generated/ + + Panel.add_prefix + Panel.add_suffix + Panel.drop + Panel.filter + Panel.first + Panel.last + Panel.reindex + Panel.reindex_axis + Panel.reindex_like + Panel.select + Panel.take + Panel.truncate + +Missing data handling +~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.dropna + Panel.fillna + +Reshaping, sorting, transposing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.sort_index + Panel.swaplevel + Panel.transpose + Panel.swapaxes + Panel.conform + +Combining / joining / merging +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.join + Panel.update + +Time series-related +~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.asfreq + Panel.shift + Panel.resample + Panel.tz_convert + Panel.tz_localize + +Serialization / IO / Conversion +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.from_dict + Panel.load + Panel.save + Panel.to_excel + Panel.to_sparse + Panel.to_frame diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 38abfcf925363..57d63acf77ab9 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -7,7 +7,8 @@ import sys import numpy as np from pandas.core.common import (PandasError, _mut_exclusive, - _try_sort, _default_index, _infer_dtype_from_scalar, + _try_sort, _default_index, + _infer_dtype_from_scalar, notnull) from pandas.core.categorical import Factor from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -154,7 +155,7 @@ class Panel(NDFrame): ---------- data : ndarray (items x major x minor), or dict of DataFrames items : Index or array-like - axis=1 + axis=0 major_axis : Index or array-like axis=1 minor_axis : Index or array-like @@ -197,20 +198,20 @@ def _constructor(self): _constructor_sliced = DataFrame def _construct_axes_dict(self, axes=None, **kwargs): - """ return an axes dictionary for myself """ + """ Return an axes 
dictionary for myself """ d = dict([(a, getattr(self, a)) for a in (axes or self._AXIS_ORDERS)]) d.update(kwargs) return d @staticmethod def _construct_axes_dict_from(self, axes, **kwargs): - """ return an axes dictionary for the passed axes """ + """ Return an axes dictionary for the passed axes """ d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) d.update(kwargs) return d def _construct_axes_dict_for_slice(self, axes=None, **kwargs): - """ return an axes dictionary for myself """ + """ Return an axes dictionary for myself """ d = dict([(self._AXIS_SLICEMAP[a], getattr(self, a)) for a in (axes or self._AXIS_ORDERS)]) d.update(kwargs) @@ -241,7 +242,10 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, copy=copy, dtype=dtype) def _init_data(self, data, copy, dtype, **kwargs): - """ generate ND initialization; axes are passed as required objects to __init__ """ + """ + Generate ND initialization; axes are passed + as required objects to __init__ + """ if data is None: data = {} @@ -281,7 +285,8 @@ def _init_dict(self, data, axes, dtype=None): # prefilter if haxis passed if haxis is not None: haxis = _ensure_index(haxis) - data = OrderedDict((k, v) for k, v in data.iteritems() if k in haxis) + data = OrderedDict((k, v) for k, v + in data.iteritems() if k in haxis) else: ks = data.keys() if not isinstance(data,OrderedDict): @@ -356,7 +361,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover - raise ValueError('only recognize items or minor for orientation') + raise ValueError('Orientation must be one of {items, minor}.') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = d['data'].keys() @@ -421,7 +426,8 @@ def __array_wrap__(self, result): # Comparison methods def _indexed_same(self, other): - return all([getattr(self, a).equals(getattr(other, a)) for a in self._AXIS_ORDERS]) + return 
all([getattr(self, a).equals(getattr(other, a)) + for a in self._AXIS_ORDERS]) def _compare_constructor(self, other, func): if not self._indexed_same(other): @@ -490,7 +496,8 @@ def __unicode__(self): """ Return a string representation for a particular Panel - Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + Invoked by unicode(df) in py2 only. + Yields a Unicode String in both py2/py3. """ class_name = str(self.__class__) @@ -502,7 +509,9 @@ def __unicode__(self): def axis_pretty(a): v = getattr(self, a) if len(v) > 0: - return u'%s axis: %s to %s' % (a.capitalize(), com.pprint_thing(v[0]), com.pprint_thing(v[-1])) + return u'%s axis: %s to %s' % (a.capitalize(), + com.pprint_thing(v[0]), + com.pprint_thing(v[-1])) else: return u'%s axis: None' % a.capitalize() @@ -530,7 +539,11 @@ def iteritems(self): iterkv = iteritems def _get_plane_axes(self, axis): - """ get my plane axes: these are already (as compared with higher level planes), as we are returning a DataFrame axes """ + """ + Get my plane axes: these are already + (as compared with higher level planes), + as we are returning a DataFrame axes + """ axis = self._get_axis_name(axis) if axis == 'major_axis': @@ -677,7 +690,9 @@ def __getattr__(self, name): (type(self).__name__, name)) def _slice(self, slobj, axis=0, raise_on_error=False): - new_data = self._data.get_slice(slobj, axis=axis, raise_on_error=raise_on_error) + new_data = self._data.get_slice(slobj, + axis=axis, + raise_on_error=raise_on_error) return self._constructor(new_data) def __setitem__(self, key, value): @@ -1268,7 +1283,8 @@ def _reduce(self, op, axis=0, skipna=True): if result.ndim == 2 and axis_name != self._info_axis: result = result.T - return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes)) + return self._constructor_sliced(result, + **self._extract_axes_for_slice(self, axes)) def _wrap_result(self, result, axis): axis = self._get_axis_name(axis) @@ -1280,7 +1296,9 @@ def 
_wrap_result(self, result, axis): if self.ndim == result.ndim: return self._constructor(result, **self._construct_axes_dict()) elif self.ndim == result.ndim + 1: - return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes)) + return self._constructor_sliced(result, + **self._extract_axes_for_slice(self, axes)) + raise PandasError("invalid _wrap_result [self->%s] [result->%s]" % (self.ndim, result.ndim)) @@ -1467,12 +1485,14 @@ def _get_join_index(self, other, how): @staticmethod def _extract_axes(self, data, axes, **kwargs): """ return a list of the axis indicies """ - return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)] + return [self._extract_axis(self, data, axis=i, **kwargs) for i, a + in enumerate(axes)] @staticmethod def _extract_axes_for_slice(self, axes): """ return the slice dictionary for these axes """ - return dict([(self._AXIS_SLICEMAP[i], a) for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)]) + return dict([(self._AXIS_SLICEMAP[i], a) for i, a + in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)]) @staticmethod def _prep_ndarray(self, values, copy=True): @@ -1491,8 +1511,8 @@ def _prep_ndarray(self, values, copy=True): @staticmethod def _homogenize_dict(self, frames, intersect=True, dtype=None): """ - Conform set of _constructor_sliced-like objects to either an intersection - of indices / columns or a union. + Conform set of _constructor_sliced-like objects to either + an intersection of indices / columns or a union. Parameters ---------- @@ -1614,7 +1634,8 @@ def f(self, other, axis=0): Parameters ---------- -axis : {""" + ', '.join(cls._AXIS_ORDERS) + "} or {" + ', '.join([str(i) for i in range(cls._AXIS_LEN)]) + """} +axis : {""" + ', '.join(cls._AXIS_ORDERS) + "} or {" \ ++ ', '.join([str(i) for i in range(cls._AXIS_LEN)]) + """} skipna : boolean, default True Exclude NA/null values. 
If an entire row/column is NA, the result will be NA @@ -1706,7 +1727,8 @@ def install_ipython_completers(): # pragma: no cover @complete_object.when_type(Panel) def complete_dataframe(obj, prev_completions): return prev_completions + [c for c in obj.keys() - if isinstance(c, basestring) and py3compat.isidentifier(c)] + if isinstance(c, basestring) + and py3compat.isidentifier(c)] # Importing IPython brings in about 200 modules, so we want to avoid it unless # we're in IPython (when those modules are loaded anyway). @@ -1714,4 +1736,4 @@ def complete_dataframe(obj, prev_completions): try: install_ipython_completers() except Exception: - pass + pass \ No newline at end of file
https://api.github.com/repos/pandas-dev/pandas/pulls/3444
2013-04-24T15:31:12Z
2013-04-24T15:50:16Z
null
2014-07-15T15:39:59Z
BUG: GH3437 to_csv issue with large number of rows and some NaT in multi datetime64[ns] cols
diff --git a/RELEASE.rst b/RELEASE.rst index e8287912efb3e..2c47c043dd84d 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -44,14 +44,17 @@ pandas 0.12.0 **Bug Fixes** - - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel - support. Should provide python3 support (for reading) which has been - lacking. (GH3164_) + - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel + support. Should provide python3 support (for reading) which has been + lacking. (GH3164_) + - Fix to_csv issue when having a large number of rows and ``NaT`` in some + columns (GH3437_) .. _GH3164: https://github.com/pydata/pandas/issues/3164 .. _GH3251: https://github.com/pydata/pandas/issues/3251 .. _GH3379: https://github.com/pydata/pandas/issues/3379 .. _GH3038: https://github.com/pydata/pandas/issues/3038 +.. _GH3437: https://github.com/pydata/pandas/issues/3437 diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 94029e3212057..03cfd18f5afe5 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -787,7 +787,7 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): values = values[:,slicer] mask = isnull(values) - rvalues = np.empty(self.shape,dtype=object) + rvalues = np.empty(values.shape,dtype=object) if na_rep is None: na_rep = 'NaT' rvalues[mask] = na_rep
fixes #3437
https://api.github.com/repos/pandas-dev/pandas/pulls/3438
2013-04-23T21:05:03Z
2013-04-24T12:49:50Z
null
2014-07-02T07:30:22Z
DOC: Adding parameters address issue sub-points 8 and 10 - 24 (issue #2916)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index adc824544b8c7..8595e2a91906d 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -745,11 +745,13 @@ def make_axis_dummies(frame, axis='minor', transform=None): Parameters ---------- + frame : DataFrame axis : {'major', 'minor'}, default 'minor' transform : function, default None Function to apply to axis labels first. For example, to - get "day of week" dummies in a time series regression you might - call: + get "day of week" dummies in a time series regression + you might call:: + make_axis_dummies(panel, axis='major', transform=lambda d: d.weekday()) Returns diff --git a/pandas/stats/math.py b/pandas/stats/math.py index 1b926fa5ee7c0..579d49edb8511 100644 --- a/pandas/stats/math.py +++ b/pandas/stats/math.py @@ -50,13 +50,14 @@ def newey_west(m, max_lags, nobs, df, nw_overlap=False): Parameters ---------- - m: (N x K) - max_lags: int - nobs: int + m : (N x K) + max_lags : int + nobs : int Number of observations in model - df: int + df : int Degrees of freedom in explanatory variables - nw_overlap: boolean + nw_overlap : boolean, default False + Assume data is overlapping Returns ------- diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index e53916f113e1b..b104c70da9494 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -137,7 +137,8 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- rolling_count : type of caller @@ -268,7 +269,8 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- y : type of input @@ -540,7 +542,8 @@ def 
rolling_quantile(arg, window, quantile, min_periods=None, freq=None, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- y : type of input argument @@ -569,7 +572,8 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- y : type of input argument @@ -604,7 +608,9 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, Whether the label should correspond with center of window mean : boolean, default True If True computes weighted mean, else weighted sum - + time_rule : Legacy alias for freq + axis : {0, 1}, default 0 + Returns ------- y : type of input argument @@ -729,7 +735,8 @@ def expanding_count(arg, freq=None, center=False, time_rule=None): Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- expanding_count : type of caller @@ -752,7 +759,8 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- y : type of input argument @@ -816,7 +824,8 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - + time_rule : Legacy alias for freq + Returns ------- y : type of input argument diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 9ecf5c6ab715f..4d0ae7ce0e897 100644 --- 
a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -28,12 +28,19 @@ class OLS(object): Parameters ---------- - y: Series - x: Series, DataFrame, dict of Series - intercept: bool + y : Series + x : Series, DataFrame, dict of Series + intercept : bool True if you want an intercept. - nw_lags: None or int + weights : array-like, optional + 1d array of weights. If you supply 1/W then the variables are pre- + multiplied by 1/sqrt(W). If no weights are supplied the default value + is 1 and WLS reults are the same as OLS. + nw_lags : None or int Number of Newey-West lags. + nw_overlap : boolean, default False + Assume data is overlapping when computing Newey-West estimator + """ _panel_model = False @@ -593,16 +600,24 @@ class MovingOLS(OLS): Parameters ---------- - y: Series - x: Series, DataFrame, or dict of Series - intercept: bool - True if you want an intercept. - nw_lags: None or int - Number of Newey-West lags. - window_type: {'full sample', 'rolling', 'expanding'} + y : Series + x : Series, DataFrame, or dict of Series + weights : array-like, optional + 1d array of weights. If None, equivalent to an unweighted OLS. + window_type : {'full sample', 'rolling', 'expanding'} Default expanding - window: int + window : int size of window (for rolling/expanding OLS) + min_periods : int + Threshold of non-null data points to require. + If None, defaults to size of window. + intercept : bool + True if you want an intercept. + nw_lags : None or int + Number of Newey-West lags. + nw_overlap : boolean, default False + Assume data is overlapping when computing Newey-West estimator + """ def __init__(self, y, x, weights=None, window_type='expanding', window=None, min_periods=None, intercept=True, @@ -1246,10 +1261,12 @@ def _filter_data(lhs, rhs, weights=None): Parameters ---------- - lhs: Series + lhs : Series Dependent variable in the regression. 
- rhs: dict, whose values are Series, DataFrame, or dict + rhs : dict, whose values are Series, DataFrame, or dict Explanatory variables of the regression. + weights : array-like, optional + 1d array of weights. If None, equivalent to an unweighted OLS. Returns ------- diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 2fdad439219e2..223e127223195 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -165,6 +165,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, Parameters ---------- + frame : DataFrame alpha : amount of transparency applied figsize : a tuple (width, height) in inches ax : Matplotlib axis object @@ -172,6 +173,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, diagonal : pick between 'kde' and 'hist' for either Kernel Density Estimation or Histogram plot in the diagonal + marker : Matplotlib marker type, default '.' kwds : other plotting keyword arguments To be passed to scatter function @@ -365,10 +367,17 @@ def normalize(series): def andrews_curves(data, class_column, ax=None, samples=200): """ Parameters: - data: A DataFrame containing data to be plotted, preferably - normalized to (0.0, 1.0). - class_column: Name of the column containing class names. - samples: Number of points to plot in each curve. 
+ ----------- + data : DataFrame + Data to be plotted, preferably normalized to (0.0, 1.0) + class_column : Name of the column containing class names + ax : matplotlib axes object, default None + samples : Number of points to plot in each curve + + Returns: + -------- + ax: Matplotlib axis object + """ from math import sqrt, pi, sin, cos import matplotlib.pyplot as plt @@ -1475,6 +1484,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, Parameters ---------- + frame : DataFrame x : label or position, default None y : label or position, default None Allows plotting of one column versus another @@ -1675,8 +1685,11 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None, Can be any valid input to groupby by : string or sequence Column in the DataFrame to group by + ax : Matplotlib axis object, optional fontsize : int or string rot : label rotation angle + figsize : A tuple (width, height) in inches + grid : Setting this to True will show the grid kwds : other plotting keyword arguments to be passed to matplotlib boxplot function @@ -1779,7 +1792,19 @@ def format_date_labels(ax, rot): def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs): """ + Make a scatter plot from two DataFrame columns + Parameters + ---------- + data : DataFrame + x : Column name for the x-axis values + y : Column name for the y-axis values + ax : Matplotlib axis object + figsize : A tuple (width, height) in inches + grid : Setting this to True will show the grid + kwargs : other plotting keyword arguments + To be passed to scatter function + Returns ------- fig : matplotlib.Figure @@ -1818,6 +1843,11 @@ def hist_frame( Parameters ---------- + data : DataFrame + column : string or sequence + If passed, will be used to limit data to a subset of columns + by : object, optional + If passed, then used to form histograms for separate groups grid : boolean, default True Whether to show axis grid lines xlabelsize : int, default None @@ 
-1956,6 +1986,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, Parameters ---------- + grouped : Grouped DataFrame subplots : * ``False`` - no subplots will be used * ``True`` - create a subplot for each group @@ -1963,6 +1994,8 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, Can be any valid input to groupby fontsize : int or string rot : label rotation angle + grid : Setting this to True will show the grid + figsize : A tuple (width, height) in inches kwds : other plotting keyword arguments to be passed to matplotlib boxplot function @@ -2157,15 +2190,19 @@ def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, Dict with keywords passed to the add_subplot() call used to create each subplots. - fig_kw : dict - Dict with keywords passed to the figure() call. Note that all keywords - not recognized above will be automatically included here. - - ax : Matplotlib axis object, default None + ax : Matplotlib axis object, optional secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right + data : DataFrame, optional + If secondary_y is a sequence, data is used to select columns. + + fig_kw : Other keyword arguments to be passed to the figure() call. + Note that all keywords not recognized above will be + automatically included here. + + Returns: fig, ax : tuple
https://api.github.com/repos/pandas-dev/pandas/pulls/3422
2013-04-22T23:43:22Z
2013-04-24T12:29:48Z
2013-04-24T12:29:48Z
2014-06-15T20:28:49Z
DOC: Adding parameters to frequencies, offsets (issue #2916)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index e8dad6c85b2ac..3b66eba31fca1 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -746,6 +746,7 @@ def infer_freq(index, warn=True): Parameters ---------- index : DatetimeIndex + warn : boolean, default True Returns ------- diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index bd95a62c3f2ed..3bc801bd38695 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1197,6 +1197,8 @@ def generate_range(start=None, end=None, periods=None, start : datetime (default None) end : datetime (default None) periods : int, optional + time_rule : (legacy) name of DateOffset object to be used, optional + Corresponds with names expected by tseries.frequencies.get_offset Note ---- @@ -1204,6 +1206,7 @@ def generate_range(start=None, end=None, periods=None, * At least two of (start, end, periods) must be specified. * If both start and end are specified, the returned dates will satisfy start <= date <= end. + * If both time_rule and offset are specified, time_rule supersedes offset. Returns -------
My first PR...
https://api.github.com/repos/pandas-dev/pandas/pulls/3420
2013-04-22T20:53:51Z
2013-04-22T21:02:26Z
2013-04-22T21:02:26Z
2014-06-14T00:34:08Z
Period.strftime should return unicode strings always
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 14119dd94290a..a405fda1c4fe4 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -13,6 +13,7 @@ import pandas.core.common as com from pandas.core.common import isnull +from pandas.util import py3compat from pandas.lib import Timestamp import pandas.lib as lib @@ -264,12 +265,49 @@ def __repr__(self): base, mult = _gfc(self.freq) formatted = tslib.period_format(self.ordinal, base) freqstr = _freq_mod._reverse_period_code_map[base] + + if not py3compat.PY3: + encoding = com.get_option("display.encoding") + formatted = formatted.encode(encoding) + return "Period('%s', '%s')" % (formatted, freqstr) def __str__(self): + """ + Return a string representation for a particular DataFrame + + Invoked by str(df) in both py2/py3. + Yields Bytestring in Py2, Unicode String in py3. + """ + + if py3compat.PY3: + return self.__unicode__() + return self.__bytes__() + + def __bytes__(self): + """ + Return a string representation for a particular DataFrame + + Invoked by bytes(df) in py3 only. + Yields a bytestring in both py2/py3. + """ + encoding = com.get_option("display.encoding") + return self.__unicode__().encode(encoding, 'replace') + + def __unicode__(self): + """ + Return a string representation for a particular DataFrame + + Invoked by unicode(df) in py2 only. Yields a Unicode String in both + py2/py3. 
+ """ base, mult = _gfc(self.freq) formatted = tslib.period_format(self.ordinal, base) - return ("%s" % formatted) + value = (u"%s" % formatted) + assert type(value) == unicode + + return value + def strftime(self, fmt): """ diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 436254a682e8c..f34a237b55dd4 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -206,8 +206,9 @@ def test_repr(self): def test_strftime(self): p = Period('2000-1-1 12:34:12', freq='S') - self.assert_(p.strftime('%Y-%m-%d %H:%M:%S') == - '2000-01-01 12:34:12') + res = p.strftime('%Y-%m-%d %H:%M:%S') + self.assert_( res == '2000-01-01 12:34:12') + self.assert_( isinstance(res,unicode)) # GH3363 def test_sub_delta(self): left, right = Period('2011', freq='A'), Period('2007', freq='A') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c07ac411be075..4d15ec8c8ace9 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -2283,6 +2283,7 @@ cdef list extra_fmts = [(b"%q", b"^`AB`^"), cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^"] cdef _period_strftime(int64_t value, int freq, object fmt): + import sys cdef: Py_ssize_t i date_info dinfo @@ -2325,6 +2326,10 @@ cdef _period_strftime(int64_t value, int freq, object fmt): if not PyString_Check(result): result = str(result) + # GH3363 + if sys.version_info[0] == 2: + result = result.decode('utf-8','strict') + return result # period accessors diff --git a/scripts/use_build_cache.py b/scripts/use_build_cache.py index 361ac59e5e852..60833affd9373 100755 --- a/scripts/use_build_cache.py +++ b/scripts/use_build_cache.py @@ -15,29 +15,21 @@ Tested on releases back to 0.7.0. """ +import argparse +argparser = argparse.ArgumentParser(description=""" +'Program description. +""".strip()) -try: - import argparse - argparser = argparse.ArgumentParser(description=""" - 'Program description. 
- """.strip()) - - argparser.add_argument('-f', '--force-overwrite', +argparser.add_argument('-f', '--force-overwrite', default=False, help='Setting this will overwrite any existing cache results for the current commit', action='store_true') - argparser.add_argument('-d', '--debug', +argparser.add_argument('-d', '--debug', default=False, help='Report cache hits/misses', action='store_true') - args = argparser.parse_args() -except: - class Foo(object): - debug=False - force_overwrite=False - - args = Foo() # for 2.6, no argparse +args = argparser.parse_args() #print args.accumulate(args.integers) @@ -78,28 +70,18 @@ class Foo(object): import shutil import multiprocessing pyver = "%d.%d" % (sys.version_info[:2]) - fileq = ["pandas"] + files = ["pandas"] to_process = dict() + orig_hashes= dict((f.split("-")[0],f) for f in os.listdir(BUILD_CACHE_DIR) + if "-" in f and f.endswith(pyver)) + post_hashes= dict((f.split("-")[1],f) for f in os.listdir(BUILD_CACHE_DIR) + if "-" in f and f.endswith(pyver)) - # retrieve the hashes existing in the cache - orig_hashes=dict() - post_hashes=dict() - for path,dirs,files in os.walk(os.path.join(BUILD_CACHE_DIR,'pandas')): - for f in files: - s=f.split(".py-")[-1] - try: - prev_h,post_h,ver = s.split('-') - if ver == pyver: - orig_hashes[prev_h] = os.path.join(path,f) - post_hashes[post_h] = os.path.join(path,f) - except: - pass - - while fileq: - f = fileq.pop() + while files: + f = files.pop() if os.path.isdir(f): - fileq.extend([os.path.join(f,x) for x in os.listdir(f)]) + files.extend([os.path.join(f,x) for x in os.listdir(f)]) else: if not f.endswith(".py"): continue @@ -108,54 +90,40 @@ class Foo(object): h = sha1(open(f,"rb").read()).hexdigest() except IOError: to_process[h] = f - else: - if h in orig_hashes and not BC_FORCE_OVERWRITE: - src = orig_hashes[h] - if BC_DEBUG: - print("2to3 cache hit %s,%s" % (f,h)) - shutil.copyfile(src,f) - elif h not in post_hashes: - # we're not in a dev dir with already processed files - if 
BC_DEBUG: - print("2to3 cache miss (will process) %s,%s" % (f,h)) - to_process[h] = f + if h in orig_hashes and not BC_FORCE_OVERWRITE: + src = os.path.join(BUILD_CACHE_DIR,orig_hashes[h]) + if BC_DEBUG: + print("2to3 cache hit %s,%s" % (f,h)) + shutil.copyfile(src,f) + elif h not in post_hashes: + + # we're not in a dev dir with already processed files + if BC_DEBUG: + print("2to3 cache miss %s,%s" % (f,h)) + print("2to3 will process " + f) + to_process[h] = f avail_fixes = set(refactor.get_fixers_from_package("lib2to3.fixes")) avail_fixes.discard('lib2to3.fixes.fix_next') t=refactor.RefactoringTool(avail_fixes) - if to_process: - print("Starting 2to3 refactoring...") - for orig_h,f in to_process.items(): + print("Starting 2to3 refactoring...") + for f in to_process.values(): + if BC_DEBUG: + print("2to3 on %s" % f) + try: + t.refactor([f],True) + post_h = sha1(open(f, "rb").read()).hexdigest() + cached_fname = f + "-" + post_h + "-" + pyver if BC_DEBUG: - print("2to3 on %s" % f) - try: - t.refactor([f],True) - post_h = sha1(open(f, "rb").read()).hexdigest() - cached_fname = f + '-' + orig_h + '-' + post_h + '-' + pyver - path = os.path.join(BUILD_CACHE_DIR, cached_fname) - pathdir =os.path.dirname(path) - if BC_DEBUG: - print("cache put %s in %s" % (f, path)) - try: - os.makedirs(pathdir) - except OSError as exc: - import errno - if exc.errno == errno.EEXIST and os.path.isdir(pathdir): - pass - else: - raise - - shutil.copyfile(f, path) + print("cache put %s,%s in %s" % (f, h, cached_fname)) + shutil.copyfile(f, os.path.join(BUILD_CACHE_DIR, cached_fname)) - except Exception as e: - print("While processing %s 2to3 raised: %s" % (f,str(e))) - - pass - print("2to3 done refactoring.") + except: + pass + print("2to3 done refactoring.") except Exception as e: - if not isinstance(e,ZeroDivisionError): - print( "Exception: " + str(e)) + print( "Exception: " + str(e)) BUILD_CACHE_DIR = None class CompilationCacheMixin(object):
pending confirmed of fix to #3363
https://api.github.com/repos/pandas-dev/pandas/pulls/3410
2013-04-21T13:37:50Z
2013-04-21T16:54:11Z
2013-04-21T16:54:11Z
2014-06-27T18:12:06Z
BUG: don't rely on sys.getdefaultencoding if we don't need to
diff --git a/pandas/core/format.py b/pandas/core/format.py index 1b362fb5c0562..fbab7472ba10b 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -165,7 +165,9 @@ def _encode_diff_func(): encoding = get_option("display.encoding") def _encode_diff(x): - return len(x) - len(x.decode(encoding)) + if not isinstance(x,unicode): + return len(x) - len(x.decode(encoding)) + return 0 return _encode_diff @@ -1639,13 +1641,14 @@ def reset_printoptions(): FutureWarning) reset_option("^display\.") - +_initial_defencoding = None def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. slighly modified from the way IPython handles the same issue. """ import locale + global _initial_defencoding encoding = None try: @@ -1662,6 +1665,11 @@ def detect_console_encoding(): if not encoding or 'ascii' in encoding.lower(): # when all else fails. this will usually be "ascii" encoding = sys.getdefaultencoding() + # GH3360, save the reported defencoding at import time + # MPL backends may change it. Make available for debugging. + if not _initial_defencoding: + _initial_defencoding = sys.getdefaultencoding() + return encoding
pending confirm on fixing #3360.
https://api.github.com/repos/pandas-dev/pandas/pulls/3409
2013-04-21T07:22:08Z
2013-04-21T15:13:08Z
2013-04-21T15:13:08Z
2014-07-23T06:58:59Z
TST: added testing and error messages for passing datetimes with timezones
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index da4077165add2..be11732d7b3a2 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1207,13 +1207,20 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, **kwargs): self.values = list(block.items) dtype = block.dtype.name - inferred_type = lib.infer_dtype(block.values.ravel()) + rvalues = block.values.ravel() + inferred_type = lib.infer_dtype(rvalues) if inferred_type == 'datetime64': self.set_atom_datetime64(block) elif inferred_type == 'date': raise TypeError( "[date] is not implemented as a table column") + elif inferred_type == 'datetime': + if getattr(rvalues[0],'tzinfo',None) is not None: + raise TypeError( + "timezone support on datetimes is not yet implemented as a table column") + raise TypeError( + "[datetime] is not implemented as a table column") elif inferred_type == 'unicode': raise TypeError( "[unicode] is not implemented as a table column") @@ -2080,8 +2087,18 @@ def validate(self, other): (other.table_type, self.table_type)) for c in ['index_axes','non_index_axes','values_axes']: - if getattr(self,c,None) != getattr(other,c,None): - raise ValueError("invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c,getattr(self,c,None),getattr(other,c,None))) + sv = getattr(self,c,None) + ov = getattr(other,c,None) + if sv != ov: + + # show the error for the specific axes + for i, sax in enumerate(sv): + oax = ov[i] + if sax != oax: + raise ValueError("invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c,sax,oax)) + + # should never get here + raise Exception("invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c,sv,ov)) @property def nrows_expected(self): diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 598812373538c..75fe0eefe771e 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1150,15 +1150,19 @@ def 
test_table_values_dtypes_roundtrip(self): df1['float322'] = 1. df1['float322'] = df1['float322'].astype('float32') df1['bool'] = df1['float32'] > 0 + df1['time1'] = Timestamp('20130101') + df1['time2'] = Timestamp('20130102') store.append('df_mixed_dtypes1', df1) result = store.select('df_mixed_dtypes1').get_dtype_counts() expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1, - 'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1 }) + 'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1, + 'datetime64[ns]' : 2}) result.sort() expected.sort() tm.assert_series_equal(result,expected) + def test_table_mixed_dtypes(self): # frame @@ -1231,6 +1235,17 @@ def test_unimplemented_dtypes_table_columns(self): # this fails because we have a date in the object block...... self.assertRaises(TypeError, store.append, 'df_unimplemented', df) + def test_table_append_with_timezones(self): + # not implemented yet + + with ensure_clean(self.path) as store: + + # check with mixed dtypes + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern')),index=range(5)) + + # timezones not yet supported + self.assertRaises(TypeError, store.append, 'df_tz', df) + def test_remove(self): with ensure_clean(self.path) as store:
(not yet implemented, and error message was a little misleading) CLN: better error messages on invalid appends
https://api.github.com/repos/pandas-dev/pandas/pulls/3405
2013-04-20T16:26:09Z
2013-04-20T16:51:42Z
2013-04-20T16:51:42Z
2014-06-25T07:53:00Z
DOC: ref / val caveat, point at pandas methods
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 9a3dc5f37934a..7ba7a315f7bae 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -121,8 +121,14 @@ Sorting by values Selection --------- -See the :ref:`Indexing section <indexing>` +.. note:: + While standard Python / Numpy expressions for selecting and setting are + intuitive and come handy for interactive work, for production code, we + recommend the optimized pandas data access methods, ``.at``, ``.iat``, + ``.loc``, ``.iloc`` and ``.ix``. + +See the :ref:`Indexing section <indexing>` and below. Getting ~~~~~~~ @@ -230,7 +236,8 @@ For getting fast access to a scalar (equiv to the prior method) df.iat[1,1] There is one signficant departure from standard python/numpy slicing semantics. -python/numpy allow slicing past the end of an array without an associated error. +python/numpy allow slicing past the end of an array without an associated +error. .. ipython:: python @@ -239,7 +246,8 @@ python/numpy allow slicing past the end of an array without an associated error. x[4:10] x[8:10] -Pandas will detect this and raise ``IndexError``, rather than return an empty structure. +Pandas will detect this and raise ``IndexError``, rather than return an empty +structure. :: @@ -306,11 +314,13 @@ A ``where`` operation with setting. df2[df2 > 0] = -df2 df2 + Missing Data ------------ -Pandas primarily uses the value ``np.nan`` to represent missing data. It -is by default not included in computations. See the :ref:`Missing Data section <missing_data>` +Pandas primarily uses the value ``np.nan`` to represent missing data. It is by +default not included in computations. See the :ref:`Missing Data section +<missing_data>` Reindexing allows you to change/add/delete the index on a specified axis. This returns a copy of the data. @@ -457,8 +467,8 @@ Append rows to a dataframe. 
See the :ref:`Appending <merging.concatenation>` Grouping -------- -By "group by" we are referring to a process involving one or more of the following -steps +By "group by" we are referring to a process involving one or more of the +following steps - **Splitting** the data into groups based on some criteria - **Applying** a function to each group independently @@ -481,7 +491,8 @@ Grouping and then applying a function ``sum`` to the resulting groups. df.groupby('A').sum() -Grouping by multiple columns forms a hierarchical index, which we then apply the function. +Grouping by multiple columns forms a hierarchical index, which we then apply +the function. .. ipython:: python @@ -547,10 +558,10 @@ We can produce pivot tables from this data very easily: Time Series ----------- -Pandas has simple, powerful, and efficient functionality for -performing resampling operations during frequency conversion (e.g., converting -secondly data into 5-minutely data). This is extremely common in, but not -limited to, financial applications. See the :ref:`Time Series section <timeseries>` +Pandas has simple, powerful, and efficient functionality for performing +resampling operations during frequency conversion (e.g., converting secondly +data into 5-minutely data). This is extremely common in, but not limited to, +financial applications. See the :ref:`Time Series section <timeseries>` .. ipython:: python diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 853de3ee37ca2..d973b27d2daff 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -32,6 +32,19 @@ attention in this area. Expect more work to be invested higher-dimensional data structures (including Panel) in the future, especially in label-based advanced indexing. +.. note:: + + Regular Python and NumPy indexing operators (squared brackets) and member + operators (dots) provide quick and easy access to pandas data structures + across a wide range of use cases. 
This makes interactive work intuitive, as + there's little new to learn if you already know how to deal with Python + dictionaries and NumPy arrays. However, the type of the data to be accessed + isn't known in advance. Therefore, accessing pandas objects directly using + standard operators bears some optimization limits. In addition, whether a + copy or a reference is returned here, may depend on context. For production + code, we thus recommended to take advantage of the optimized pandas data + access methods exposed in this chapter. + See the :ref:`cookbook<cookbook.selection>` for some advanced strategies Choice @@ -41,22 +54,27 @@ Starting in 0.11.0, object selection has had a number of user-requested addition order to support more explicit location based indexing. Pandas now supports three types of multi-axis indexing. - - ``.loc`` is strictly label based, will raise ``KeyError`` when the items are not found, + - ``.loc`` is strictly label based, will raise ``KeyError`` when the items + are not found, allowed inputs are: - A single label, e.g. ``5`` or ``'a'`` - (note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the index) + (note that ``5`` is interpreted as a *label* of the index. This use is ** + not** an integer position along the index) - A list or array of labels ``['a', 'b', 'c']`` - A slice object with labels ``'a':'f'`` - (note that contrary to usual python slices, **both** the start and the stop are included!) + (note that contrary to usual python slices, **both** the start and the + stop are included!) - A boolean array See more at :ref:`Selection by Label <indexing.label>` - - ``.iloc`` is strictly integer position based (from 0 to length-1 of the axis), will - raise ``IndexError`` when the requested indicies are out of bounds. 
Allowed inputs are: + - ``.iloc`` is strictly integer position based (from 0 to length-1 of the + axis), will + raise ``IndexError`` when the requested indicies are out of bounds. + Allowed inputs are: - An integer e.g. ``5`` - A list or array of integers ``[4, 3, 0]`` @@ -65,22 +83,28 @@ three types of multi-axis indexing. See more at :ref:`Selection by Position <indexing.integer>` - - ``.ix`` supports mixed integer and label based access. It is primarily label based, but - will fallback to integer positional access. ``.ix`` is the most general and will support - any of the inputs to ``.loc`` and ``.iloc``, as well as support for floating point label schemes. + - ``.ix`` supports mixed integer and label based access. It is primarily + label based, but + will fallback to integer positional access. ``.ix`` is the most general + and will support any of the inputs to ``.loc`` and ``.iloc``, as well as + support for floating point label schemes. - As using integer slices with ``.ix`` have different behavior depending on whether the slice - is interpreted as integer location based or label position based, it's usually better to be + As using integer slices with ``.ix`` have different behavior depending on + whether the slice + is interpreted as integer location based or label position based, it's + usually better to be explicit and use ``.iloc`` (integer location) or ``.loc`` (label location). - ``.ix`` is especially useful when dealing with mixed positional and label based hierarchial indexes. + ``.ix`` is especially useful when dealing with mixed positional and label + based hierarchial indexes. See more at :ref:`Advanced Indexing <indexing.advanced>` and :ref:`Advanced Hierarchical <indexing.advanced_hierarchical>` -Getting values from an object with multi-axes selection uses the following notation (using ``.loc`` as an -example, but applies to ``.iloc`` and ``.ix`` as well) Any of the axes accessors may be the null -slice ``:``. 
Axes left out of the specification are assumed to be ``:``. -(e.g. ``p.loc['a']`` is equiv to ``p.loc['a',:,:]``) +Getting values from an object with multi-axes selection uses the following +notation (using ``.loc`` as an example, but applies to ``.iloc`` and ``.ix`` as +well) Any of the axes accessors may be the null slice ``:``. Axes left out of +the specification are assumed to be ``:``. (e.g. ``p.loc['a']`` is equiv to +``p.loc['a',:,:]``) .. csv-table:: :header: "Object Type", "Indexers" @@ -100,12 +124,14 @@ Starting in version 0.11.0, these methods may be deprecated in future versions. - ``icol`` - ``iget_value`` -See the section :ref:`Selection by Position <indexing.integer>` for substitutes. +See the section :ref:`Selection by Position <indexing.integer>` for substitutes +. .. _indexing.xs: -Cross-sectional slices on non-hierarchical indices are now easily performed using -``.loc`` and/or ``.iloc``. These methods now exist primarily for backward compatibility. +Cross-sectional slices on non-hierarchical indices are now easily performed +using ``.loc`` and/or ``.iloc``. These methods now exist primarily for +backward compatibility. - ``xs`` (for DataFrame), - ``minor_xs`` and ``major_xs`` (for Panel) @@ -162,7 +188,8 @@ Attribute Access .. _indexing.df_cols: -You may access a column on a ``DataFrame``, and a item on a ``Panel`` directly as an attribute: +You may access a column on a ``DataFrame``, and a item on a ``Panel`` directly +as an attribute: .. ipython:: python @@ -189,9 +216,8 @@ Slicing ranges ~~~~~~~~~~~~~~ The most robust and consistent way of slicing ranges along arbitrary axes is -described in the :ref:`Selection by Position <indexing.integer>` section detailing -the ``.iloc`` method. For now, we explain the semantics of slicing using the -``[]`` operator. +described in the :ref:`Selection by Position <indexing.integer>` section +detailing the ``.iloc`` method. For now, we explain the semantics of slicing using the ``[]`` operator. 
With Series, the syntax works exactly as with an ndarray, returning a slice of the values and the corresponding labels: @@ -223,22 +249,27 @@ largely as a convenience since it is such a common operation. Selection By Label ~~~~~~~~~~~~~~~~~~ -Pandas provides a suite of methods in order to have **purely label based indexing**. -This is a strict inclusion based protocol. **ALL** of the labels for which you ask, -must be in the index or a ``KeyError`` will be raised! +Pandas provides a suite of methods in order to have **purely label based +indexing**. +This is a strict inclusion based protocol. **ALL** of the labels for which you +ask, must be in the index or a ``KeyError`` will be raised! -When slicing, the start bound is *included*, **AND** the stop bound is *included*. +When slicing, the start bound is *included*, **AND** the stop bound is * +included*. Integers are valid labels, but they refer to the label *and not the position*. -The ``.loc`` attribute is the primary access method. The following are valid inputs: +The ``.loc`` attribute is the primary access method. The following are valid +inputs: - A single label, e.g. ``5`` or ``'a'`` - (note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the index) + (note that ``5`` is interpreted as a *label* of the index. This use is ** + not** an integer position along the index) - A list or array of labels ``['a', 'b', 'c']`` - A slice object with labels ``'a':'f'`` - (note that contrary to usual python slices, **both** the start and the stop are included!) + (note that contrary to usual python slices, **both** the start and the + stop are included!) - A boolean array .. ipython:: python @@ -296,13 +327,16 @@ For getting a value explicity (equiv to deprecated ``df.get_value('a','A')``) Selection By Position ~~~~~~~~~~~~~~~~~~~~~ -Pandas provides a suite of methods in order to get **purely integer based indexing**. -The semantics follow closely python and numpy slicing. 
These are ``0-based`` indexing. +Pandas provides a suite of methods in order to get **purely integer based +indexing**. The semantics follow closely python and numpy slicing. These are `` +0-based`` indexing. -When slicing, the start bounds is *included*, while the upper bound is *excluded*. -Trying to use a non-integer, even a **valid** label will raise a ``IndexError``. +When slicing, the start bounds is *included*, while the upper bound is * +excluded*. Trying to use a non-integer, even a **valid** label will raise a `` +IndexError``. -The ``.iloc`` attribute is the primary access method. The following are valid inputs: +The ``.iloc`` attribute is the primary access method. The following are valid +inputs: - An integer e.g. ``5`` - A list or array of integers ``[4, 3, 0]`` @@ -363,21 +397,24 @@ For slicing columns explicitly (equiv to deprecated ``df.icol(slice(1,3))``). df1.iloc[:,1:3] -For getting a scalar via integer position (equiv to deprecated ``df.get_value(1,1)``) +For getting a scalar via integer position (equiv to deprecated ``df.get_value( +1,1)``) .. ipython:: python # this is also equivalent to ``df1.iat[1,1]`` df1.iloc[1,1] -For getting a cross section using an integer position (equiv to deprecated ``df.xs(1)``) +For getting a cross section using an integer position (equiv to deprecated ``df +.xs(1)``) .. ipython:: python df1.iloc[1] There is one signficant departure from standard python/numpy slicing semantics. -python/numpy allow slicing past the end of an array without an associated error. +python/numpy allow slicing past the end of an array without an associated error +. .. ipython:: python @@ -386,7 +423,8 @@ python/numpy allow slicing past the end of an array without an associated error. x[4:10] x[8:10] -Pandas will detect this and raise ``IndexError``, rather than return an empty structure. +Pandas will detect this and raise ``IndexError``, rather than return an empty +structure. 
:: @@ -401,11 +439,11 @@ Fast scalar value getting and setting Since indexing with ``[]`` must handle a lot of cases (single-label access, slicing, boolean indexing, etc.), it has a bit of overhead in order to figure out what you're asking for. If you only want to access a scalar value, the -fastest way is to use the ``at`` and ``iat`` methods, which are implemented on all of -the data structures. +fastest way is to use the ``at`` and ``iat`` methods, which are implemented on +all of the data structures. -Similary to ``loc``, ``at`` provides **label** based scalar lookups, while, ``iat`` provides -**integer** based lookups analagously to ``iloc`` +Similary to ``loc``, ``at`` provides **label** based scalar lookups, while, `` +iat`` provides **integer** based lookups analagously to ``iloc`` .. ipython:: python @@ -413,9 +451,10 @@ Similary to ``loc``, ``at`` provides **label** based scalar lookups, while, ``ia df.at[dates[5], 'A'] df.iat[3, 0] -You can also set using these same indexers. These have the additional capability -of enlarging an object. This method *always* returns a reference to the object -it modified, which in the case of enlargement, will be a **new object**: +You can also set using these same indexers. These have the additional +capability of enlarging an object. This method *always* returns a reference to +the object it modified, which in the case of enlargement, will be a **new +object**: .. ipython:: python @@ -475,21 +514,33 @@ more complex criteria: # Multiple criteria df2[criterion & (df2['b'] == 'x')] -Note, with the choice methods :ref:`Selection by Label <indexing.label>`, :ref:`Selection by Position <indexing.integer>`, -and :ref:`Advanced Indexing <indexing.advanced>` you may select along more than one axis using boolean vectors combined with other -indexing expressions. 
+Note, with the choice methods :ref:`Selection by Label <indexing.label>`, :ref: +`Selection by Position <indexing.integer>`, and :ref:`Advanced Indexing < +indexing.advanced>` you may select along more than one axis using boolean + vectors combined with other indexing expressions. .. ipython:: python df2.loc[criterion & (df2['b'] == 'x'),'b':'c'] - + +Caveat. Whether a copy or a reference is returned when using boolean indexing +may depend on context, e.g., in chained expressions the order may determine +whether a copy is returned or not: + +.. ipython:: python + + df2[df2.a.str.startswith('o')]['c'] = 42 # goes to copy (will be lost) + df2['c'][df2.a.str.startswith('o')] = 42 # passed via reference (will stay) + +When assigning values to subsets of your data, thus, make sure to either use the pandas access methods or explicitly handle the assignment creating a copy. Where and Masking ~~~~~~~~~~~~~~~~~ -Selecting values from a Series with a boolean vector generally returns a subset of the data. -To guarantee that selection output has the same shape as the original data, you can use the -``where`` method in ``Series`` and ``DataFrame``. +Selecting values from a Series with a boolean vector generally returns a +subset of the data. To guarantee that selection output has the same shape as +the original data, you can use the ``where`` method in ``Series`` and `` +DataFrame``. To return only the selected rows @@ -504,15 +555,16 @@ To return a Series of the same shape as the original s.where(s > 0) -Selecting values from a DataFrame with a boolean critierion now also preserves input data shape. -``where`` is used under the hood as the implementation. Equivalent is ``df.where(df < 0)`` +Selecting values from a DataFrame with a boolean critierion now also preserves +input data shape. ``where`` is used under the hood as the implementation. +Equivalent is ``df.where(df < 0)`` .. 
ipython:: python df[df < 0] -In addition, ``where`` takes an optional ``other`` argument for replacement of values where the -condition is False, in the returned copy. +In addition, ``where`` takes an optional ``other`` argument for replacement of +values where the condition is False, in the returned copy. .. ipython:: python @@ -531,8 +583,9 @@ This can be done intuitively like so: df2[df2 < 0] = 0 df2 -Furthermore, ``where`` aligns the input boolean condition (ndarray or DataFrame), such that partial selection -with setting is possible. This is analagous to partial setting via ``.ix`` (but on the contents rather than the axis labels) +Furthermore, ``where`` aligns the input boolean condition (ndarray or DataFrame +), such that partial selection with setting is possible. This is analagous to +partial setting via ``.ix`` (but on the contents rather than the axis labels) .. ipython:: python @@ -540,8 +593,9 @@ with setting is possible. This is analagous to partial setting via ``.ix`` (but df2[ df2[1:4] > 0 ] = 3 df2 -By default, ``where`` returns a modified copy of the data. There is an optional parameter ``inplace`` -so that the original data can be modified without creating a copy: +By default, ``where`` returns a modified copy of the data. There is an +optional parameter ``inplace`` so that the original data can be modified +without creating a copy: .. ipython:: python @@ -674,14 +728,16 @@ Advanced Indexing with ``.ix`` .. note:: The recent addition of ``.loc`` and ``.iloc`` have enabled users to be quite - explicit about indexing choices. ``.ix`` allows a great flexibility to specify - indexing locations by *label* and/or *integer position*. Pandas will attempt - to use any passed *integer* as *label* locations first (like what ``.loc`` - would do, then to fall back on *positional* indexing, like what ``.iloc`` - would do). See :ref:`Fallback Indexing <indexing.fallback>` for an example. + explicit about indexing choices. 
``.ix`` allows a great flexibility to + specify indexing locations by *label* and/or *integer position*. Pandas will + attempt to use any passed *integer* as *label* locations first (like what + ``.loc`` would do, then to fall back on *positional* indexing, like what + ``.iloc`` would do). See :ref:`Fallback Indexing <indexing.fallback>` for + an example. -The syntax of using ``.ix`` is identical to ``.loc``, in :ref:`Selection by Label <indexing.label>`, -and ``.iloc`` in :ref:`Selection by Position <indexing.integer>`. +The syntax of using ``.ix`` is identical to ``.loc``, in :ref:`Selection by +Label <indexing.label>`, and ``.iloc`` in :ref:`Selection by Position <indexing +.integer>`. The ``.ix`` attribute takes the following inputs: @@ -791,8 +847,8 @@ Setting values in mixed-type DataFrame .. _indexing.mixed_type_setting: -Setting values on a mixed-type DataFrame or Panel is supported when using scalar -values, though setting arbitrary vectors is not yet supported: +Setting values on a mixed-type DataFrame or Panel is supported when using +scalar values, though setting arbitrary vectors is not yet supported: .. ipython:: python @@ -926,10 +982,10 @@ See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies Given that hierarchical indexing is so new to the library, it is definitely "bleeding-edge" functionality but is certainly suitable for production. But, - there may inevitably be some minor API changes as more use cases are explored - and any weaknesses in the design / implementation are identified. pandas aims - to be "eminently usable" so any feedback about new functionality like this is - extremely helpful. + there may inevitably be some minor API changes as more use cases are + explored and any weaknesses in the design / implementation are identified. + pandas aims to be "eminently usable" so any feedback about new + functionality like this is extremely helpful. 
Creating a MultiIndex (hierarchical index) object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -956,8 +1012,10 @@ DataFrame to construct a MultiIndex automatically: .. ipython:: python - arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']), - np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])] + arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']) + , + np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']) + ] s = Series(randn(8), index=arrays) s df = DataFrame(randn(8, 4), index=arrays) @@ -983,8 +1041,8 @@ of the index is up to you: We've "sparsified" the higher levels of the indexes to make the console output a bit easier on the eyes. -It's worth keeping in mind that there's nothing preventing you from using tuples -as atomic labels on an axis: +It's worth keeping in mind that there's nothing preventing you from using +tuples as atomic labels on an axis: .. ipython:: python @@ -1025,8 +1083,8 @@ Basic indexing on axis with MultiIndex One of the important features of hierarchical indexing is that you can select data by a "partial" label identifying a subgroup in the data. **Partial** -selection "drops" levels of the hierarchical index in the result in a completely -analogous way to selecting a column in a regular DataFrame: +selection "drops" levels of the hierarchical index in the result in a +completely analogous way to selecting a column in a regular DataFrame: .. ipython:: python @@ -1275,8 +1333,8 @@ indexed DataFrame: indexed2 = data.set_index(['a', 'b']) indexed2 -The ``append`` keyword option allow you to keep the existing index and append the given -columns to a MultiIndex: +The ``append`` keyword option allow you to keep the existing index and append +the given columns to a MultiIndex: .. ipython:: python @@ -1321,7 +1379,8 @@ discards the index, instead of putting index values in the DataFrame's columns. .. 
note:: - The ``reset_index`` method used to be called ``delevel`` which is now deprecated. + The ``reset_index`` method used to be called ``delevel`` which is now + deprecated. Adding an ad hoc index ~~~~~~~~~~~~~~~~~~~~~~
This in part addresses #3340. I added a few comments in the doc that point users ad using the pandas at, iat, loc, iloc, etc. methods and included an example similar to the one exposed in #3340 that addresses some of the reference / value intricaies encountered with pandas and numpy objects. To make things clear I selected a rather verbose style. Let me know if this is over the top ;-)
https://api.github.com/repos/pandas-dev/pandas/pulls/3399
2013-04-19T09:32:59Z
2013-04-20T14:32:40Z
2013-04-20T14:32:40Z
2014-06-14T17:35:39Z
ENH: speed up testing when --processes arg is given to test_fast.sh
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 75fe0eefe771e..bce737606040d 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -66,6 +66,7 @@ def ensure_clean(path, mode='a', complevel=None, complib=None, class TestHDFStore(unittest.TestCase): + _multiprocess_shared_ = True def setUp(self): warnings.filterwarnings(action='ignore', category=FutureWarning)
I know this is a stupid one liner but...this allows the pytables fixtures to be shared, but not split (because PyTables is not reentrant [only reading is thread-safe]), during a test run with multiple processors. I get a 3x speed up (75 secs to 25 secs) just by adding this single class variable.
https://api.github.com/repos/pandas-dev/pandas/pulls/3397
2013-04-19T00:20:24Z
2013-05-03T03:18:02Z
null
2014-06-29T16:35:33Z
BUG: fix df repr troubles
diff --git a/pandas/core/format.py b/pandas/core/format.py index 4e03a5c68b2b3..f210b801d8cee 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -12,6 +12,7 @@ from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.util import py3compat from pandas.util.compat import OrderedDict +from pandas.util.terminal import get_terminal_size from pandas.core.config import get_option, set_option, reset_option import pandas.core.common as com import pandas.lib as lib @@ -1665,6 +1666,19 @@ def detect_console_encoding(): return encoding +def get_console_size(): + """Return console size as tuple = (width, height).""" + display_width = get_option('display.width') + display_height = get_option('display.height') + + if com.in_interactive_session(): + terminal_width, terminal_height = get_terminal_size() + else: + terminal_width, terminal_height = 80, 100 + + return (display_width or terminal_width, display_height or terminal_height) + + class EngFormatter(object): """ Formats float values according to engineering format. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2a1ee06984aa8..a457430d2f563 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -605,15 +605,11 @@ def _repr_fits_vertical_(self): options height and max_columns. In case off non-interactive session, no boundaries apply. """ - if not com.in_interactive_session(): - return True - - terminal_width, terminal_height = get_terminal_size() + width, height = fmt.get_console_size() # excluding column axis area - max_rows = get_option("display.max_rows") or terminal_height - display_height = get_option("display.height") or terminal_height - return len(self.index) <= min(max_rows, display_height) + max_rows = get_option("display.max_rows") or height + return len(self) <= min(max_rows, height) def _repr_fits_horizontal_(self): """ @@ -621,23 +617,19 @@ def _repr_fits_horizontal_(self): options width and max_columns. 
In case off non-interactive session, no boundaries apply. """ - if not com.in_interactive_session(): - return True - - terminal_width, terminal_height = get_terminal_size() + width, height = fmt.get_console_size() max_columns = get_option("display.max_columns") - display_width = get_option("display.width") or terminal_width nb_columns = len(self.columns) if ((max_columns and nb_columns > max_columns) or - (nb_columns > (display_width // 2))): + (nb_columns > (width // 2))): return False buf = StringIO() self.to_string(buf=buf) value = buf.getvalue() repr_width = max([len(l) for l in value.split('\n')]) - return repr_width <= display_width + return repr_width <= width def __str__(self): """ @@ -670,19 +662,21 @@ def __unicode__(self): """ buf = StringIO(u"") fits_vertical = self._repr_fits_vertical_() - fits_horizontal = self._repr_fits_horizontal_() + fits_horizontal = False + if fits_vertical: + fits_horizontal = self._repr_fits_horizontal_() + if fits_vertical and fits_horizontal: self.to_string(buf=buf) else: - terminal_width, terminal_height = get_terminal_size() - max_rows = get_option("display.max_rows") or terminal_height + width, height = fmt.get_console_size() + max_rows = get_option("display.max_rows") or height # Expand or info? Decide based on option display.expand_frame_repr # and keep it sane for the number of display rows used by the # expanded repr. 
if (get_option("display.expand_frame_repr") and fits_vertical and len(self.columns) < max_rows): - line_width = get_option("display.width") or terminal_width - self.to_string(buf=buf, line_width=line_width) + self.to_string(buf=buf, line_width=width) else: max_info_rows = get_option('display.max_info_rows') verbose = (max_info_rows is None or @@ -711,7 +705,12 @@ def _repr_html_(self): raise ValueError('Disable HTML output in QtConsole') if get_option("display.notebook_repr_html"): - if self._repr_fits_horizontal_() and self._repr_fits_vertical_(): + fits_vertical = self._repr_fits_vertical_() + fits_horizontal = False + if fits_vertical: + fits_horizontal = self._repr_fits_horizontal_() + + if fits_horizontal and fits_vertical: return ('<div style="max-height:1000px;' 'max-width:1500px;overflow:auto;">\n' + self.to_html() + '\n</div>') diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 6be4c846845e9..ea18f3404b3e4 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -179,6 +179,17 @@ def test_expand_frame_repr(self): self.assertTrue(has_info_repr(df_tall)) self.assertFalse(has_expanded_repr(df_tall)) + def test_repr_non_interactive(self): + # in non interactive mode, there can be no dependency on the + # result of terminal auto size detection + df = DataFrame('hello', range(99), range(5)) + + with option_context('mode.sim_interactive', False, + 'display.width', 0, + 'display.height', 0): + self.assertFalse(has_info_repr(df)) + self.assertFalse(has_expanded_repr(df)) + def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() if term_width < 10 or term_height < 10: diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index ddf9124ac90ef..7745450e5c03b 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -150,20 +150,19 @@ def f(K=500): ## setup = common_setup + """ -from pandas.core.config import option_context - -def interactive_repr(frame): - with 
option_context('mode.sim_interactive', True): - repr(frame) - df = pandas.DataFrame(np.random.randn(10,10000)) """ -frame_wide_repr = Benchmark('repr(df)', setup, +frame_repr_wide = Benchmark('repr(df)', setup, start_date=datetime(2012, 8, 1)) -frame_wide_repr_interactive = Benchmark('interactive_repr(df)', setup, - start_date=datetime(2012, 8, 1)) +## +setup = common_setup + """ +df = pandas.DataFrame(np.random.randn(10000, 10)) +""" + +frame_repr_tall = Benchmark('repr(df)', setup, + start_date=datetime(2012, 8, 1)) ## setup = common_setup + """
My apologies for the mess i created on the dataframe repr. Not only did i get the non-interactive behavior wrong, i also introduced a performance issue in the interactive mode. This PR should make my wrong, right again. In non-interactive mode i re-enabled concise formats and made sure that auto-terminal-size detection is not used (little test added), also performance issue reported in #3337 and #3373 is fixed (added a benchmark for it).
https://api.github.com/repos/pandas-dev/pandas/pulls/3395
2013-04-18T20:37:48Z
2013-04-22T14:38:01Z
null
2014-06-22T23:48:34Z
BUG: GH3380 groupby will handle mutation on a DataFrame group's columns
diff --git a/RELEASE.rst b/RELEASE.rst index 5fa0234041227..55fb085b7bdee 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -298,6 +298,8 @@ pandas 0.11.0 - Fix set_index segfault when passing MultiIndex (GH3308_) - Ensure pickles created in py2 can be read in py3 - Insert ellipsis in MultiIndex summary repr (GH3348_) + - Groupby will handle mutation among an input groups columns (and fallback + to non-fast apply) (GH3380_) .. _GH3294: https://github.com/pydata/pandas/issues/3294 .. _GH622: https://github.com/pydata/pandas/issues/622 @@ -409,6 +411,7 @@ pandas 0.11.0 .. _GH2919: https://github.com/pydata/pandas/issues/2919 .. _GH3308: https://github.com/pydata/pandas/issues/3308 .. _GH3311: https://github.com/pydata/pandas/issues/3311 +.. _GH3380: https://github.com/pydata/pandas/issues/3380 pandas 0.10.1 ============= diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 8b3fb4c2fba0d..aef44bd91396d 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -620,7 +620,9 @@ def apply(self, f, data, axis=0, keep_internal=False): try: values, mutated = splitter.fast_apply(f, group_keys) return group_keys, values, mutated - except lib.InvalidApply: + except (Exception), detail: + # we detect a mutatation of some kind + # so take slow path pass result_values = [] diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 139a7cace83a7..4604678d58d5a 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1491,6 +1491,30 @@ def f(group): for key, group in grouped: assert_frame_equal(result.ix[key], f(group)) + def test_mutate_groups(self): + + # GH3380 + + mydf = DataFrame({ + 'cat1' : ['a'] * 8 + ['b'] * 6, + 'cat2' : ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 + ['d'] * 2 + ['e'] * 2, + 'cat3' : map(lambda x: 'g%s' % x, range(1,15)), + 'val' : np.random.randint(100, size=14), + }) + + def f_copy(x): + x = x.copy() + x['rank'] = x.val.rank(method='min') + return 
x.groupby('cat2')['rank'].min() + + def f_no_copy(x): + x['rank'] = x.val.rank(method='min') + return x.groupby('cat2')['rank'].min() + + grpby_copy = mydf.groupby('cat1').apply(f_copy) + grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy) + assert_series_equal(grpby_copy,grpby_no_copy) + def test_apply_chunk_view(self): # Low level tinkering could be unsafe, make sure not df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
and fallback to non-fast apply fixes #3380
https://api.github.com/repos/pandas-dev/pandas/pulls/3384
2013-04-17T14:54:52Z
2013-04-20T14:32:53Z
2013-04-20T14:32:53Z
2014-06-24T18:14:36Z
Travis-CI: Allow network cache opt-in for whitelisted forks
diff --git a/.travis.yml b/.travis.yml index e8a9193f9b7c9..b48f6d834b62d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,20 +2,29 @@ language: python python: - 2.6 - - 3.3 env: global: - - NOSE_ARGS="not slow" # need at least this so travis page will show env column + - secure: "O04RU5QRKEDL/SrIWEsVe8O+1TxZqZQSa28Sd+Fz48NW/XddhefYyxzqcUXh\nk/NjWMqknJRQhApLolBianVpsE577OTllzlcyKn3nUL6hjOXcoszGaYray7S\niNGKGyO8xrtB/ZQDtmupz0ksK8sLoCTscdiGotFulczbx0zt+4g=" + - secure: "PUJ9nC1/v2vpFUtELSoSjI53OHCVXfFTb8+t5lIGIqHtjUBkhiJSNPfCv8Bx\ndsdrx30qP8KsSceYzaa/bog6p8YNU1iih23S0KbjucutvA0LNHBTNvnxmjBR\nSJfKd5FmwnXvizRyghYBzmQ3NmGO7ADw2DBwKOhgGMqCHZ8Tlc8=" + - secure: "IDcMrCCW+6pgJtsI3Q163OPc0iec1ogpitaqiRhHcrEBUCXZgVeclOeiZBlw\n/u+uGyW/O0NhHMaFXKB8BdDVwlQEEHv48syN6npS/A5+O6jriWKL4ozttOhE\npOlu+yLhHnEwx6wZVIHRTVn+t1GkOrjlBcjaQi+Z13G3XmDaSG8=" + - secure: "Zu9aj0dTGpvMqT/HqBGQgDYl/v5ubC7lFwfE8Fqb0N1UVXqbpjXnNH/7oal1\nUsIT7klO++LWm+LxsP/A1FWENTSgdYe99JQtNyauW+0x5YR1JTuDJ8atDgx9\nSq66CaVpS5t+ov7UVm2bKSUX+1S8+8zGbIDADrMxEzYEMF7WoGM=" + - secure: "AfIvLxvCxj22zrqg3ejGf/VePKT2AyGT9erYzlKpBS0H8yi5Pp1MfmJjhaR4\n51zBtzqHPHiIEY6ZdE06o9PioMWkXS+BqJNrxGSbt1ltxgOFrxW5zOpwiFGZ\nZOv1YeFkuPf8PEsWT7615mdydqTQT7B0pqUKK/d6aka4TQ/tg5Q=" + - secure: "EM4ySBUusReNu7H1QHXvjnP/J1QowvfpwEBmjysYxJuq7KcG8HhhlfpUF+Gh\nLBzLak9QBA67k4edhum3qtKuJR5cHuja3+zuV8xmx096B/m96liJFTrwZpea\n58op3W6ZULctEpQNgIkyae20bjxl4f99JhZRUlonoPfx/rBIMFc=" + - secure: "pgMYS/6MQqDGb58qdzTJesvAMmcJWTUEEM8gf9rVbfqfxceOL4Xpx8siR9B2\nC4U4MW1cHMPP3RFEb4Jy0uK49aHH10snwZY1S84YPPllpH5ZFXVdN68OayNj\nh4k5N/2hhaaQuJ6Uh8v8s783ye4oYTOW5RJUFqQu4QdG4IkTIMs=" + + - NOSE_ARGS="not slow" UPLOAD=true matrix: include: - python: 2.7 - env: NOSE_ARGS="not network" LOCALE_OVERRIDE="zh_CN.GB18030" + env: NOSE_ARGS="slow and not network" LOCALE_OVERRIDE="zh_CN.GB18030" FULL_DEPS=true UPLOAD=false - python: 2.7 - env: NOSE_ARGS="not slow" FULL_DEPS=true + env: NOSE_ARGS="not slow" FULL_DEPS=true UPLOAD=true - python: 3.2 - env: NOSE_ARGS="not slow" 
FULL_DEPS=true + env: NOSE_ARGS="not slow" FULL_DEPS=true UPLOAD=true + - python: 3.3 + env: NOSE_ARGS="not slow" UPLOAD=true # allow importing from site-packages, # so apt-get python-x works for system pythons @@ -26,11 +35,14 @@ virtualenv: before_install: - echo "Waldo1" - echo $VIRTUAL_ENV + - df - date - - export PIP_ARGS=-q # comment this this to debug travis install issues - - export APT_ARGS=-qq # comment this to debug travis install issues + # - export PIP_ARGS=-q # comment this this to debug travis install issues + # - export APT_ARGS=-qq # comment this to debug travis install issues # - set -x # enable this to see bash commands - - source ci/before_install.sh # we need to source this to bring in the env + - export ZIP_FLAGS=-q # comment this to debug travis install issues + - source ci/envars.sh # we need to source this to bring in the envars + - ci/before_install.sh - python -V install: @@ -43,3 +55,4 @@ script: after_script: - ci/print_versions.py + - ci/after_script.sh diff --git a/ci/before_install.sh b/ci/before_install.sh index 9561c713d0f2e..677ddfa642f80 100755 --- a/ci/before_install.sh +++ b/ci/before_install.sh @@ -1,28 +1,36 @@ #!/bin/bash +# If envars.sh determined we're running in an authorized fork +# and the user opted in to the network cache,and that cached versions +# are available on the cache server, download and deploy the cached +# files to the local filesystem + echo "inside $0" # overview -if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then - sudo add-apt-repository -y ppa:doko/ppa # we get the py3.3 debs from here -fi - sudo apt-get update $APT_ARGS # run apt-get update for all versions -# # hack for broken 3.3 env -# if [ x"$VIRTUAL_ENV" == x"" ]; then -# VIRTUAL_ENV=~/virtualenv/python$TRAVIS_PYTHON_VERSION_with_system_site_packages; -# fi +if $PLEASE_TRAVIS_FASTER ; then + echo "Faster? well... I'll try." 
+ + if $CACHE_FILE_AVAILABLE ; then + echo retrieving "$CACHE_FILE_URL"; + + wget -q "$CACHE_FILE_URL" -O "/tmp/_$CYTHON_HASH.zip"; + unzip $ZIP_FLAGS /tmp/_"$CYTHON_HASH.zip" -d "$BUILD_CACHE_DIR"; + rm -f /tmp/_"$CYTHON_HASH.zip" + # copy cythonized c files over + cp -R "$BUILD_CACHE_DIR"/pandas/*.c pandas/ + cp -R "$BUILD_CACHE_DIR"/pandas/src/*.c pandas/src/ + fi; + echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE" + if $VENV_FILE_AVAILABLE ; then + echo "getting venv" + wget -q $VENV_FILE_URL -O "/tmp/venv.zip"; + sudo unzip $ZIP_FLAGS -o /tmp/venv.zip -d "/"; + sudo chown travis -R "$VIRTUAL_ENV" + rm -f /tmp/_"$CYTHON_HASH.zip" + fi; +fi -# # we only recreate the virtualenv for 3.x -# # since the "Detach bug" only affects python3 -# # and travis has numpy preinstalled on 2.x which is quicker -# _VENV=$VIRTUAL_ENV # save it -# if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ] ; then -# deactivate # pop out of any venv -# sudo pip install virtualenv==1.8.4 --upgrade -# sudo apt-get install $APT_ARGS python3.3 python3.3-dev -# sudo rm -Rf $_VENV -# virtualenv -p python$TRAVIS_PYTHON_VERSION $_VENV --system-site-packages; -# source $_VENV/bin/activate -# fi +true # never fail because bad things happened here diff --git a/ci/envars.sh b/ci/envars.sh new file mode 100755 index 0000000000000..14ac783af9631 --- /dev/null +++ b/ci/envars.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# This must be sourced by .travis.yml, so any envars exported here will +# be available to the rest of the build stages + +# - computes a hash based on the cython files in the codebade +# - retrieves the decrypted key if any for all whitelisted forks +# - checks whether the user optd int to use the cache +# - if so, check for availablity of cache files on the server, based on hash +# - set envars to control what the following scripts do + +# at most one of these will decrypt, so the end result is that $STORE_KEY +# either holds a single key or does not +export 
STORE_KEY="$STORE_KEY0""$STORE_KEY1""$STORE_KEY2""$STORE_KEY3""$STORE_KEY4" +export STORE_KEY="$STORE_KEY""$STORE_KEY5""$STORE_KEY6""$STORE_KEY7" + +export CYTHON_HASH=$(find pandas | grep -P '\.(pyx|pxd)$' | sort \ + | while read N; do echo $(tail -n+1 $N | md5sum ) ;done | md5sum| cut -d ' ' -f 1) + +export CYTHON_HASH=$CYTHON_HASH-$TRAVIS_PYTHON_VERSION + +# where the cache files live on the server +export CACHE_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/$CYTHON_HASH.zip" +export VENV_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/venv-$TRAVIS_PYTHON_VERSION.zip" +export CACHE_FILE_STORE_URL="https://cache27-pypandas.rhcloud.com/store/$STORE_KEY" + +echo "Hashing:" +find pandas | grep -P '\.(pyx|pxd)$' +echo "Key: $CYTHON_HASH" + +export CACHE_FILE_AVAILABLE=false +export VENV_FILE_AVAILABLE=false +export PLEASE_TRAVIS_FASTER=false + +# check whether the user opted in to use the cache via commit message +if [ x"$(git log --format='%s' -n 1 | grep PLEASE_TRAVIS_FASTER | wc -l)" != x"0" ]; then + export PLEASE_TRAVIS_FASTER=true +fi; +if [ x"$(git log --format='%s' -n 1 | grep PTF | wc -l)" != x"0" ]; then + export PLEASE_TRAVIS_FASTER=true +fi; + +if $PLEASE_TRAVIS_FASTER; then + + # check whether the files exists on the server + curl -s -f -I "$CACHE_FILE_URL" # silent, don;t expose key + if [ x"$?" == x"0" ] ; then + export CACHE_FILE_AVAILABLE=true; + fi + + + curl -s -f -I "$VENV_FILE_URL" # silent, don;t expose key + if [ x"$?" 
== x"0" ] ; then + export VENV_FILE_AVAILABLE=true; + fi + + # the pandas build cache machinery needs this set, and the directory created + export BUILD_CACHE_DIR="/tmp/build_cache" + mkdir "$BUILD_CACHE_DIR" +fi; + +# debug +echo "PLEASE_TRAVIS_FASTER=$PLEASE_TRAVIS_FASTER" +echo "CACHE_FILE_AVAILABLE=$CACHE_FILE_AVAILABLE" +echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE" + +true diff --git a/ci/install.sh b/ci/install.sh index 7fe425e055589..0045a6c3c86f6 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -1,75 +1,128 @@ #!/bin/bash +# There are 2 distinct pieces that get zipped and cached +# - The venv site-packages dir including the installed dependencies +# - The pandas build artifacts, using the build cache support via +# scripts/use_build_cache.py +# +# if the user opted in to use the cache and we're on a whitelisted fork +# - if the server doesn't hold a cached version of venv/pandas build, +# do things the slow way, and put the results on the cache server +# for the next time. +# - if the cache files are available, instal some necessaries via apt +# (no compiling needed), then directly goto script and collect 200$. 
+# + echo "inside $0" -# Install Dependencies +# Install Dependencie +SITE_PKG_DIR=$VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/site-packages +echo "Using SITE_PKG_DIR: $SITE_PKG_DIR" # workaround for travis ignoring system_site_packages in travis.yml rm -f $VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/no-global-site-packages.txt -# Hard Deps -pip install $PIP_ARGS --use-mirrors cython nose python-dateutil pytz - -# try and get numpy as a binary deb +if [ x"$LOCALE_OVERRIDE" != x"" ]; then + # make sure the locale is available + # probably useless, since you would need to relogin + sudo locale-gen "$LOCALE_OVERRIDE" +fi; -# numpy is preinstalled on 2.x -# if [ ${TRAVIS_PYTHON_VERSION} == "2.7" ]; then -# sudo apt-get $APT_ARGS install python-numpy; -# fi +#scipy is not included in the cached venv +if [ x"$FULL_DEPS" == x"true" ] ; then + # for pytables gets the lib as well + sudo apt-get $APT_ARGS install libhdf5-serial-dev; -if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - sudo apt-get $APT_ARGS install python3-numpy; -elif [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 - pip $PIP_ARGS install numpy==1.7.0; -else - pip $PIP_ARGS install numpy==1.6.1; + if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then + sudo apt-get $APT_ARGS install python3-scipy + elif [ ${TRAVIS_PYTHON_VERSION} == "2.7" ]; then + sudo apt-get $APT_ARGS install python-scipy + fi fi -# Optional Deps -if [ x"$FULL_DEPS" == x"true" ]; then - echo "Installing FULL_DEPS" - if [ ${TRAVIS_PYTHON_VERSION} == "2.7" ]; then - sudo apt-get $APT_ARGS install python-scipy; - fi +# Everything installed inside this clause into site-packages +# will get included in the cached venv downloaded from the net +# in PTF mode +if ( ! 
$VENV_FILE_AVAILABLE ); then + echo "Running full monty" + # Hard Deps + pip install $PIP_ARGS nose python-dateutil pytz + pip install $PIP_ARGS cython - if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - sudo apt-get $APT_ARGS install python3-scipy; + if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 + pip install $PIP_ARGS numpy==1.7.0 + elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then + # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise + pip install $PIP_ARGS numpy==1.6.1 + else + pip install $PIP_ARGS numpy==1.6.1 fi - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - sudo apt-get $APT_ARGS install libhdf5-serial-dev; - pip install numexpr - pip install tables + # Optional Deps + if [ x"$FULL_DEPS" == x"true" ]; then + echo "Installing FULL_DEPS" + pip install $PIP_ARGS cython + + if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then + # installed explicitly above, to get the library as well + # sudo apt-get $APT_ARGS install libhdf5-serial-dev; + pip install numexpr + pip install tables + pip install $PIP_ARGS xlwt + fi + + pip install $PIP_ARGS matplotlib + pip install $PIP_ARGS openpyxl + pip install $PIP_ARGS xlrd>=0.9.0 + pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' + pip install $PIP_ARGS patsy + + # fool statsmodels into thinking pandas was already installed + # so it won't refuse to install itself. 
We want it in the zipped venv + + mkdir $SITE_PKG_DIR/pandas + touch $SITE_PKG_DIR/pandas/__init__.py + echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py + pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels + + rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas fi - pip install $PIP_ARGS --use-mirrors openpyxl matplotlib; - pip install $PIP_ARGS --use-mirrors xlrd xlwt; - pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' -fi + # pack up the venv and cache it + if [ x"$STORE_KEY" != x"" ] && $UPLOAD; then + VENV_FNAME="venv-$TRAVIS_PYTHON_VERSION.zip" -if [ x"$VBENCH" == x"true" ]; then - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - sudo apt-get $APT_ARGS install libhdf5-serial-dev; - pip install numexpr - pip install tables + zip $ZIP_FLAGS -r "$HOME/$VENV_FNAME" $SITE_PKG_DIR/ + ls -l "$HOME/$VENV_FNAME" + echo "posting venv" + # silent, don't expose key + curl -s --form upload=@"$HOME/$VENV_FNAME" "$CACHE_FILE_STORE_URL/$VENV_FNAME" fi - pip $PIP_ARGS install sqlalchemy git+git://github.com/pydata/vbench.git; -fi -#build and install pandas -python setup.py build_ext install - -#HACK: pandas is a statsmodels dependency -# so we need to install it after pandas -if [ x"$FULL_DEPS" == x"true" ]; then - pip install patsy - # pick recent 0.5dev dec/2012 - pip install git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels fi; -# make sure the desired locale is generated -if [ x"$LOCALE_OVERRIDE" != x"" ]; then - # piggyback this build for plotting tests. oh boy. 
- pip install $PIP_ARGS --use-mirrors matplotlib; +#build and install pandas +if [ x"$BUILD_CACHE_DIR" != x"" ]; then + scripts/use_build_cache.py -d + python setup.py install; +else + python setup.py build_ext install +fi - sudo locale-gen "$LOCALE_OVERRIDE" +# package pandas build artifacts and send them home +# that's everything the build cache (scripts/use_build_cache.py) +# stored during the build (.so, pyx->.c and 2to3) +if (! $CACHE_FILE_AVAILABLE) ; then + echo "Posting artifacts" + strip "$BUILD_CACHE_DIR/*" &> /dev/null + echo "$BUILD_CACHE_DIR" + cd "$BUILD_CACHE_DIR"/ + zip -r $ZIP_FLAGS "$HOME/$CYTHON_HASH".zip * + cd "$TRAVIS_BUILD_DIR" + pwd + zip "$HOME/$CYTHON_HASH".zip $(find pandas | grep -P '\.(pyx|pxd)$' | sed -r 's/.(pyx|pxd)$/.c/') + + # silent, don't expose key + curl -s --form upload=@"$HOME/$CYTHON_HASH".zip "$CACHE_FILE_STORE_URL/$CYTHON_HASH.zip" fi + +true diff --git a/scripts/use_build_cache.py b/scripts/use_build_cache.py index 60833affd9373..361ac59e5e852 100755 --- a/scripts/use_build_cache.py +++ b/scripts/use_build_cache.py @@ -15,21 +15,29 @@ Tested on releases back to 0.7.0. """ -import argparse -argparser = argparse.ArgumentParser(description=""" -'Program description. -""".strip()) -argparser.add_argument('-f', '--force-overwrite', +try: + import argparse + argparser = argparse.ArgumentParser(description=""" + 'Program description. 
+ """.strip()) + + argparser.add_argument('-f', '--force-overwrite', default=False, help='Setting this will overwrite any existing cache results for the current commit', action='store_true') -argparser.add_argument('-d', '--debug', + argparser.add_argument('-d', '--debug', default=False, help='Report cache hits/misses', action='store_true') -args = argparser.parse_args() + args = argparser.parse_args() +except: + class Foo(object): + debug=False + force_overwrite=False + + args = Foo() # for 2.6, no argparse #print args.accumulate(args.integers) @@ -70,18 +78,28 @@ import shutil import multiprocessing pyver = "%d.%d" % (sys.version_info[:2]) - files = ["pandas"] + fileq = ["pandas"] to_process = dict() - orig_hashes= dict((f.split("-")[0],f) for f in os.listdir(BUILD_CACHE_DIR) - if "-" in f and f.endswith(pyver)) - post_hashes= dict((f.split("-")[1],f) for f in os.listdir(BUILD_CACHE_DIR) - if "-" in f and f.endswith(pyver)) - while files: - f = files.pop() + # retrieve the hashes existing in the cache + orig_hashes=dict() + post_hashes=dict() + for path,dirs,files in os.walk(os.path.join(BUILD_CACHE_DIR,'pandas')): + for f in files: + s=f.split(".py-")[-1] + try: + prev_h,post_h,ver = s.split('-') + if ver == pyver: + orig_hashes[prev_h] = os.path.join(path,f) + post_hashes[post_h] = os.path.join(path,f) + except: + pass + + while fileq: + f = fileq.pop() if os.path.isdir(f): - files.extend([os.path.join(f,x) for x in os.listdir(f)]) + fileq.extend([os.path.join(f,x) for x in os.listdir(f)]) else: if not f.endswith(".py"): continue @@ -90,40 +108,54 @@ h = sha1(open(f,"rb").read()).hexdigest() except IOError: to_process[h] = f - if h in orig_hashes and not BC_FORCE_OVERWRITE: - src = os.path.join(BUILD_CACHE_DIR,orig_hashes[h]) - if BC_DEBUG: - print("2to3 cache hit %s,%s" % (f,h)) - shutil.copyfile(src,f) - elif h not in post_hashes: - - # we're not in a dev dir with already processed files - if BC_DEBUG: - print("2to3 cache miss %s,%s" % (f,h)) - print("2to3 
will process " + f) - to_process[h] = f + else: + if h in orig_hashes and not BC_FORCE_OVERWRITE: + src = orig_hashes[h] + if BC_DEBUG: + print("2to3 cache hit %s,%s" % (f,h)) + shutil.copyfile(src,f) + elif h not in post_hashes: + # we're not in a dev dir with already processed files + if BC_DEBUG: + print("2to3 cache miss (will process) %s,%s" % (f,h)) + to_process[h] = f avail_fixes = set(refactor.get_fixers_from_package("lib2to3.fixes")) avail_fixes.discard('lib2to3.fixes.fix_next') t=refactor.RefactoringTool(avail_fixes) - print("Starting 2to3 refactoring...") - for f in to_process.values(): - if BC_DEBUG: - print("2to3 on %s" % f) - try: - t.refactor([f],True) - post_h = sha1(open(f, "rb").read()).hexdigest() - cached_fname = f + "-" + post_h + "-" + pyver + if to_process: + print("Starting 2to3 refactoring...") + for orig_h,f in to_process.items(): if BC_DEBUG: - print("cache put %s,%s in %s" % (f, h, cached_fname)) - shutil.copyfile(f, os.path.join(BUILD_CACHE_DIR, cached_fname)) + print("2to3 on %s" % f) + try: + t.refactor([f],True) + post_h = sha1(open(f, "rb").read()).hexdigest() + cached_fname = f + '-' + orig_h + '-' + post_h + '-' + pyver + path = os.path.join(BUILD_CACHE_DIR, cached_fname) + pathdir =os.path.dirname(path) + if BC_DEBUG: + print("cache put %s in %s" % (f, path)) + try: + os.makedirs(pathdir) + except OSError as exc: + import errno + if exc.errno == errno.EEXIST and os.path.isdir(pathdir): + pass + else: + raise - except: - pass - print("2to3 done refactoring.") + shutil.copyfile(f, path) + + except Exception as e: + print("While processing %s 2to3 raised: %s" % (f,str(e))) + + pass + print("2to3 done refactoring.") except Exception as e: - print( "Exception: " + str(e)) + if not isinstance(e,ZeroDivisionError): + print( "Exception: " + str(e)) BUILD_CACHE_DIR = None class CompilationCacheMixin(object):
https://api.github.com/repos/pandas-dev/pandas/pulls/3383
2013-04-17T14:39:53Z
2013-04-17T14:40:10Z
2013-04-17T14:40:10Z
2014-08-11T06:22:53Z
ENH/CLN: make Timestamp repr valid python code, like datetime does.
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index f34a237b55dd4..10a5e039b9fc6 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1298,6 +1298,14 @@ def test_to_timestamp_preserve_name(self): conv = index.to_timestamp('D') self.assertEquals(conv.name, 'foo') + def test_to_timestamp_repr_is_code(self): + zs=[Timestamp('99-04-17 00:00:00',tz='UTC'), + Timestamp('2001-04-17 00:00:00',tz='UTC'), + Timestamp('2001-04-17 00:00:00',tz='America/Los_Angeles'), + Timestamp('2001-04-17 00:00:00',tz=None)] + for z in zs: + self.assertEquals( eval(repr(z)), z) + def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 4d15ec8c8ace9..f9c1b2329c16d 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -164,20 +164,25 @@ class Timestamp(_Timestamp): def __repr__(self): result = self._repr_base + zone = None try: result += self.strftime('%z') if self.tzinfo: zone = _get_zone(self.tzinfo) - result += _tz_format(self, zone) except ValueError: year2000 = self.replace(year=2000) result += year2000.strftime('%z') if self.tzinfo: zone = _get_zone(self.tzinfo) - result += _tz_format(year2000, zone) - return '<Timestamp: %s>' % result + try: + result += zone.strftime(' %%Z') + except: + pass + zone = "'%s'" % zone if zone else 'None' + + return "Timestamp('%s', tz=%s)" % (result,zone) @property def _repr_base(self):
In the same vein as #3038 , make `repr()` output valid python code where feasible. I find this not being the case inconvenient for example when issues contain code examples, and the data needs to be recreated by hand rather then merely copy pasted. ``` In [2]: zs=[Timestamp('99-04-17 00:00:00',tz='UTC'), ...: Timestamp('2001-04-17 00:00:00',tz='UTC'), ...: Timestamp('2001-04-17 00:00:00',tz='America/Los_Angeles'), ...: Timestamp('2001-04-17 00:00:00',tz=None)] ...: for z in zs: ...: print repr(z) ...: assert eval(repr(z)) == z Timestamp('1999-04-17 00:00:00+0000', tz='UTC') Timestamp('2001-04-17 00:00:00+0000', tz='UTC') Timestamp('2001-04-17 00:00:00-0700', tz='America/Los_Angeles') Timestamp('2001-04-17 00:00:00', tz=None) ``` Misgivings?
https://api.github.com/repos/pandas-dev/pandas/pulls/3379
2013-04-17T01:02:05Z
2013-04-23T02:13:36Z
2013-04-23T02:13:36Z
2014-06-19T05:28:32Z
multiindex xs slicing bug fix
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5ffd81ec80157..2a1ee06984aa8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2226,8 +2226,9 @@ def xs(self, key, axis=0, level=None, copy=True): raise ValueError('Cannot retrieve view (copy=False)') # level = 0 - if not isinstance(loc, slice): - indexer = [slice(None, None)] * 2 + loc_is_slice = isinstance(loc, slice) + if not loc_is_slice: + indexer = [slice(None)] * 2 indexer[axis] = loc indexer = tuple(indexer) else: @@ -2237,10 +2238,9 @@ def xs(self, key, axis=0, level=None, copy=True): indexer = self.index[loc] # select on the correct axis - if axis == 1: - result = self.ix[:, indexer] - else: - result = self.ix[indexer] + if axis == 1 and loc_is_slice: + indexer = slice(None), indexer + result = self.ix[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index c3d6faf6e71b7..bc717a0fbf6d1 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -724,6 +724,11 @@ def test_xs_multiindex(self): expected = df.iloc[:,0:2].loc[:,'a'] assert_frame_equal(result,expected) + result = df.xs('foo', level='lvl1', axis=1) + expected = df.iloc[:, 1:2].copy() + expected.columns = expected.columns.droplevel('lvl1') + assert_frame_equal(result, expected) + def test_setitem_dtype_upcast(self): # GH3216
#2903 solves the case for the first level but using the same variables from that issue, the following code raises a `TypeError` for the second level: ``` python import pandas as pd from numpy.random import rand columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'hello'), ('b', 'world')], names=['lvl0', 'lvl1']) df = pd.DataFrame(rand(4, 4), columns=columns) df.xs('foo', level='lvl1', axis=1) ``` This pull request fixes that.
https://api.github.com/repos/pandas-dev/pandas/pulls/3378
2013-04-17T00:13:10Z
2013-04-17T22:40:58Z
2013-04-17T22:40:58Z
2014-06-30T05:21:11Z
DOC: Moved note about cursor position in TS plots out of changes to pd.data.io.Options in v0.11.0
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index 834b23c92d3b5..2024b9a6598e4 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -309,7 +309,8 @@ Enhancements only return forward looking data for options near the current stock price. This just obtains the data from Options.get_near_stock_price instead of Options.get_xxx_data() (GH2758_). - + Cursor coordinate information is now displayed in time-series plots. + + - Cursor coordinate information is now displayed in time-series plots. - added option `display.max_seq_items` to control the number of elements printed per sequence pprinting it. (GH2979_)
Very simple formatting change. Note about cursor position in time series plots should not have been a part of changes to Options class in data.io.
https://api.github.com/repos/pandas-dev/pandas/pulls/3365
2013-04-15T15:29:33Z
2013-04-15T15:39:33Z
2013-04-15T15:39:33Z
2014-06-16T14:25:07Z
CLN: remove some unicode hair from pre-pprint_thing days GH3360
diff --git a/pandas/core/format.py b/pandas/core/format.py index 0f0029167ce64..dc79a1cbc762a 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -139,14 +139,9 @@ def to_string(self): maxlen = max(len(x) for x in fmt_index) pad_space = min(maxlen, 60) - _encode_diff = _encode_diff_func() - result = ['%s %s'] * len(fmt_values) for i, (k, v) in enumerate(izip(fmt_index[1:], fmt_values)): - try: - idx = k.ljust(pad_space + _encode_diff(k)) - except UnicodeEncodeError: - idx = k.ljust(pad_space) + idx = k.ljust(pad_space) result[i] = result[i] % (idx, v) if self.header and have_header: @@ -158,21 +153,6 @@ def to_string(self): return unicode(u'\n'.join(result)) - -def _encode_diff_func(): - if py3compat.PY3: # pragma: no cover - _encode_diff = lambda x: 0 - else: - encoding = get_option("display.encoding") - - def _encode_diff(x): - if not isinstance(x,unicode): - return len(x) - len(x.decode(encoding)) - return 0 - - return _encode_diff - - def _strlen_func(): if py3compat.PY3: # pragma: no cover _strlen = len @@ -1490,7 +1470,6 @@ def _make_fixed_width(strings, justify='right', minimum=None): return strings _strlen = _strlen_func() - _encode_diff = _encode_diff_func() max_len = np.max([_strlen(x) for x in strings]) @@ -1507,10 +1486,7 @@ def _make_fixed_width(strings, justify='right', minimum=None): justfunc = lambda self, x: self.rjust(x) def just(x): - try: - eff_len = max_len + _encode_diff(x) - except UnicodeError: - eff_len = max_len + eff_len = max_len if conf_max is not None: if (conf_max > 3) & (_strlen(x) > max_len):
breaks no tests, and the code it removes resolved an issue which now works even without it. even so, no sudden moves a couple of days before a major release, so 0.12.
https://api.github.com/repos/pandas-dev/pandas/pulls/3364
2013-04-15T15:20:36Z
2013-04-23T02:09:41Z
2013-04-23T02:09:41Z
2014-07-16T08:05:55Z
PERF: get_numeric_data now a bit faster
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ab7d23acf183e..bc39d4e4ff5d4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4930,25 +4930,10 @@ def _get_agg_axis(self, axis_num): raise Exception('Must have 0<= axis <= 1') def _get_numeric_data(self): - if self._is_mixed_type: - num_data = self._data.get_numeric_data() - return DataFrame(num_data, index=self.index, copy=False) - else: - if (self.values.dtype != np.object_ and - not issubclass(self.values.dtype.type, np.datetime64)): - return self - else: - return self.ix[:, []] + return self._constructor(self._data.get_numeric_data(), index=self.index, copy=False) def _get_bool_data(self): - if self._is_mixed_type: - bool_data = self._data.get_bool_data() - return DataFrame(bool_data, index=self.index, copy=False) - else: # pragma: no cover - if self.values.dtype == np.bool_: - return self - else: - return self.ix[:, []] + return self._constructor(self._data.get_bool_data(), index=self.index, copy=False) def quantile(self, q=0.5, axis=0, numeric_only=True): """ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index b44ef5d465bb9..94029e3212057 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -30,9 +30,7 @@ class Block(object): _can_hold_na = False _downcast_dtype = None - def __init__(self, values, items, ref_items, ndim=2): - if issubclass(values.dtype.type, basestring): - values = np.array(values, dtype=object) + def __init__(self, values, items, ref_items, ndim=2, fastpath=False): if values.ndim != ndim: raise ValueError('Wrong number of dimensions') @@ -44,8 +42,13 @@ def __init__(self, values, items, ref_items, ndim=2): self._ref_locs = None self.values = values self.ndim = ndim - self.items = _ensure_index(items) - self.ref_items = _ensure_index(ref_items) + + if fastpath: + self.items = items + self.ref_items = ref_items + else: + self.items = _ensure_index(items) + self.ref_items = _ensure_index(ref_items) def _gi(self, arg): return 
self.values[arg] @@ -114,7 +117,7 @@ def copy(self, deep=True): values = self.values if deep: values = values.copy() - return make_block(values, self.items, self.ref_items) + return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True) def merge(self, other): if not self.ref_items.equals(other.ref_items): @@ -133,7 +136,7 @@ def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None): raise AssertionError('axis must be at least 1, got %d' % axis) new_values = com.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) - return make_block(new_values, self.items, self.ref_items) + return make_block(new_values, self.items, self.ref_items, fastpath=True) def reindex_items_from(self, new_ref_items, copy=True): """ @@ -155,7 +158,7 @@ def reindex_items_from(self, new_ref_items, copy=True): new_values = com.take_nd(self.values, masked_idx, axis=0, allow_fill=False) new_items = self.items.take(masked_idx) - return make_block(new_values, new_items, new_ref_items) + return make_block(new_values, new_items, new_ref_items, fastpath=True) def get(self, item): loc = self.items.get_loc(item) @@ -181,7 +184,7 @@ def delete(self, item): loc = self.items.get_loc(item) new_items = self.items.delete(loc) new_values = np.delete(self.values, loc, 0) - return make_block(new_values, new_items, self.ref_items) + return make_block(new_values, new_items, self.ref_items, klass=self.__class__, fastpath=True) def split_block_at(self, item): """ @@ -204,7 +207,9 @@ def split_block_at(self, item): for s, e in com.split_ranges(mask): yield make_block(self.values[s:e], self.items[s:e].copy(), - self.ref_items) + self.ref_items, + klass=self.__class__, + fastpath=True) def fillna(self, value, inplace=False, downcast=None): if not self._can_hold_na: @@ -217,7 +222,7 @@ def fillna(self, value, inplace=False, downcast=None): mask = com.isnull(new_values) np.putmask(new_values, mask, value) - block = make_block(new_values, self.items, 
self.ref_items) + block = make_block(new_values, self.items, self.ref_items, fastpath=True) if downcast: block = block.downcast() return block @@ -251,7 +256,7 @@ def astype(self, dtype, copy = True, raise_on_error = True): """ try: newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy), - self.items, self.ref_items) + self.items, self.ref_items, fastpath=True) except: if raise_on_error is True: raise @@ -365,14 +370,14 @@ def putmask(self, mask, new, inplace=False): nv = new_values[i] if inplace else new_values[i].copy() nv = _block_shape(nv) - new_blocks.append(make_block(nv, [ item ], self.ref_items)) + new_blocks.append(make_block(nv, Index([ item ]), self.ref_items, fastpath=True)) return new_blocks if inplace: return [ self ] - return [ make_block(new_values, self.items, self.ref_items) ] + return [ make_block(new_values, self.items, self.ref_items, fastpath=True) ] def interpolate(self, method='pad', axis=0, inplace=False, limit=None, missing=None, coerce=False): @@ -403,14 +408,14 @@ def interpolate(self, method='pad', axis=0, inplace=False, else: com.backfill_2d(transf(values), limit=limit, mask=mask) - return make_block(values, self.items, self.ref_items) + return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True) - def take(self, indexer, axis=1): + def take(self, indexer, ref_items, axis=1): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) new_values = com.take_nd(self.values, indexer, axis=axis, allow_fill=False) - return make_block(new_values, self.items, self.ref_items) + return make_block(new_values, self.items, ref_items, klass=self.__class__, fastpath=True) def get_values(self, dtype): return self.values @@ -418,7 +423,7 @@ def get_values(self, dtype): def diff(self, n): """ return block for the diff of the values """ new_values = com.diff(self.values, n, axis=1) - return make_block(new_values, self.items, self.ref_items) + return make_block(new_values, self.items, 
self.ref_items, fastpath=True) def shift(self, indexer, periods): """ shift the block by periods, possibly upcast """ @@ -431,7 +436,7 @@ def shift(self, indexer, periods): new_values[:, :periods] = fill_value else: new_values[:, periods:] = fill_value - return make_block(new_values, self.items, self.ref_items) + return make_block(new_values, self.items, self.ref_items, fastpath=True) def eval(self, func, other, raise_on_error = True, try_cast = False): """ @@ -486,7 +491,7 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): if try_cast: result = self._try_cast_result(result) - return make_block(result, self.items, self.ref_items) + return make_block(result, self.items, self.ref_items, fastpath=True) def where(self, other, cond, raise_on_error = True, try_cast = False): """ @@ -551,7 +556,7 @@ def func(c,v,o): result.fill(np.nan) return result - def create_block(result, items, transpose = True): + def create_block(result, items, transpose=True): if not isinstance(result, np.ndarray): raise TypeError('Could not compare [%s] with block values' % repr(other)) @@ -581,7 +586,7 @@ def create_block(result, items, transpose = True): result = np.repeat(result,self.shape[1:]) result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:]) - result_blocks.append(create_block(result, item, transpose = False)) + result_blocks.append(create_block(result, item, transpose=False)) return result_blocks else: @@ -683,6 +688,12 @@ class ObjectBlock(Block): is_object = True _can_hold_na = True + def __init__(self, values, items, ref_items, ndim=2, fastpath=False): + if issubclass(values.dtype.type, basestring): + values = np.array(values, dtype=object) + + super(ObjectBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath) + @property def is_bool(self): """ we can be a bool if we have only bool values but are of type object """ @@ -704,7 +715,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True): values = 
com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric) values = _block_shape(values) items = self.items.take([i]) - newb = make_block(values, items, self.ref_items) + newb = make_block(values, items, self.ref_items, fastpath=True) blocks.append(newb) return blocks @@ -727,11 +738,11 @@ def should_store(self, value): class DatetimeBlock(Block): _can_hold_na = True - def __init__(self, values, items, ref_items, ndim=2): + def __init__(self, values, items, ref_items, ndim=2, fastpath=True): if values.dtype != _NS_DTYPE: values = tslib.cast_to_nanoseconds(values) - Block.__init__(self, values, items, ref_items, ndim=ndim) + super(DatetimeBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath) def _gi(self, arg): return lib.Timestamp(self.values[arg]) @@ -813,40 +824,41 @@ def get_values(self, dtype): return self.values -def make_block(values, items, ref_items): - dtype = values.dtype - vtype = dtype.type - klass = None - - if issubclass(vtype, np.floating): - klass = FloatBlock - elif issubclass(vtype, np.complexfloating): - klass = ComplexBlock - elif issubclass(vtype, np.datetime64): - klass = DatetimeBlock - elif issubclass(vtype, np.integer): - klass = IntBlock - elif dtype == np.bool_: - klass = BoolBlock - - # try to infer a datetimeblock - if klass is None and np.prod(values.shape): - flat = values.ravel() - inferred_type = lib.infer_dtype(flat) - if inferred_type == 'datetime': - - # we have an object array that has been inferred as datetime, so - # convert it - try: - values = tslib.array_to_datetime(flat).reshape(values.shape) - klass = DatetimeBlock - except: # it already object, so leave it - pass +def make_block(values, items, ref_items, klass = None, fastpath=False): if klass is None: - klass = ObjectBlock - - return klass(values, items, ref_items, ndim=values.ndim) + dtype = values.dtype + vtype = dtype.type + + if issubclass(vtype, np.floating): + klass = FloatBlock + elif 
issubclass(vtype, np.complexfloating): + klass = ComplexBlock + elif issubclass(vtype, np.datetime64): + klass = DatetimeBlock + elif issubclass(vtype, np.integer): + klass = IntBlock + elif dtype == np.bool_: + klass = BoolBlock + + # try to infer a datetimeblock + if klass is None and np.prod(values.shape): + flat = values.ravel() + inferred_type = lib.infer_dtype(flat) + if inferred_type == 'datetime': + + # we have an object array that has been inferred as datetime, so + # convert it + try: + values = tslib.array_to_datetime(flat).reshape(values.shape) + klass = DatetimeBlock + except: # it already object, so leave it + pass + + if klass is None: + klass = ObjectBlock + + return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath) # TODO: flexible with index=None and/or items=None @@ -1168,8 +1180,11 @@ def get_slice(self, slobj, axis=0, raise_on_error=False): new_items = new_axes[0] if len(self.blocks) == 1: blk = self.blocks[0] - newb = make_block(blk.values[slobj], new_items, - new_items) + newb = make_block(blk.values[slobj], + new_items, + new_items, + klass=blk.__class__, + fastpath=True) new_blocks = [newb] else: return self.reindex_items(new_items) @@ -1186,8 +1201,11 @@ def _slice_blocks(self, slobj, axis): slicer = tuple(slicer) for block in self.blocks: - newb = make_block(block.values[slicer], block.items, - block.ref_items) + newb = make_block(block.values[slicer], + block.items, + block.ref_items, + klass=block.__class__, + fastpath=True) new_blocks.append(newb) return new_blocks @@ -1296,13 +1314,22 @@ def xs(self, key, axis=1, copy=True): raise Exception('cannot get view of mixed-type or ' 'non-consolidated DataFrame') for blk in self.blocks: - newb = make_block(blk.values[slicer], blk.items, blk.ref_items) + newb = make_block(blk.values[slicer], + blk.items, + blk.ref_items, + klass=blk.__class__, + fastpath=True) new_blocks.append(newb) elif len(self.blocks) == 1: - vals = self.blocks[0].values[slicer] + block = self.blocks[0] 
+ vals = block.values[slicer] if copy: vals = vals.copy() - new_blocks = [make_block(vals, self.items, self.items)] + new_blocks = [make_block(vals, + self.items, + self.items, + klass=block.__class__, + fastpath=True)] return BlockManager(new_blocks, new_axes) @@ -1491,7 +1518,7 @@ def _add_new_block(self, item, value, loc=None): if loc is None: loc = self.items.get_loc(item) new_block = make_block(value, self.items[loc:loc + 1].copy(), - self.items) + self.items, fastpath=True) self.blocks.append(new_block) def _find_block(self, item): @@ -1569,7 +1596,7 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0, allow_fill=False) new_blocks.append(make_block(new_values, new_block_items, - new_items)) + new_items, fastpath=True)) if not mask.all(): na_items = new_items[-mask] @@ -1593,7 +1620,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan): # TODO: this part could be faster (!) new_items, indexer = self.items.reindex(new_items) - # could have some pathological (MultiIndex) issues here + # could have so me pathological (MultiIndex) issues here new_blocks = [] if indexer is None: for blk in self.blocks: @@ -1630,7 +1657,7 @@ def _make_na_block(self, items, ref_items, fill_value=np.nan): na_block = make_block(block_values, items, ref_items) return na_block - def take(self, indexer, axis=1, verify=True): + def take(self, indexer, new_index=None, axis=1, verify=True): if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) @@ -1645,15 +1672,11 @@ def take(self, indexer, axis=1, verify=True): 'the axis length') new_axes = list(self.axes) - new_axes[axis] = self.axes[axis].take(indexer) - new_blocks = [] - for blk in self.blocks: - new_values = com.take_nd(blk.values, indexer, axis=axis, - allow_fill=False) - newb = make_block(new_values, blk.items, self.items) - new_blocks.append(newb) + if new_index is None: + new_index = self.axes[axis].take(indexer) 
- return BlockManager(new_blocks, new_axes) + new_axes[axis] = new_index + return self.apply('take',axes=new_axes,indexer=indexer,ref_items=new_axes[0],axis=axis) def merge(self, other, lsuffix=None, rsuffix=None): if not self._is_indexed_like(other): diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 383b98bfc440d..82719817b5744 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -46,8 +46,14 @@ def axes(self): @property def blocks(self): """ return our series in the column order """ - s = self.sp_frame._series - return [ self.iget(i) for i in self.sp_frame.columns ] + return [ self.iget(i) for i, c in enumerate(self.sp_frame.columns) ] + + def get_numeric_data(self): + # does not check, but assuming all numeric for now + return self.sp_frame + + def get_bool_data(self): + raise NotImplementedError class SparseDataFrame(DataFrame): """ @@ -125,10 +131,13 @@ def convert_objects(self, convert_dates=True): @property def _constructor(self): - def wrapper(data, index=None, columns=None): - return SparseDataFrame(data, index=index, columns=columns, - default_fill_value=self.default_fill_value, - default_kind=self.default_kind) + def wrapper(data, index=None, columns=None, copy=False): + sf = SparseDataFrame(data, index=index, columns=columns, + default_fill_value=self.default_fill_value, + default_kind=self.default_kind) + if copy: + sf = sf.copy() + return sf return wrapper def _init_dict(self, data, index, columns, dtype=None): diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index b20303efe222f..57f861aff8bfc 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -282,13 +282,7 @@ def _take_new_index(obj, indexer, new_index, axis=0): elif isinstance(obj, DataFrame): if axis == 1: raise NotImplementedError - data = obj._data - - new_blocks = [b.take(indexer, axis=1) for b in data.blocks] - new_axes = list(data.axes) - new_axes[1] = new_index - new_data = BlockManager(new_blocks, new_axes) - 
return DataFrame(new_data) + return DataFrame(obj._data.take(indexer,new_index=new_index,axis=1)) else: raise NotImplementedError
CLN: cleaned up take in tseries/resample and core/internals to use interenals take ``` (which wasn't using the block level take) ``` This is not a big diff on get_numeric_data, about 5us on avg when I test it, but its non-zero! (and there is some other code cleanups) The other tests might be noise, not sure. ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- join_dataframe_index_single_key_bigger | 5.0523 | 5.8804 | 0.8592 | frame_drop_dup_na_inplace | 2.3123 | 2.4796 | 0.9325 | frame_get_numeric_data | 0.0637 | 0.0664 | 0.9593 | Target [ff6501e] : PERF: get_numeric_data now a bit faster Base [c45e769] : Merge pull request #3357 from jreback/hdf_fix ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3359
2013-04-14T16:18:01Z
2013-04-14T16:31:55Z
2013-04-14T16:31:55Z
2014-06-16T23:22:22Z
BLD: Add 2to3 caching, move the build cache logic into use_build_cache.py
diff --git a/scripts/use_build_cache.py b/scripts/use_build_cache.py new file mode 100755 index 0000000000000..0b43e897df9d4 --- /dev/null +++ b/scripts/use_build_cache.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os + +""" +This script should be run from the repo root dir, it rewrites setup.py +to use the build cache directory specified in the envar BUILD_CACHE_DIR +or in a file named .build_cache_dir in the repo root directory. + +Artifacts included in the cache: +- gcc artifacts +- The .c files resulting from cythonizing pyx/d files +- 2to3 refactoring results (when run under python3) + +Tested on all released back to 0.7.0. + +""" +shim=""" +import os +import sys +import shutil +import warnings + +try: + if not ("develop" in sys.argv) and not ("install" in sys.argv): + 1/0 + basedir = os.path.dirname(__file__) + dotfile = os.path.join(basedir,".build_cache_dir") + BUILD_CACHE_DIR = "" + if os.path.exists(dotfile): + BUILD_CACHE_DIR = open(dotfile).readline().strip() + BUILD_CACHE_DIR = os.environ.get('BUILD_CACHE_DIR',BUILD_CACHE_DIR) + + if os.path.isdir(BUILD_CACHE_DIR): + print("--------------------------------------------------------") + print("BUILD CACHE ACTIVATED (V2). 
be careful, this is experimental.") + print("--------------------------------------------------------") + else: + BUILD_CACHE_DIR = None + + # retrieve 2to3 artifacts + if sys.version_info[0] >= 3: + from lib2to3 import refactor + from hashlib import sha1 + import shutil + import multiprocessing + pyver = "%d.%d" % (sys.version_info[:2]) + files = ["pandas"] + to_process = dict() + orig_hashes= dict((f.split("-")[0],f) for f in os.listdir(BUILD_CACHE_DIR) + if "-" in f and f.endswith(pyver)) + post_hashes= dict((f.split("-")[1],f) for f in os.listdir(BUILD_CACHE_DIR) + if "-" in f and f.endswith(pyver)) + + while files: + f = files.pop() + + if os.path.isdir(f): + files.extend([os.path.join(f,x) for x in os.listdir(f)]) + else: + if not f.endswith(".py"): + continue + else: + try: + h = sha1(open(f,"rb").read()).hexdigest() + except IOError: + to_process[h] = f + if h in orig_hashes: + src = os.path.join(BUILD_CACHE_DIR,orig_hashes[h]) + # print("cache hit %s,%s" % (f,h)) + shutil.copyfile(src,f) + elif h not in post_hashes: + + # we're not in a dev dir with already processed files + # print("cache miss %s,%s" % (f,h)) + # print("will process " + f) + to_process[h] = f + + avail_fixes = set(refactor.get_fixers_from_package("lib2to3.fixes")) + avail_fixes.discard('lib2to3.fixes.fix_next') + t=refactor.RefactoringTool(avail_fixes) + t.refactor(to_process.values(),True) + print("2to3 done refactoring.") + for orig_h in to_process: + f = to_process[orig_h] + post_h = sha1(open(f,"rb").read()).hexdigest() + cached_fname = orig_h + "-" + post_h + "-" + pyver + # print("cache put %s,%s in %s" % (f,h,cached_fname)) + shutil.copyfile(f,os.path.join(BUILD_CACHE_DIR,cached_fname)) + +except: + BUILD_CACHE_DIR = None + +print("BUILD_CACHE_DIR: " + str(BUILD_CACHE_DIR) ) + +class CompilationCacheMixin(object): + def __init__(self, *args, **kwds): + cache_dir = kwds.pop("cache_dir", BUILD_CACHE_DIR) + self.cache_dir = cache_dir + if not os.path.isdir(cache_dir): + raise 
Exception("Error: path to Cache directory (%s) is not a dir" % cache_dir) + + def _copy_from_cache(self, hash, target): + src = os.path.join(self.cache_dir, hash) + if os.path.exists(src): + # print("Cache HIT: asked to copy file %s in %s" % + # (src,os.path.abspath(target))) + s = "." + for d in target.split(os.path.sep)[:-1]: + s = os.path.join(s, d) + if not os.path.exists(s): + os.mkdir(s) + shutil.copyfile(src, target) + + return True + + return False + + def _put_to_cache(self, hash, src): + target = os.path.join(self.cache_dir, hash) + # print( "Cache miss: asked to copy file from %s to %s" % (src,target)) + s = "." + for d in target.split(os.path.sep)[:-1]: + s = os.path.join(s, d) + if not os.path.exists(s): + os.mkdir(s) + shutil.copyfile(src, target) + + def _hash_obj(self, obj): + try: + return hash(obj) + except: + raise NotImplementedError("You must override this method") + +class CompilationCacheExtMixin(CompilationCacheMixin): + def _hash_file(self, fname): + from hashlib import sha1 + f= None + try: + hash = sha1() + hash.update(self.build_lib.encode('utf-8')) + try: + if sys.version_info[0] >= 3: + import io + f = io.open(fname, "rb") + else: + f = open(fname) + + first_line = f.readline() + # ignore cython generation timestamp header + if "Generated by Cython" not in first_line.decode('utf-8'): + hash.update(first_line) + hash.update(f.read()) + return hash.hexdigest() + + except: + raise + return None + finally: + if f: + f.close() + + except IOError: + return None + + def _hash_obj(self, ext): + from hashlib import sha1 + + sources = ext.sources + if (sources is None or + (not hasattr(sources, '__iter__')) or + isinstance(sources, str) or + sys.version[0] == 2 and isinstance(sources, unicode)): # argh + return False + + sources = list(sources) + ext.depends + hash = sha1() + try: + for fname in sources: + fhash = self._hash_file(fname) + if fhash: + hash.update(fhash.encode('utf-8')) + except: + return None + + return hash.hexdigest() + + 
+class CachingBuildExt(build_ext, CompilationCacheExtMixin): + def __init__(self, *args, **kwds): + CompilationCacheExtMixin.__init__(self, *args, **kwds) + kwds.pop("cache_dir", None) + build_ext.__init__(self, *args, **kwds) + + def build_extension(self, ext, *args, **kwds): + ext_path = self.get_ext_fullpath(ext.name) + build_path = os.path.join(self.build_lib, os.path.basename(ext_path)) + + hash = self._hash_obj(ext) + if hash and self._copy_from_cache(hash, ext_path): + return + + build_ext.build_extension(self, ext, *args, **kwds) + + hash = self._hash_obj(ext) + if os.path.exists(build_path): + self._put_to_cache(hash, build_path) # build_ext + if os.path.exists(ext_path): + self._put_to_cache(hash, ext_path) # develop + + def cython_sources(self, sources, extension): + import re + cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ + (extension.language and extension.language.lower() == 'c++') + target_ext = '.c' + if cplus: + target_ext = '.cpp' + + for i, s in enumerate(sources): + if not re.search("\.(pyx|pxi|pxd)$", s): + continue + ext_dir = os.path.dirname(s) + ext_basename = re.sub("\.[^\.]+$", "", os.path.basename(s)) + ext_basename += target_ext + target = os.path.join(ext_dir, ext_basename) + hash = self._hash_file(s) + sources[i] = target + if hash and self._copy_from_cache(hash, target): + continue + build_ext.cython_sources(self, [s], extension) + self._put_to_cache(hash, target) + + sources = [x for x in sources if x.startswith("pandas") or "lib." 
in x] + + return sources + +if BUILD_CACHE_DIR: # use the cache + cmdclass['build_ext'] = CachingBuildExt + +try: + # recent + setuptools_kwargs['use_2to3'] = True if BUILD_CACHE_DIR is None else False +except: + pass + +try: + # pre eb2234231 , ~ 0.7.0, + setuptools_args['use_2to3'] = True if BUILD_CACHE_DIR is None else False +except: + pass + +""" +def main(): + opd = os.path.dirname + opj = os.path.join + s= None + with open(opj(opd(__file__),"..","setup.py")) as f: + s = f.read() + if s: + if "BUILD CACHE ACTIVATED (V2)" in s: + print( "setup.py already wired with V2 build_cache, skipping..") + else: + SEP="\nsetup(" + before,after = s.split(SEP) + with open(opj(opd(__file__),"..","setup.py"),"wb") as f: + f.write(before + shim + SEP + after) + print(""" + setup.py was rewritten to use a build cache. + Make sure you've put the following in your .bashrc: + + export BUILD_CACHE_DIR=<an existing directory for saving cached files> + echo $BUILD_CACHE_DIR > pandas_repo_rootdir/.build_cache_dir + + Once active, build results (compilation, cythonizations and 2to3 artifacts) + will be cached in "$BUILD_CACHE_DIR" and subsequent builds should be + sped up if no changes requiring recompilation were made. + + Go ahead and run: + + python setup.py clean + python setup.py develop + + """) + + +if __name__ == '__main__': + import sys + sys.exit(main()) diff --git a/setup.py b/setup.py index d65e303758ee8..bf992d66b88f4 100755 --- a/setup.py +++ b/setup.py @@ -11,23 +11,6 @@ import shutil import warnings -try: - basedir = os.path.dirname(__file__) - dotfile = os.path.join(basedir,".build_cache_dir") - BUILD_CACHE_DIR = "" - if os.path.exists(dotfile): - BUILD_CACHE_DIR = open(dotfile).readline().strip() - BUILD_CACHE_DIR = os.environ.get('BUILD_CACHE_DIR',BUILD_CACHE_DIR) - - if os.path.isdir(BUILD_CACHE_DIR): - print("--------------------------------------------------------") - print("BUILD CACHE ACTIVATED. 
be careful, this is experimental.") - print("--------------------------------------------------------") - else: - BUILD_CACHE_DIR = None -except: - BUILD_CACHE_DIR = None - # may need to work around setuptools bug by providing a fake Pyrex try: import Cython @@ -346,155 +329,6 @@ def build_extensions(self): build_ext.build_extensions(self) -class CompilationCacheMixin(object): - def __init__(self, *args, **kwds): - cache_dir = kwds.pop("cache_dir", BUILD_CACHE_DIR) - self.cache_dir = cache_dir - if not os.path.isdir(cache_dir): - raise Exception("Error: path to Cache directory (%s) is not a dir" % cache_dir) - - def _copy_from_cache(self, hash, target): - src = os.path.join(self.cache_dir, hash) - if os.path.exists(src): - # print("Cache HIT: asked to copy file %s in %s" % - # (src,os.path.abspath(target))) - s = "." - for d in target.split(os.path.sep)[:-1]: - s = os.path.join(s, d) - if not os.path.exists(s): - os.mkdir(s) - shutil.copyfile(src, target) - - return True - - return False - - def _put_to_cache(self, hash, src): - target = os.path.join(self.cache_dir, hash) - # print( "Cache miss: asked to copy file from %s to %s" % (src,target)) - s = "." 
- for d in target.split(os.path.sep)[:-1]: - s = os.path.join(s, d) - if not os.path.exists(s): - os.mkdir(s) - shutil.copyfile(src, target) - - def _hash_obj(self, obj): - """ - you should override this method to provide a sensible - implementation of hashing functions for your intended objects - """ - try: - return hash(obj) - except: - raise NotImplementedError("You must override this method") - -class CompilationCacheExtMixin(CompilationCacheMixin): - def __init__(self, *args, **kwds): - CompilationCacheMixin.__init__(self, *args, **kwds) - - def _hash_file(self, fname): - from hashlib import sha1 - f= None - try: - hash = sha1() - hash.update(self.build_lib.encode('utf-8')) - try: - if sys.version_info[0] >= 3: - import io - f = io.open(fname, "rb") - else: - f = open(fname) - - first_line = f.readline() - # ignore cython generation timestamp header - if "Generated by Cython" not in first_line.decode('utf-8'): - hash.update(first_line) - hash.update(f.read()) - return hash.hexdigest() - - except: - raise - return None - finally: - if f: - f.close() - - except IOError: - return None - - def _hash_obj(self, ext): - from hashlib import sha1 - - sources = ext.sources - if (sources is None or - (not hasattr(sources, '__iter__')) or - isinstance(sources, str) or - sys.version[0] == 2 and isinstance(sources, unicode)): # argh - return False - - sources = list(sources) + ext.depends - hash = sha1() - try: - for fname in sources: - fhash = self._hash_file(fname) - if fhash: - hash.update(fhash.encode('utf-8')) - except: - return None - - return hash.hexdigest() - - -class CachingBuildExt(build_ext, CompilationCacheExtMixin): - def __init__(self, *args, **kwds): - CompilationCacheExtMixin.__init__(self, *args, **kwds) - kwds.pop("cache_dir", None) - build_ext.__init__(self, *args, **kwds) - - def build_extension(self, ext, *args, **kwds): - ext_path = self.get_ext_fullpath(ext.name) - build_path = os.path.join(self.build_lib, os.path.basename(ext_path)) - - hash = 
self._hash_obj(ext) - if hash and self._copy_from_cache(hash, ext_path): - return - - build_ext.build_extension(self, ext, *args, **kwds) - - hash = self._hash_obj(ext) - if os.path.exists(build_path): - self._put_to_cache(hash, build_path) # build_ext - if os.path.exists(ext_path): - self._put_to_cache(hash, ext_path) # develop - - def cython_sources(self, sources, extension): - import re - cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ - (extension.language and extension.language.lower() == 'c++') - target_ext = '.c' - if cplus: - target_ext = '.cpp' - - for i, s in enumerate(sources): - if not re.search("\.(pyx|pxi|pxd)$", s): - continue - ext_dir = os.path.dirname(s) - ext_basename = re.sub("\.[^\.]+$", "", os.path.basename(s)) - ext_basename += target_ext - target = os.path.join(ext_dir, ext_basename) - hash = self._hash_file(s) - sources[i] = target - if hash and self._copy_from_cache(hash, target): - continue - build_ext.cython_sources(self, [s], extension) - self._put_to_cache(hash, target) - - sources = [x for x in sources if x.startswith("pandas")] - - return sources - - class CythonCommand(build_ext): """Custom distutils command subclassed from Cython.Distutils.build_ext to compile pyx->c, and stop there. All this does is override the @@ -524,8 +358,6 @@ def run(self): if cython: suffix = '.pyx' cmdclass['build_ext'] = CheckingBuildExt - if BUILD_CACHE_DIR: # use the cache - cmdclass['build_ext'] = CachingBuildExt cmdclass['cython'] = CythonCommand else: suffix = '.c' @@ -645,6 +477,10 @@ def pxd(name): setuptools_kwargs["test_suite"] = "nose.collector" write_version_py() + +# The build cache system does string matching below this point. +# if you change something, be careful. 
+ setup(name=DISTNAME, version=FULLVERSION, maintainer=AUTHOR, diff --git a/tox.sh b/tox.sh new file mode 100755 index 0000000000000..b68ffc7fdb91c --- /dev/null +++ b/tox.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + + +if [ x"$1" == x"fast" ]; then + scripts/use_build_cache.py +fi; + +tox diff --git a/tox_prll.sh b/tox_prll.sh index 66311aaf1991e..910e49b6b5a80 100755 --- a/tox_prll.sh +++ b/tox_prll.sh @@ -12,6 +12,10 @@ ENVS=$(cat tox.ini | grep envlist | tr "," " " | cut -d " " -f 3-) TOX_INI_PAR="tox_prll.ini" +if [ x"$1" == x"fast" ]; then + scripts/use_build_cache.py +fi; + echo "[Creating distfile]" tox --sdistonly export DISTFILE="$(find .tox/dist -type f )"
I tested this script all the way back to 0.7.0. The build cache code was a lot of noise to shove into setup.py, I'm glad to pull it back out. a "goto_ver.sh" script is now really easy to do, and very useful for benchmarking and bisecting regressions. .bashrc: ``` bash # Use the pandas build cache export BUILD_CACHE_DIR="/home/user1/tmp/.pandas_build_cache/" if [ ! -e $BUILD_CACHE_DIR ]; then mkdir $BUILD_CACHE_DIR ; fi function cpdev { scripts/use_build_cache.py # rewire setup.py with build_cache python ./setup.py clean; python ./setup.py develop; git checkout setup.py # restore setup.py } ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3358
2013-04-14T14:11:56Z
2013-04-14T14:58:21Z
2013-04-14T14:58:21Z
2013-04-14T15:04:13Z
ENH: HDFStore now auto creates data_columns if they are specified in min_itemsize
diff --git a/RELEASE.rst b/RELEASE.rst index 610e9254289aa..efa4950a36bb3 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -178,6 +178,7 @@ pandas 0.11.0 - added the method ``select_column`` to select a single column from a table as a Series. - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - ``min_itemsize`` parameter will now automatically create data_columns for passed keys - Downcast on pivot if possible (GH3283_), adds argument ``downcast`` to ``fillna`` diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index db311e9be9ecb..f2779e90f206a 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -282,6 +282,9 @@ The :ref:`HDFStores <io.hdf5>` docs `Troubleshoot HDFStore exceptions <http://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type>`__ +`Setting min_itemsize with strings +<http://stackoverflow.com/questions/15988871/hdfstore-appendstring-dataframe-fails-when-string-column-contents-are-longer>`__ + Storing Attributes to a group node .. ipython:: python diff --git a/doc/source/io.rst b/doc/source/io.rst index 25c42780afd65..9001ae393d552 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1391,7 +1391,7 @@ of rows in an object. Multiple Table Queries ~~~~~~~~~~~~~~~~~~~~~~ -New in 0.10.1 are the methods ``append_to_multple`` and +New in 0.10.1 are the methods ``append_to_multiple`` and ``select_as_multiple``, that can perform appending/selecting from multiple tables at once. The idea is to have one table (call it the selector table) that you index most/all of the columns, and perform your @@ -1535,24 +1535,6 @@ Notes & Caveats ``tables``. The sizes of a string based indexing column (e.g. 
*columns* or *minor_axis*) are determined as the maximum size of the elements in that axis or by passing the parameter - ``min_itemsize`` on the first table creation (``min_itemsize`` can - be an integer or a dict of column name to an integer). If - subsequent appends introduce elements in the indexing axis that are - larger than the supported indexer, an Exception will be raised - (otherwise you could have a silent truncation of these indexers, - leading to loss of information). Just to be clear, this fixed-width - restriction applies to **indexables** (the indexing columns) and - **string values** in a mixed_type table. - - .. ipython:: python - - store.append('wp_big_strings', wp, min_itemsize = { 'minor_axis' : 30 }) - wp = wp.rename_axis(lambda x: x + '_big_strings', axis=2) - store.append('wp_big_strings', wp) - store.select('wp_big_strings') - - # we have provided a minimum minor_axis indexable size - store.root.wp_big_strings.table DataTypes ~~~~~~~~~ @@ -1589,6 +1571,34 @@ conversion may not be necessary in future versions of pandas) df df.dtypes +String Columns +~~~~~~~~~~~~~~ + +The underlying implementation of ``HDFStore`` uses a fixed column width (itemsize) for string columns. A string column itemsize is calculated as the maximum of the +length of data (for that column) that is passed to the ``HDFStore``, **in the first append**. Subsequent appends, may introduce a string for a column **larger** than the column can hold, an Exception will be raised (otherwise you could have a silent truncation of these columns, leading to loss of information). In the future we may relax this and allow a user-specified truncation to occur. + +Pass ``min_itemsize`` on the first table creation to a-priori specifiy the minimum length of a particular string column. ``min_itemsize`` can be an integer, or a dict mapping a column name to an integer. You can pass ``values`` as a key to allow all *indexables* or *data_columns* to have this min_itemsize. 
+ +Starting in 0.11, passing a ``min_itemsize`` dict will cause all passed columns to be created as *data_columns* automatically. + +.. note:: + + If you are not passing any *data_columns*, then the min_itemsize will be the maximum of the length of any string passed + +.. ipython:: python + + dfs = DataFrame(dict(A = 'foo', B = 'bar'),index=range(5)) + dfs + + # A and B have a size of 30 + store.append('dfs', dfs, min_itemsize = 30) + store.get_storer('dfs').table + + # A is created as a data_column with a size of 30 + # B is size is calculated + store.append('dfs2', dfs, min_itemsize = { 'A' : 30 }) + store.get_storer('dfs2').table + External Compatibility ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index 9c0a6d5a421c7..834b23c92d3b5 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -229,9 +229,11 @@ API changes - Added to_series() method to indicies, to facilitate the creation of indexers (GH3275_) - - In ``HDFStore``, added the method ``select_column`` to select a single column from a table as a Series. + - ``HDFStore`` - - In ``HDFStore``, deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - added the method ``select_column`` to select a single column from a table as a Series. + - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - ``min_itemsize`` parameter to ``append`` will now automatically create data_columns for passed keys Enhancements ~~~~~~~~~~~~ @@ -244,25 +246,26 @@ Enhancements - Bottleneck is now a :ref:`Recommended Dependencies <install.recommended_dependencies>`, to accelerate certain types of ``nan`` operations - - For ``HDFStore``, support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` + - ``HDFStore`` - .. 
ipython:: python + - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` - df = DataFrame(dict(A=range(5), B=range(5))) - df.to_hdf('store.h5','table',append=True) - read_hdf('store.h5', 'table', where = ['index>2']) + .. ipython:: python - .. ipython:: python - :suppress: - :okexcept: + df = DataFrame(dict(A=range(5), B=range(5))) + df.to_hdf('store.h5','table',append=True) + read_hdf('store.h5', 'table', where = ['index>2']) + + .. ipython:: python + :suppress: + :okexcept: - os.remove('store.h5') + os.remove('store.h5') - - In ``HDFStore``, provide dotted attribute access to ``get`` from stores - (e.g. ``store.df == store['df']``) + - provide dotted attribute access to ``get`` from stores, e.g. ``store.df == store['df']`` - - In ``HDFStore``, new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are - provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_) + - new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are + provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_) - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (GH2437_) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0568ee7f7f8bf..da4077165add2 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2181,7 +2181,7 @@ def validate_min_itemsize(self, min_itemsize): if k == 'values': continue if k not in q: - raise ValueError("min_itemsize has [%s] which is not an axis or data_column" % k) + raise ValueError("min_itemsize has the key [%s] which is not an axis or data_column" % k) @property def indexables(self): @@ -2293,6 +2293,30 @@ def get_object(self, obj): """ return the data for this obj """ return obj + def validate_data_columns(self, data_columns, min_itemsize): + """ take the input data_columns and min_itemize and create a data_columns spec """ + + if not len(self.non_index_axes): + return [] + + axis_labels = 
self.non_index_axes[0][1] + + # evaluate the passed data_columns, True == use all columns + # take only valide axis labels + if data_columns is True: + data_columns = axis_labels + elif data_columns is None: + data_columns = [] + + # if min_itemsize is a dict, add the keys (exclude 'values') + if isinstance(min_itemsize,dict): + + existing_data_columns = set(data_columns) + data_columns.extend([ k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns ]) + + # return valid columns in the order of our axis + return [c for c in data_columns if c in axis_labels] + def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, min_itemsize=None, **kwargs): """ create and return the axes leagcy tables create an indexable column, indexable index, non-indexable fields @@ -2380,26 +2404,18 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, for a in self.non_index_axes: obj = obj.reindex_axis(a[1], axis=a[0], copy=False) - # get out blocks + # figure out data_columns and get out blocks block_obj = self.get_object(obj) - blocks = None - - if data_columns is not None and len(self.non_index_axes): - axis = self.non_index_axes[0][0] - axis_labels = self.non_index_axes[0][1] - if data_columns is True: - data_columns = axis_labels - - data_columns = [c for c in data_columns if c in axis_labels] + blocks = block_obj._data.blocks + if len(self.non_index_axes): + axis, axis_labels = self.non_index_axes[0] + data_columns = self.validate_data_columns(data_columns, min_itemsize) if len(data_columns): blocks = block_obj.reindex_axis(Index(axis_labels) - Index( - data_columns), axis=axis, copy=False)._data.blocks + data_columns), axis=axis, copy=False)._data.blocks for c in data_columns: blocks.extend(block_obj.reindex_axis( - [c], axis=axis, copy=False)._data.blocks) - - if blocks is None: - blocks = block_obj._data.blocks + [c], axis=axis, copy=False)._data.blocks) # add my values self.values_axes = [] diff 
--git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 6acf17b1220a7..598812373538c 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -694,25 +694,41 @@ def check_col(key,name,size): with ensure_clean(self.path) as store: - # infer the .typ on subsequent appends + def check_col(key,name,size): + self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size) + df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10)) + + # a min_itemsize that creates a data_column + store.remove('df') + store.append('df', df, min_itemsize={'A' : 200 }) + check_col('df', 'A', 200) + self.assert_(store.get_storer('df').data_columns == ['A']) + + # a min_itemsize that creates a data_column2 + store.remove('df') + store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 }) + check_col('df', 'A', 200) + self.assert_(store.get_storer('df').data_columns == ['B','A']) + + # a min_itemsize that creates a data_column2 + store.remove('df') + store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 }) + check_col('df', 'B', 200) + check_col('df', 'values_block_0', 200) + self.assert_(store.get_storer('df').data_columns == ['B']) + + # infer the .typ on subsequent appends store.remove('df') store.append('df', df[:5], min_itemsize=200) store.append('df', df[5:], min_itemsize=200) tm.assert_frame_equal(store['df'], df) # invalid min_itemsize keys - df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A']) - store.remove('df') self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20}) - # invalid sizes - store.remove('df') - store.append('df', df[:3], min_itemsize=3) - self.assertRaises(ValueError, store.append, 'df', df[3:]) - def test_append_with_data_columns(self): with ensure_clean(self.path) as store:
DOC: created String Columns section, added cookbook example
https://api.github.com/repos/pandas-dev/pandas/pulls/3357
2013-04-14T12:39:59Z
2013-04-14T12:59:34Z
2013-04-14T12:59:34Z
2014-07-10T11:41:42Z
CLN: DataFrame move doc from __init__ to cls
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ab7d23acf183e..ee986f1a466b1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -333,6 +333,41 @@ def f(self, other): class DataFrame(NDFrame): + """ Two-dimensional size-mutable, potentially heterogeneous tabular data + structure with labeled axes (rows and columns). Arithmetic operations + align on both row and column labels. Can be thought of as a dict-like + container for Series objects. The primary pandas data structure + + Parameters + ---------- + data : numpy ndarray (structured or homogeneous), dict, or DataFrame + Dict can contain Series, arrays, constants, or list-like objects + index : Index or array-like + Index to use for resulting frame. Will default to np.arange(n) if + no indexing information part of input data and no index provided + columns : Index or array-like + Will default to np.arange(n) if not column labels provided + dtype : dtype, default None + Data type to force, otherwise infer + copy : boolean, default False + Copy data from inputs. Only affects DataFrame / 2d ndarray input + + Examples + -------- + >>> d = {'col1': ts1, 'col2': ts2} + >>> df = DataFrame(data=d, index=index) + >>> df2 = DataFrame(np.random.randn(10, 5)) + >>> df3 = DataFrame(np.random.randn(10, 5), + ... columns=['a', 'b', 'c', 'd', 'e']) + + See also + -------- + DataFrame.from_records: constructor from tuples, also record arrays + DataFrame.from_dict: from dicts of Series, arrays, or dicts + DataFrame.from_csv: from CSV files + DataFrame.from_items: from sequence of (key, value) pairs + read_csv / read_table / read_clipboard + """ _auto_consolidate = True _het_axis = 1 _info_axis = 'columns' @@ -347,41 +382,6 @@ class DataFrame(NDFrame): def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): - """Two-dimensional size-mutable, potentially heterogeneous tabular data - structure with labeled axes (rows and columns). 
Arithmetic operations - align on both row and column labels. Can be thought of as a dict-like - container for Series objects. The primary pandas data structure - - Parameters - ---------- - data : numpy ndarray (structured or homogeneous), dict, or DataFrame - Dict can contain Series, arrays, constants, or list-like objects - index : Index or array-like - Index to use for resulting frame. Will default to np.arange(n) if - no indexing information part of input data and no index provided - columns : Index or array-like - Will default to np.arange(n) if not column labels provided - dtype : dtype, default None - Data type to force, otherwise infer - copy : boolean, default False - Copy data from inputs. Only affects DataFrame / 2d ndarray input - - Examples - -------- - >>> d = {'col1': ts1, 'col2': ts2} - >>> df = DataFrame(data=d, index=index) - >>> df2 = DataFrame(np.random.randn(10, 5)) - >>> df3 = DataFrame(np.random.randn(10, 5), - ... columns=['a', 'b', 'c', 'd', 'e']) - - See also - -------- - DataFrame.from_records: constructor from tuples, also record arrays - DataFrame.from_dict: from dicts of Series, arrays, or dicts - DataFrame.from_csv: from CSV files - DataFrame.from_items: from sequence of (key, value) pairs - read_csv / read_table / read_clipboard - """ if data is None: data = {} diff --git a/pandas/core/index.py b/pandas/core/index.py index 43cb7734a1cc5..a6da188c6011c 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,7 +1,5 @@ # pylint: disable=E1101,E1103,W0232 -from datetime import time - from itertools import izip import numpy as np diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 8e18e93e955ef..4f346d2e1860e 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -147,6 +147,24 @@ def f(self, other): class Panel(NDFrame): + """ + Represents wide format panel data, stored as 3-dimensional array + + Parameters + ---------- + data : ndarray (items x major x minor), or dict of DataFrames + items : Index or 
array-like + axis=1 + major_axis : Index or array-like + axis=1 + minor_axis : Index or array-like + axis=2 + dtype : dtype, default None + Data type to force, otherwise infer + copy : boolean, default False + Copy data from inputs. Only affects DataFrame / 2d ndarray input + """ + _AXIS_ORDERS = ['items', 'major_axis', 'minor_axis'] _AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(_AXIS_ORDERS)]) _AXIS_ALIASES = { @@ -218,23 +236,6 @@ def _construct_axes_dict_for_slice(self, axes=None, **kwargs): def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, copy=False, dtype=None): - """ - Represents wide format panel data, stored as 3-dimensional array - - Parameters - ---------- - data : ndarray (items x major x minor), or dict of DataFrames - items : Index or array-like - axis=1 - major_axis : Index or array-like - axis=1 - minor_axis : Index or array-like - axis=2 - dtype : dtype, default None - Data type to force, otherwise infer - copy : boolean, default False - Copy data from inputs. Only affects DataFrame / 2d ndarray input - """ self._init_data( data=data, items=items, major_axis=major_axis, minor_axis=minor_axis, copy=copy, dtype=dtype) diff --git a/pandas/core/series.py b/pandas/core/series.py index 919dd57ee70ab..4115c3e6abc3a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -386,6 +386,33 @@ def f(self, axis=0, dtype=None, out=None, skipna=True, level=None): class Series(pa.Array, generic.PandasObject): + """ + One-dimensional ndarray with axis labels (including time series). + Labels need not be unique but must be any hashable type. The object + supports both integer- and label-based indexing and provides a host of + methods for performing operations involving the index. 
Statistical + methods from ndarray have been overridden to automatically exclude + missing data (currently represented as NaN) + + Operations between Series (+, -, /, *, **) align values based on their + associated index values-- they need not be the same length. The result + index will be the sorted union of the two indexes. + + Parameters + ---------- + data : array-like, dict, or scalar value + Contains data stored in Series + index : array-like or Index (1d) + Values must be unique and hashable, same length as data. Index + object (or other iterable of same length as data) Will default to + np.arange(len(data)) if not provided. If both a dict and index + sequence are used, the index will override the keys found in the + dict. + dtype : numpy.dtype or None + If None, dtype will be inferred copy : boolean, default False Copy + input data + copy : boolean, default False + """ _AXIS_NUMBERS = { 'index': 0 } @@ -411,7 +438,7 @@ def __new__(cls, data=None, index=None, dtype=None, name=None, elif isinstance(data, dict): if index is None: from pandas.util.compat import OrderedDict - if isinstance(data,OrderedDict): + if isinstance(data, OrderedDict): index = Index(data) else: index = Index(sorted(data)) @@ -482,33 +509,6 @@ def from_array(cls, arr, index=None, name=None, copy=False): def __init__(self, data=None, index=None, dtype=None, name=None, copy=False): - """ - One-dimensional ndarray with axis labels (including time series). - Labels need not be unique but must be any hashable type. The object - supports both integer- and label-based indexing and provides a host of - methods for performing operations involving the index. Statistical - methods from ndarray have been overridden to automatically exclude - missing data (currently represented as NaN) - - Operations between Series (+, -, /, *, **) align values based on their - associated index values-- they need not be the same length. The result - index will be the sorted union of the two indexes. 
- - Parameters - ---------- - data : array-like, dict, or scalar value - Contains data stored in Series - index : array-like or Index (1d) - Values must be unique and hashable, same length as data. Index - object (or other iterable of same length as data) Will default to - np.arange(len(data)) if not provided. If both a dict and index - sequence are used, the index will override the keys found in the - dict. - dtype : numpy.dtype or None - If None, dtype will be inferred copy : boolean, default False Copy - input data - copy : boolean, default False - """ pass @property diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index b799188170e6f..6ad165570038e 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -74,6 +74,23 @@ def _sparse_series_op(left, right, op, name): class SparseSeries(SparseArray, Series): + """Data structure for labeled, sparse floating point data + + Parameters + ---------- + data : {array-like, Series, SparseSeries, dict} + kind : {'block', 'integer'} + fill_value : float + Defaults to NaN (code for missing) + sparse_index : {BlockIndex, IntIndex}, optional + Only if you have one. Mainly used internally + + Notes + ----- + SparseSeries objects are immutable via the typical Python means. If you + must change values, convert to dense, make your changes, then convert back + to sparse + """ __array_priority__ = 15 sp_index = None @@ -168,23 +185,6 @@ def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None): def __init__(self, data, index=None, sparse_index=None, kind='block', fill_value=None, name=None, copy=False): - """Data structure for labeled, sparse floating point data - -Parameters ----------- -data : {array-like, Series, SparseSeries, dict} -kind : {'block', 'integer'} -fill_value : float - Defaults to NaN (code for missing) -sparse_index : {BlockIndex, IntIndex}, optional - Only if you have one. 
Mainly used internally - -Notes ------ -SparseSeries objects are immutable via the typical Python means. If you -must change values, convert to dense, make your changes, then convert back -to sparse - """ pass @property diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 51903b7179822..14119dd94290a 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -40,29 +40,28 @@ def f(self): class Period(object): + """ + Represents an period of time + Parameters + ---------- + value : Period or basestring, default None + The time period represented (e.g., '4Q2005') + freq : str, default None + e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes + year : int, default None + month : int, default 1 + quarter : int, default None + day : int, default 1 + hour : int, default 0 + minute : int, default 0 + second : int, default 0 + """ __slots__ = ['freq', 'ordinal'] def __init__(self, value=None, freq=None, ordinal=None, year=None, month=1, quarter=None, day=1, hour=0, minute=0, second=0): - """ - Represents an period of time - - Parameters - ---------- - value : Period or basestring, default None - The time period represented (e.g., '4Q2005') - freq : str, default None - e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes - year : int, default None - month : int, default 1 - quarter : int, default None - day : int, default 1 - hour : int, default 0 - minute : int, default 0 - second : int, default 0 - """ # freq points to a tuple (base, mult); base is one of the defined # periods such as A, Q, etc. Every five minutes would be, e.g., # ('T', 5) but may be passed in as a string like '5T'
This, in part addresses our discussion from #3337. This PR moves the doc string from **init** to the more common class location. (cf.. numpy, rest of pandas classes that have a doc string) Besides STY / consistency this would allow for more directly accessing the doc string in a debugging (pdb) session: ``` Python print mydf.__doc__ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3342
2013-04-13T16:28:49Z
2013-04-14T15:14:43Z
null
2013-04-14T18:14:50Z
TST: properly raise the quoted exception when trying to unpickle on py2
diff --git a/pandas/core/common.py b/pandas/core/common.py index c41784d015e7c..610477caddba8 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1584,6 +1584,8 @@ def load(path): with open(path,'rb') as fh: return pickle.load(fh) except: + if not py3compat.PY3: + raise with open(path,'rb') as fh: return pickle.load(fh, encoding='latin1')
should throw the error in the unpickle if on py2, rather than try to decode as the try: except: is mainly to catch a decoding error
https://api.github.com/repos/pandas-dev/pandas/pulls/3336
2013-04-13T01:10:29Z
2013-04-13T01:10:36Z
2013-04-13T01:10:36Z
2013-04-13T01:10:36Z
PERF: series construction perf enhancements, use a fast path based on dt...
diff --git a/pandas/core/common.py b/pandas/core/common.py index 4acaa3f421e3a..c41784d015e7c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -42,6 +42,8 @@ class AmbiguousIndexError(PandasError, KeyError): pass +_POSSIBLY_CAST_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]','O','int8','uint8','int16','uint16','int32','uint32','int64','uint64'] ]) + def isnull(obj): ''' Detect missing values (NaN in numeric arrays, None/NaN in object arrays) @@ -1038,6 +1040,9 @@ def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True): return values +def _possibly_castable(arr): + return arr.dtype not in _POSSIBLY_CAST_DTYPES + def _possibly_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ diff --git a/pandas/core/series.py b/pandas/core/series.py index 8c60bfdd582d6..919dd57ee70ab 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3196,7 +3196,6 @@ def remove_na(arr): """ return arr[notnull(arr)] - def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): @@ -3208,7 +3207,13 @@ def _sanitize_array(data, index, dtype=None, copy=False, else: data = data.copy() - def _try_cast(arr): + def _try_cast(arr, take_fast_path): + + # perf shortcut as this is the most common case + if take_fast_path: + if com._possibly_castable(arr) and not copy and dtype is None: + return arr + try: arr = com._possibly_cast_to_datetime(arr, dtype) subarr = pa.array(arr, dtype=dtype, copy=copy) @@ -3227,7 +3232,7 @@ def _try_cast(arr): # possibility of nan -> garbage if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype): if not isnull(data).any(): - subarr = _try_cast(data) + subarr = _try_cast(data, True) elif copy: subarr = data.copy() else: @@ -3239,9 +3244,9 @@ def _try_cast(arr): elif raise_cast_failure: raise TypeError('Cannot cast datetime64 to %s' % dtype) else: - subarr = _try_cast(data) + subarr = _try_cast(data, True) else: - subarr = _try_cast(data) + 
subarr = _try_cast(data, True) if copy: subarr = data.copy() @@ -3249,7 +3254,7 @@ def _try_cast(arr): elif isinstance(data, list) and len(data) > 0: if dtype is not None: try: - subarr = _try_cast(data) + subarr = _try_cast(data, False) except Exception: if raise_cast_failure: # pragma: no cover raise @@ -3262,7 +3267,7 @@ def _try_cast(arr): subarr = com._possibly_cast_to_datetime(subarr, dtype) else: - subarr = _try_cast(data) + subarr = _try_cast(data, False) # scalar like if subarr.ndim == 0:
This is only 1us improvement, but non-zero (I measure the existing one at 9us, this puts it at 10, before this change was about 11) difference is 3 function calls, so not much more we can do I think (as this is handling more cases) ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- series_constructor_ndarray | 0.0106 | 0.0120 | 0.8874 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [c54848f] : PERF: series construction perf enhancements, use a fast path based on dtype Base [9764ea6] : Merge pull request #3332 from jreback/period_perf2 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3333
2013-04-12T18:50:22Z
2013-04-12T19:07:36Z
2013-04-12T19:07:36Z
2014-06-21T22:22:12Z
PERF/CLN: infer Period in infer_dtype, later index inference is faster
diff --git a/pandas/core/index.py b/pandas/core/index.py index 57222d6a04c2f..43cb7734a1cc5 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -127,8 +127,8 @@ def __new__(cls, data, dtype=None, copy=False, name=None): from pandas.tseries.index import DatetimeIndex return DatetimeIndex(subarr, copy=copy, name=name) - if lib.is_period_array(subarr): - return PeriodIndex(subarr, name=name) + elif inferred == 'period': + return PeriodIndex(subarr, name=name) subarr = subarr.view(cls) subarr.name = name diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 3ecf513cc3212..7d13aa8ce6765 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -97,6 +97,10 @@ def infer_dtype(object _values): if is_timedelta_or_timedelta64_array(values): return 'timedelta' + elif is_period(val): + if is_period_array(values): + return 'period' + for i in range(n): val = util.get_value_1d(values, i) if util.is_integer_object(val): @@ -321,6 +325,10 @@ def is_time_array(ndarray[object] values): return False return True +def is_period(object o): + from pandas import Period + return isinstance(o,Period) + def is_period_array(ndarray[object] values): cdef int i, n = len(values) from pandas import Period
This was meant to fix the vbench: ctor_index_array_string see #3326 not sure if this is that stable of a perf fix ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_get_dtype_counts | 0.0960 | 0.1700 | 0.5647 | frame_insert_500_columns | 87.8456 | 142.5257 | 0.6163 | frame_iteritems_cached | 0.0820 | 0.1043 | 0.7860 | stat_ops_level_frame_sum | 2.5051 | 2.8849 | 0.8683 | ctor_index_array_string | 0.0333 | 0.0383 | 0.8693 | stat_ops_level_series_sum_multiple | 5.6090 | 6.4037 | 0.8759 | frame_fancy_lookup | 2.7517 | 3.1263 | 0.8802 | Target [f3b19d6] : PERF/CLN: infer Period in infer_dtype, later index inference is faster Base [df2c21c] : Merge pull request #3327 from jreback/index_perf ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3332
2013-04-12T16:18:05Z
2013-04-12T16:18:14Z
2013-04-12T16:18:14Z
2013-04-12T16:18:14Z
PERF: fixed int64 indexing perf issue when conversion to int64
diff --git a/pandas/core/index.py b/pandas/core/index.py index c30b5dca3a28a..57222d6a04c2f 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1332,11 +1332,6 @@ def inferred_type(self): def _constructor(self): return Int64Index - @cache_readonly - def _engine(self): - # property, for now, slow to look up - return self._engine_type(lambda: com._ensure_int64(self.values), len(self)) - @property def asi8(self): # do not cache or you'll create a memory leak diff --git a/pandas/index.pyx b/pandas/index.pyx index c13f1f506f5ed..2ad5474549ec6 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -272,6 +272,9 @@ cdef class IndexEngine: cdef class Int64Engine(IndexEngine): + cdef _get_index_values(self): + return algos.ensure_int64(self.vgetter()) + cdef _make_hash_table(self, n): return _hash.Int64HashTable(n)
fixes an issue I when using non-int64 Int64Indexes (which is very rare) anyhow ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_large_lookup_value | 0.0213 | 2.5117 | 0.0085 | Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [0933ba3] : PERF: fixed int64 indexing perf issue when conversion to int64 Base [8f92d9a] : Merge PR #2881 and fix unit test ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3331
2013-04-12T16:08:41Z
2013-04-12T16:08:52Z
2013-04-12T16:08:52Z
2013-04-12T16:14:26Z
PERF/CLN: infer Period in infer_dtype, later index inference is faster
diff --git a/pandas/core/index.py b/pandas/core/index.py index 57222d6a04c2f..43cb7734a1cc5 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -127,8 +127,8 @@ def __new__(cls, data, dtype=None, copy=False, name=None): from pandas.tseries.index import DatetimeIndex return DatetimeIndex(subarr, copy=copy, name=name) - if lib.is_period_array(subarr): - return PeriodIndex(subarr, name=name) + elif inferred == 'period': + return PeriodIndex(subarr, name=name) subarr = subarr.view(cls) subarr.name = name diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 3ecf513cc3212..7d13aa8ce6765 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -97,6 +97,10 @@ def infer_dtype(object _values): if is_timedelta_or_timedelta64_array(values): return 'timedelta' + elif is_period(val): + if is_period_array(values): + return 'period' + for i in range(n): val = util.get_value_1d(values, i) if util.is_integer_object(val): @@ -321,6 +325,10 @@ def is_time_array(ndarray[object] values): return False return True +def is_period(object o): + from pandas import Period + return isinstance(o,Period) + def is_period_array(ndarray[object] values): cdef int i, n = len(values) from pandas import Period
This was meant to fix the vbench: ctor_index_array_string see #3326 not sure if this is that stable of a perf fix ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_get_dtype_counts | 0.0960 | 0.1700 | 0.5647 | frame_insert_500_columns | 87.8456 | 142.5257 | 0.6163 | frame_iteritems_cached | 0.0820 | 0.1043 | 0.7860 | stat_ops_level_frame_sum | 2.5051 | 2.8849 | 0.8683 | ctor_index_array_string | 0.0333 | 0.0383 | 0.8693 | stat_ops_level_series_sum_multiple | 5.6090 | 6.4037 | 0.8759 | frame_fancy_lookup | 2.7517 | 3.1263 | 0.8802 | Target [f3b19d6] : PERF/CLN: infer Period in infer_dtype, later index inference is faster Base [df2c21c] : Merge pull request #3327 from jreback/index_perf ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3329
2013-04-12T14:40:13Z
2013-04-12T15:09:16Z
2013-04-12T15:09:16Z
2013-04-12T15:09:16Z
PERF: fixed int64 indexing perf issue when conversion to int64
diff --git a/pandas/core/index.py b/pandas/core/index.py index c30b5dca3a28a..57222d6a04c2f 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1332,11 +1332,6 @@ def inferred_type(self): def _constructor(self): return Int64Index - @cache_readonly - def _engine(self): - # property, for now, slow to look up - return self._engine_type(lambda: com._ensure_int64(self.values), len(self)) - @property def asi8(self): # do not cache or you'll create a memory leak diff --git a/pandas/index.pyx b/pandas/index.pyx index c13f1f506f5ed..2ad5474549ec6 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -272,6 +272,9 @@ cdef class IndexEngine: cdef class Int64Engine(IndexEngine): + cdef _get_index_values(self): + return algos.ensure_int64(self.vgetter()) + cdef _make_hash_table(self, n): return _hash.Int64HashTable(n)
fixes an issue I when using non-int64 Int64Indexes (which is very rare) anyhow ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_large_lookup_value | 0.0213 | 2.5117 | 0.0085 | Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [0933ba3] : PERF: fixed int64 indexing perf issue when conversion to int64 Base [8f92d9a] : Merge PR #2881 and fix unit test ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3327
2013-04-12T13:35:07Z
2013-04-12T13:53:41Z
2013-04-12T13:53:41Z
2014-07-07T14:09:14Z
BUG: ensure index casting works even in Int64Index
diff --git a/RELEASE.rst b/RELEASE.rst index a542a406fcfaa..9016a9efdcae3 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -293,6 +293,7 @@ pandas 0.11.0 - fixed pretty priniting of sets (GH3294_) - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (GH3303_) - DataFrame where with a datetimelike incorrectly selecting (GH3311_) + - Ensure index casts work even in Int64Index .. _GH3294: https://github.com/pydata/pandas/issues/3294 .. _GH622: https://github.com/pydata/pandas/issues/622 diff --git a/pandas/core/index.py b/pandas/core/index.py index aa0fd5b6b0351..5b10a2321a387 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1332,6 +1332,11 @@ def inferred_type(self): def _constructor(self): return Int64Index + @cache_readonly + def _engine(self): + # property, for now, slow to look up + return self._engine_type(lambda: com._ensure_int64(self.values), len(self)) + @property def asi8(self): # do not cache or you'll create a memory leak diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 5e5b24bb6734d..de0011185e35b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1836,6 +1836,17 @@ def test_set_index(self): self.assertRaises(Exception, setattr, self.mixed_frame, 'index', idx[::2]) + def test_set_index_cast(self): + + # issue casting an index then set_index + df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]}, + index = [2010,2011,2012]) + expected = df.ix[2010] + new_index = df.index.astype(np.int32) + df.index = new_index + result = df.ix[2010] + assert_series_equal(result,expected) + def test_set_index2(self): df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 'three', 'one', 'two'],
``` df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]}, index = [2010, 2011, 2012]) df.index = df.index.astype(int) df.ix[2010] ``` was failing - fixed up from the mailing list https://groups.google.com/forum/?fromgroups#!topic/pydata/ClJA5ldNQNQ
https://api.github.com/repos/pandas-dev/pandas/pulls/3322
2013-04-11T21:48:58Z
2013-04-12T04:14:37Z
2013-04-12T04:14:37Z
2013-04-12T04:14:49Z
SCR: add a script for tracking down all commits touching a named method
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py new file mode 100755 index 0000000000000..d23889ec80d05 --- /dev/null +++ b/scripts/find_commits_touching_func.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# copryright 2013, y-p @ github + +from __future__ import print_function + +"""Search the git history for all commits touching a named method + +You need the sh module to run this +WARNING: this script uses git clean -f, running it on a repo with untracked files +will probably erase them. +""" +import logging +import re +import os +from collections import namedtuple +from dateutil import parser + +try: + import sh +except ImportError: + raise ImportError("The 'sh' package is required in order to run this script. ") + +import argparse + +desc = """ +Find all commits touching a sepcified function across the codebase. +""".strip() +argparser = argparse.ArgumentParser(description=desc) +argparser.add_argument('funcname', metavar='FUNCNAME', + help='Name of function/method to search for changes on.') +argparser.add_argument('-f', '--file-masks', metavar='f_re(,f_re)*', + default=["\.py.?$"], + help='comma seperated list of regexes to match filenames against\n'+ + 'defaults all .py? 
files') +argparser.add_argument('-d', '--dir-masks', metavar='d_re(,d_re)*', + default=[], + help='comma seperated list of regexes to match base path against') +argparser.add_argument('-p', '--path-masks', metavar='p_re(,p_re)*', + default=[], + help='comma seperated list of regexes to match full file path against') +argparser.add_argument('-y', '--saw-the-warning', + action='store_true',default=False, + help='must specify this to run, acknowledge you realize this will erase untracked files') +argparser.add_argument('--debug-level', + default="CRITICAL", + help='debug level of messages (DEBUG,INFO,etc...)') + +args = argparser.parse_args() + + +lfmt = logging.Formatter(fmt='%(levelname)-8s %(message)s', + datefmt='%m-%d %H:%M:%S' +) + +shh = logging.StreamHandler() +shh.setFormatter(lfmt) + +logger=logging.getLogger("findit") +logger.addHandler(shh) + + +Hit=namedtuple("Hit","commit path") +HASH_LEN=8 + +def clean_checkout(comm): + h,s,d = get_commit_vitals(comm) + if len(s) > 60: + s = s[:60] + "..." 
+ s=s.split("\n")[0] + logger.info("CO: %s %s" % (comm,s )) + + sh.git('checkout', comm ,_tty_out=False) + sh.git('clean', '-f') + +def get_hits(defname,files=()): + cs=set() + for f in files: + try: + r=sh.git('blame', '-L', '/def\s*{start}/,/def/'.format(start=defname),f,_tty_out=False) + except sh.ErrorReturnCode_128: + logger.debug("no matches in %s" % f) + continue + + lines = r.strip().splitlines()[:-1] + # remove comment lines + lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#",x)] + hits = set(map(lambda x: x.split(" ")[0],lines)) + cs.update(set([Hit(commit=c,path=f) for c in hits])) + + return cs + +def get_commit_info(c,fmt,sep='\t'): + r=sh.git('log', "--format={}".format(fmt), '{}^..{}'.format(c,c),"-n","1",_tty_out=False) + return unicode(r).split(sep) + +def get_commit_vitals(c,hlen=HASH_LEN): + h,s,d= get_commit_info(c,'%H\t%s\t%ci',"\t") + return h[:hlen],s,parser.parse(d) + +def file_filter(state,dirname,fnames): + if args.dir_masks and not any([re.search(x,dirname) for x in args.dir_masks]): + return + for f in fnames: + p = os.path.abspath(os.path.join(os.path.realpath(dirname),f)) + if any([re.search(x,f) for x in args.file_masks])\ + or any([re.search(x,p) for x in args.path_masks]): + if os.path.isfile(p): + state['files'].append(p) + +def search(defname,head_commit="HEAD"): + HEAD,s = get_commit_vitals("HEAD")[:2] + logger.info("HEAD at %s: %s" % (HEAD,s)) + done_commits = set() + # allhits = set() + files = [] + state = dict(files=files) + os.path.walk('.',file_filter,state) + # files now holds a list of paths to files + + # seed with hits from q + allhits= set(get_hits(defname, files = files)) + q = set([HEAD]) + try: + while q: + h=q.pop() + clean_checkout(h) + hits = get_hits(defname, files = files) + for x in hits: + prevc = get_commit_vitals(x.commit+"^")[0] + if prevc not in done_commits: + q.add(prevc) + allhits.update(hits) + done_commits.add(h) + + logger.debug("Remaining: %s" % q) + finally: + logger.info("Restoring 
HEAD to %s" % HEAD) + clean_checkout(HEAD) + return allhits + +def pprint_hits(hits): + SUBJ_LEN=50 + PATH_LEN = 20 + hits=list(hits) + max_p = 0 + for hit in hits: + p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1] + max_p=max(max_p,len(p)) + + if max_p < PATH_LEN: + SUBJ_LEN += PATH_LEN - max_p + PATH_LEN = max_p + + def sorter(i): + h,s,d=get_commit_vitals(hits[i].commit) + return hits[i].path,d + + print("\nThese commits touched the %s method in these files on these dates:\n" \ + % args.funcname) + for i in sorted(range(len(hits)),key=sorter): + hit = hits[i] + h,s,d=get_commit_vitals(hit.commit) + p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1] + + fmt = "{:%d} {:10} {:<%d} {:<%d}" % (HASH_LEN, SUBJ_LEN, PATH_LEN) + if len(s) > SUBJ_LEN: + s = s[:SUBJ_LEN-5] + " ..." + print(fmt.format(h[:HASH_LEN],d.isoformat()[:10],s,p[-20:]) ) + + print("\n") + +def main(): + if not args.saw_the_warning: + argparser.print_help() + print(""" +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +WARNING: this script uses git clean -f, running it on a repo with untracked files. +It's recommended that you make a fresh clone and run from it's root directory. +You must specify the -y argument to ignore this warning. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +""") + return + if isinstance(args.file_masks,basestring): + args.file_masks = args.file_masks.split(',') + if isinstance(args.path_masks,basestring): + args.path_masks = args.path_masks.split(',') + if isinstance(args.dir_masks,basestring): + args.dir_masks = args.dir_masks.split(',') + + logger.setLevel(getattr(logging,args.debug_level)) + + hits=search(args.funcname) + pprint_hits(hits) + + pass + +if __name__ == "__main__": + import sys + sys.exit(main())
New script to get a change hirsoty for a functions across the code base. Initially, I hope this well help define "golden" commits for the various adhoc versions of pickled pandas objects, so we can finish off #3310 , get robust back-compat testing and feel easier about touching the serialization code (which always scares me a little, pesonally). That's also prep for #3297, and the changes jeff suggested there. Example usage (check out the --help for filtering): ``` λ cd /tmp/pandas/pandas λ ~/src/pandas/scripts/find_commits_touching_func.py __getstate__ These commits touched the __getstate__ method in these files on these dates: 138b3ef6 2010-01-05 * Cleaned up and sped up DataMatrix and DataFram ... core/frame.py f49a3719 2010-06-04 code reorg in Series, miscellaneous groupby refa ... core/frame.py 9bddd0f4 2010-12-11 working toward consistent column ordering in Dat ... core/frame.py c35a39de 2011-06-12 bit the bullet and did the big merge. now time t ... core/frame.py 739f8a1f 2011-06-09 picklability, column reordering issues, more ref ... core/internals.py 3133a6b0 2011-06-12 need some changes to block internals, getting st ... core/internals.py 1ef60ee0 2011-06-12 unit tests pass...winning! core/internals.py 32b9481d 2011-06-21 mixed-type fixes in DataFrame statistical methods core/internals.py 4833ea9d 2011-06-30 biting the bullet, working to make generic n-dim ... core/internals.py 80b91769 2011-08-15 ENH: support picklability of Index subclasses core/internals.py 0f6d8b43 2009-09-01 latest edits, miscellaneous cleanup and bug fixe ... core/matrix.py 138b3ef6 2010-01-05 * Cleaned up and sped up DataMatrix and DataFram ... core/matrix.py 3ef46c24 2010-04-12 code rearrangement core/matrix.py 739f8a1f 2011-06-09 picklability, column reordering issues, more ref ... core/matrix.py 6fd65acd 2009-11-20 merging latest developments, in particular LongP ... core/panel.py bf621508 2010-01-09 fleshing out WidePanel functionality. miscellane ... 
core/panel.py 00494462 2010-06-21 committing tests and changes in broken state, to ... core/panel.py 9eba5770 2011-06-30 removed usages of indexMap, misc panel stuff core/panel.py 8b4d2084 2011-05-03 skeleton of sparse WidePanel, unit tests and stu ... core/sparse.py 4218857a 2011-05-13 correct pickling of sparse frame/panel. setitem/ ... core/sparse.py b3c12b15 2011-12-10 REF: broke apart sparse.py, created sparse subpackage sparse/frame.py a5f773bd 2012-06-04 BUG: cast other datetime64 units to nanos from I ... sparse/frame.py b3c12b15 2011-12-10 REF: broke apart sparse.py, created sparse subpackage sparse/panel.py 5fda0afc 2012-04-04 ENH: non-unique index support. much more testing ... sparse/panel.py ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3316
2013-04-11T01:19:57Z
2013-04-11T05:03:16Z
2013-04-11T05:03:16Z
2014-07-08T12:01:29Z
DOC add dayfirst to to_datetime
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index c93d9cd425eef..e35b80fff013f 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -58,9 +58,15 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, arg : string, datetime, array of strings (with possible NAs) errors : {'ignore', 'raise'}, default 'ignore' Errors are ignored by default (values left untouched) + dayfirst : boolean, default False + If True parses dates with the day first, eg 20/01/2005 utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well) + box : boolean, default True + If True returns a DatetimeIndex, if False returns ndarray of values + format : string, default None + strftime to parse time, eg "%d/%m/%Y" Returns -------
At the moment only some of the arguments are shown in the docstring for to_datetime, which can [lead to unnecessary confusion](http://stackoverflow.com/questions/15929861/pandas-to-datetime-inconsistent-time-string-format/15937890#15937890). This adds all of them to the docstring. ~~Also a cheeky enhancement to use print configs default to set date (copied from `parse_time_string`), not sure how to test that one.... tbh **I think it would make sense to default dayfirst to None**, the current behaviour can be strange (as seen in the above question).~~ Thoughts?
https://api.github.com/repos/pandas-dev/pandas/pulls/3314
2013-04-10T23:47:41Z
2013-04-13T00:21:54Z
2013-04-13T00:21:54Z
2013-04-13T00:21:55Z
TST: test fix on GH3235 (failing because of system dtype on different sy...
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4f17135385748..20ff6e95b436c 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1154,17 +1154,17 @@ def test_where(self): self.assertRaises(Exception, s.__setitem__, tuple(mask), values) # GH3235 - s = Series(np.arange(10)) + s = Series(np.arange(10),dtype='int64') mask = s < 5 s[mask] = range(2,7) - expected = Series(range(2,7) + range(5,10)) + expected = Series(range(2,7) + range(5,10),dtype='int64') assert_series_equal(s, expected) self.assertEquals(s.dtype, expected.dtype) - s = Series(np.arange(10)) + s = Series(np.arange(10),dtype='int64') mask = s > 5 s[mask] = [0]*4 - expected = Series([0,1,2,3,4,5] + [0]*4) + expected = Series([0,1,2,3,4,5] + [0]*4,dtype='int64') assert_series_equal(s,expected) s = Series(np.arange(10))
...stems)
https://api.github.com/repos/pandas-dev/pandas/pulls/3313
2013-04-10T15:08:27Z
2013-04-10T18:24:33Z
2013-04-10T18:24:33Z
2014-06-17T19:53:14Z
BUG: GH3311 Dataframe where with a datetimelike was not correctly selecting
diff --git a/RELEASE.rst b/RELEASE.rst index e01ccad6db96a..a542a406fcfaa 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -292,6 +292,7 @@ pandas 0.11.0 spacing (GH3258_) - fixed pretty priniting of sets (GH3294_) - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (GH3303_) + - DataFrame where with a datetimelike incorrectly selecting (GH3311_) .. _GH3294: https://github.com/pydata/pandas/issues/3294 .. _GH622: https://github.com/pydata/pandas/issues/622 @@ -400,6 +401,7 @@ pandas 0.11.0 .. _GH3258: https://github.com/pydata/pandas/issues/3258 .. _GH3283: https://github.com/pydata/pandas/issues/3283 .. _GH2919: https://github.com/pydata/pandas/issues/2919 +.. _GH3311: https://github.com/pydata/pandas/issues/3311 pandas 0.10.1 ============= diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 1ab6eb05b86d4..b44ef5d465bb9 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -284,6 +284,14 @@ def _try_cast_result(self, result): we may have roundtripped thru object in the mean-time """ return result + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + return values, other + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + return result + def to_native_types(self, slicer=None, na_rep='', **kwargs): """ convert to our native types format, slicing if desired """ @@ -454,9 +462,10 @@ def eval(self, func, other, raise_on_error = True, try_cast = False): values = values.T is_transposed = True + values, other = self._try_coerce_args(values, other) args = [ values, other ] try: - result = func(*args) + result = self._try_coerce_result(func(*args)) except (Exception), detail: if raise_on_error: raise TypeError('Could not operate [%s] with block values [%s]' @@ -529,8 +538,9 @@ def func(c,v,o): if c.ravel().all(): return v + v, o = self._try_coerce_args(v, o) try: - return expressions.where(c, v, o, raise_on_error=True) + return 
self._try_coerce_result(expressions.where(c, v, o, raise_on_error=True)) except (Exception), detail: if raise_on_error: raise TypeError('Could not operate [%s] with block values [%s]' @@ -735,6 +745,29 @@ def _try_cast(self, element): except: return element + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments + we are going to compare vs i8, so coerce to integer + values is always ndarra like, other may not be """ + values = values.view('i8') + if isinstance(other, datetime): + other = lib.Timestamp(other).asm8.view('i8') + elif isnull(other): + other = tslib.iNaT + else: + other = other.view('i8') + + return values, other + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype == 'i8': + result = tslib.array_to_datetime(result.astype(object).ravel()).reshape(result.shape) + elif isinstance(result, np.integer): + result = lib.Timestamp(result) + return result + def to_native_types(self, slicer=None, na_rep=None, **kwargs): """ convert to our native types format, slicing if desired """ diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 213547c4132b9..5e5b24bb6734d 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -22,6 +22,7 @@ import pandas.core.datetools as datetools from pandas.core.api import (DataFrame, Index, Series, notnull, isnull, MultiIndex, DatetimeIndex, Timestamp, Period) +from pandas import date_range from pandas.io.parsers import read_csv from pandas.util.testing import (assert_almost_equal, @@ -2197,7 +2198,6 @@ def test_constructor_error_msgs(self): self.assert_("Mixing dicts with non-Series may lead to ambiguous ordering." 
in str(detail)) # wrong size ndarray, GH 3105 - from pandas import date_range try: DataFrame(np.arange(12).reshape((4, 3)), columns=['foo', 'bar', 'baz'], index=date_range('2000-01-01', periods=3)) @@ -2888,7 +2888,6 @@ def test_constructor_with_datetimes(self): assert_series_equal(result, expected) # GH 2809 - from pandas import date_range ind = date_range(start="2000-01-01", freq="D", periods=10) datetimes = [ts.to_pydatetime() for ts in ind] datetime_s = Series(datetimes) @@ -2975,7 +2974,6 @@ def test_constructor_for_list_with_dtypes(self): def test_timedeltas(self): - from pandas import date_range df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')), B = Series([ timedelta(days=i) for i in range(3) ]))) result = df.get_dtype_counts() @@ -3001,7 +2999,6 @@ def test_timedeltas(self): def test_operators_timedelta64(self): - from pandas import date_range from datetime import datetime, timedelta df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'), B = date_range('2012-1-2', periods=3, freq='D'), @@ -6838,6 +6835,19 @@ def test_where_bug(self): result.where(result > 2, np.nan, inplace=True) assert_frame_equal(result, expected) + def test_where_datetime(self): + + # GH 3311 + df = DataFrame(dict(A = date_range('20130102',periods=5), + B = date_range('20130104',periods=5), + C = np.random.randn(5))) + + stamp = datetime(2013,1,3) + result = df[df>stamp] + expected = df.copy() + expected.loc[[0,1],'A'] = np.nan + assert_frame_equal(result,expected) + def test_mask(self): df = DataFrame(np.random.randn(5, 3)) cond = df > 0
closes #3311
https://api.github.com/repos/pandas-dev/pandas/pulls/3312
2013-04-10T15:01:47Z
2013-04-10T18:25:33Z
2013-04-10T18:25:33Z
2018-07-23T04:16:06Z
TST/CLN: legacy pickle testing
https://github.com/pandas-dev/pandas/pull/3310.diff
CLN: - standardized testing data to live under tests/data (for that set of tests), - added method `util/testing.get_data_path` to give you this path TST: - added `io/tests/test_pickle.py` and `io/tests/generate_legacy_pickles.py` to generate legacy pickle files and provide comparsions just run `python path_to_generate_legacy_pickles.py` it will create a version of pickle for that python version, arch, and system, and store in `pandas/io/tests/data/legacy_pickle/VERSION` - sample pickles for windows-64/2.7/0.10.1 and linux/2.7/0.10.1 and linux/2.7/0.11.0 added - need to run this on various combinations to generate the reference library - allows backwards compatibility testing for these pickles
https://api.github.com/repos/pandas-dev/pandas/pulls/3310
2013-04-10T13:59:18Z
2013-04-12T05:07:46Z
2013-04-12T05:07:46Z
2014-08-06T13:12:24Z
PR: Panel and Panel.from_dict() don't honor ordering when passed OrderedDict
diff --git a/RELEASE.rst b/RELEASE.rst index a5f9a9aad9447..db68a9590f329 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -287,6 +287,7 @@ pandas 0.11.0 - Fix Python ascii file parsing when integer falls outside of floating point spacing (GH3258_) - fixed pretty priniting of sets (GH3294_) + - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (GH3303_) .. _GH3294: https://github.com/pydata/pandas/issues/3294 .. _GH622: https://github.com/pydata/pandas/issues/622 diff --git a/pandas/core/panel.py b/pandas/core/panel.py index d1f87e4e7c932..8e18e93e955ef 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -13,7 +13,7 @@ from pandas.core.index import (Index, MultiIndex, _ensure_index, _get_combined_index) from pandas.core.indexing import _maybe_droplevels, _is_list_like -from pandas.core.internals import (BlockManager, +from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series @@ -274,14 +274,18 @@ def _from_axes(cls, data, axes): return cls(data, **d) def _init_dict(self, data, axes, dtype=None): + from pandas.util.compat import OrderedDict haxis = axes.pop(self._het_axis) # prefilter if haxis passed if haxis is not None: haxis = _ensure_index(haxis) - data = dict((k, v) for k, v in data.iteritems() if k in haxis) + data = OrderedDict((k, v) for k, v in data.iteritems() if k in haxis) else: - haxis = Index(_try_sort(data.keys())) + ks = data.keys() + if not isinstance(data,OrderedDict): + ks = _try_sort(ks) + haxis = Index(ks) for k, v in data.iteritems(): if isinstance(v, dict): @@ -341,11 +345,11 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): ------- Panel """ - from collections import defaultdict + from pandas.util.compat import OrderedDict,OrderedDefaultdict orient = orient.lower() if orient == 'minor': - new_data = defaultdict(dict) + new_data = OrderedDefaultdict(dict) for col, df in data.iteritems(): for 
item, s in df.iteritems(): new_data[item][col] = s @@ -354,7 +358,10 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): raise ValueError('only recognize items or minor for orientation') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) - d[cls._info_axis] = Index(sorted(d['data'].keys())) + ks = d['data'].keys() + if not isinstance(d['data'],OrderedDict): + ks = list(sorted(ks)) + d[cls._info_axis] = Index(ks) return cls(**d) def __getitem__(self, key): @@ -1491,9 +1498,13 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): ------- dict of aligned results & indicies """ - result = {} + from pandas.util.compat import OrderedDict - adj_frames = {} + result = dict() + if isinstance(frames,OrderedDict): # caller differs dict/ODict, presered type + result = OrderedDict() + + adj_frames = OrderedDict() for k, v in frames.iteritems(): if isinstance(v, dict): adj_frames[k] = self._constructor_sliced(v) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 921097e3408fd..437f8b7279824 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -19,7 +19,9 @@ assert_frame_equal, assert_series_equal, assert_almost_equal, - ensure_clean) + ensure_clean, + makeCustomDataframe as mkdf + ) import pandas.core.panel as panelm import pandas.util.testing as tm @@ -904,6 +906,16 @@ def test_constructor_dict_mixed(self): data['ItemB'] = self.panel['ItemB'].values[:, :-1] self.assertRaises(Exception, Panel, data) + def test_ctor_orderedDict(self): + from pandas.util.compat import OrderedDict + keys = list(set(np.random.randint(0,5000,100)))[:50] # unique random int keys + d = OrderedDict([(k,mkdf(10,5)) for k in keys]) + p = Panel(d) + self.assertTrue(list(p.items) == keys) + + p = Panel.from_dict(d) + self.assertTrue(list(p.items) == keys) + def test_constructor_resize(self): data = self.panel._data items = self.panel.items[:-1] diff --git a/pandas/util/compat.py b/pandas/util/compat.py index 
41055f48c2fac..c18044fc6c492 100644 --- a/pandas/util/compat.py +++ b/pandas/util/compat.py @@ -475,3 +475,28 @@ def __and__(self, other): Counter = _Counter else: from collections import OrderedDict, Counter + +# http://stackoverflow.com/questions/4126348 +# Thanks to @martineau at SO + +class OrderedDefaultdict(OrderedDict): + def __init__(self, *args, **kwargs): + newdefault = None + newargs = () + if args: + newdefault = args[0] + if not (newdefault is None or callable(newdefault)): + raise TypeError('first argument must be callable or None') + newargs = args[1:] + self.default_factory = newdefault + super(self.__class__, self).__init__(*newargs, **kwargs) + + def __missing__ (self, key): + if self.default_factory is None: + raise KeyError(key) + self[key] = value = self.default_factory() + return value + + def __reduce__(self): # optional, for pickle support + args = self.default_factory if self.default_factory else tuple() + return type(self), args, None, None, self.items()
#3303
https://api.github.com/repos/pandas-dev/pandas/pulls/3304
2013-04-09T22:19:50Z
2013-04-09T23:03:06Z
2013-04-09T23:03:06Z
2014-06-16T07:12:33Z
Work around mpl 1.2.1 regression re bar log plot GH3298, GH3254
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 51b2676abb0ae..b130aabeb2fe6 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -404,6 +404,21 @@ def test_bar_center(self): ax = df.plot(kind='bar', grid=True) self.assertEqual(ax.xaxis.get_ticklocs()[0], ax.patches[0].get_x() + ax.patches[0].get_width()) + @slow + def test_bar_log(self): + # GH3254, GH3298 matplotlib/matplotlib#1882, #1892 + # regressions in 1.2.1 + + df = DataFrame({'A': [3] * 5, 'B': range(5)}, index=range(5)) + ax = df.plot(kind='bar', grid=True,log=True) + self.assertEqual(ax.yaxis.get_ticklocs()[0],1.0) + + p1 = Series([200,500]).plot(log=True,kind='bar') + p2 = DataFrame([Series([200,300]),Series([300,500])]).plot(log=True,kind='bar',subplots=True) + + (p1.yaxis.get_ticklocs() == np.array([ 0.625, 1.625])) + (p2[0].yaxis.get_ticklocs() == np.array([ 100., 1000.])).all() + (p2[1].yaxis.get_ticklocs() == np.array([ 100., 1000.])).all() @slow def test_boxplot(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 723e581ab2c16..1f879f9ace133 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1325,6 +1325,7 @@ def __init__(self, data, **kwargs): else: self.tickoffset = 0.375 self.bar_width = 0.5 + self.log = kwargs.pop('log',False) MPLPlot.__init__(self, data, **kwargs) def _args_adjust(self): @@ -1335,9 +1336,9 @@ def _args_adjust(self): def bar_f(self): if self.kind == 'bar': def f(ax, x, y, w, start=None, **kwds): - return ax.bar(x, y, w, bottom=start, **kwds) + return ax.bar(x, y, w, bottom=start,log=self.log, **kwds) elif self.kind == 'barh': - def f(ax, x, y, w, start=None, **kwds): + def f(ax, x, y, w, start=None, log=self.log, **kwds): return ax.barh(x, y, w, left=start, **kwds) else: raise NotImplementedError @@ -1354,6 +1355,7 @@ def _get_colors(self): return colors def _make_plot(self): + import matplotlib as mpl colors = self._get_colors() rects = [] labels = [] @@ -1371,10 
+1373,15 @@ def _make_plot(self): kwds = self.kwds.copy() kwds['color'] = colors[i % len(colors)] + # default, GH3254 + # I tried, I really did. + start = 0 if mpl.__version__ == "1.2.1" else None if self.subplots: ax = self._get_ax(i) # self.axes[i] - rect = bar_f(ax, self.ax_pos, y, - self.bar_width, **kwds) + + rect = bar_f(ax, self.ax_pos, y, self.bar_width, + start = start, + **kwds) ax.set_title(label) elif self.stacked: mask = y > 0 @@ -1385,6 +1392,7 @@ def _make_plot(self): neg_prior = neg_prior + np.where(mask, 0, y) else: rect = bar_f(ax, self.ax_pos + i * 0.75 / K, y, 0.75 / K, + start = start, label=label, **kwds) rects.append(rect) labels.append(label) @@ -1404,7 +1412,8 @@ def _post_plot_logic(self): ax.set_xticks(self.ax_pos + self.tickoffset) ax.set_xticklabels(str_index, rotation=self.rot, fontsize=self.fontsize) - ax.axhline(0, color='k', linestyle='--') + if not self.log: # GH3254+ + ax.axhline(0, color='k', linestyle='--') if name is not None: ax.set_xlabel(name) else:
#3298, #3254 added tests and Tested visually on 1.2.0 and 1.2.1. y-p's 2nd rule of github - if you see an area in the codebase that you never ever want to touch, you'll end up maintaining it.
https://api.github.com/repos/pandas-dev/pandas/pulls/3300
2013-04-09T19:34:39Z
2013-04-09T20:39:25Z
2013-04-09T20:39:25Z
2013-04-09T20:39:25Z
BUG: DataFrame.hist passes keyword arguments to mpl.hist; closes #3296
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 51b2676abb0ae..eae2ea6bbc086 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -442,6 +442,7 @@ def test_kde(self): @slow def test_hist(self): + import matplotlib.pyplot as plt df = DataFrame(np.random.randn(100, 4)) _check_plot_works(df.hist) _check_plot_works(df.hist, grid=False) @@ -462,7 +463,7 @@ def test_hist(self): # make sure sharex, sharey is handled _check_plot_works(df.hist, sharex=True, sharey=True) - # make sure kwargs are handled + # make sure xlabelsize and xrot are handled ser = df[0] xf, yf = 20, 20 xrot, yrot = 30, 30 @@ -486,6 +487,21 @@ def test_hist(self): self.assertAlmostEqual(xtick.get_fontsize(), xf) self.assertAlmostEqual(xtick.get_rotation(), xrot) + plt.close('all') + # make sure kwargs to hist are handled + ax = ser.hist(normed=True, cumulative=True, bins=4) + # height of last bin (index 5) must be 1.0 + self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0) + + plt.close('all') + ax = ser.hist(log=True) + # scale of y must be 'log' + self.assert_(ax.get_yscale() == 'log') + + plt.close('all') + # propagate attr exception from matplotlib.Axes.hist + self.assertRaises(AttributeError, ser.hist, foo='bar') + @slow def test_scatter(self): _skip_if_no_scipy() @@ -724,6 +740,28 @@ def test_grouped_hist(self): for ax in axes.ravel(): self.assert_(len(ax.patches) > 0) + plt.close('all') + # make sure kwargs to hist are handled + axes = plotting.grouped_hist(df.A, by=df.C, normed=True, + cumulative=True, bins=4) + + # height of last bin (index 5) must be 1.0 + for ax in axes.ravel(): + height = ax.get_children()[5].get_height() + self.assertAlmostEqual(height, 1.0) + + plt.close('all') + axes = plotting.grouped_hist(df.A, by=df.C, log=True) + # scale of y must be 'log' + for ax in axes.ravel(): + self.assert_(ax.get_yscale() == 'log') + + plt.close('all') + # propagate attr exception from matplotlib.Axes.hist + 
self.assertRaises(AttributeError, plotting.grouped_hist, df.A, + by=df.C, foo='bar') + + def test_option_mpl_style(self): # just a sanity check try: diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 723e581ab2c16..06e4acb1d7dcb 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -645,9 +645,9 @@ def r(h): return ax -def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False, +def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, layout=None, sharex=False, sharey=False, - rot=90, **kwargs): + rot=90, grid=True, **kwargs): """ Grouped histogram @@ -658,19 +658,20 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False, by: object, optional ax: axes, optional bins: int, default 50 - log: boolean, default False figsize: tuple, optional layout: optional sharex: boolean, default False sharey: boolean, default False rot: int, default 90 + grid: bool, default True + kwargs: dict, keyword arguments passed to matplotlib.Axes.hist Returns ------- axes: collection of Matplotlib Axes """ def plot_group(group, ax): - ax.hist(group.dropna().values, bins=bins) + ax.hist(group.dropna().values, bins=bins, **kwargs) fig, axes = _grouped_plot(plot_group, data, column=column, by=by, sharex=sharex, sharey=sharey,
This is a fix for https://github.com/pydata/pandas/issues/3296 . Now keyword arguments to `plotting.grouped_hist` are passed to `matplotlib.Axes.hist`. This allows you to create e.g. normalized, cumulative, or log scaled histograms:: ``` df = pd.DataFrame(np.random.randn(500, 2), columns=['A', 'B']) df['C'] = np.random.randint(0, 4, 500) df.hist(column=['A'], by=df.C, normed=True) ``` PS: the guideline says I should enable Travis-CI on my fork - is there a tutorial how to do that? thx!
https://api.github.com/repos/pandas-dev/pandas/pulls/3299
2013-04-09T19:15:11Z
2013-04-10T07:01:03Z
2013-04-10T07:01:03Z
2013-04-10T07:01:03Z
WIP: Support metadata at de/serialization time
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0d7913819f115..1bd9b8c55a762 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -450,6 +450,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, raise PandasError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr) + self.meta=dict() @classmethod def _from_axes(cls, data, axes): @@ -1696,10 +1697,20 @@ def swapaxes(self, i, j): # Picklability def __getstate__(self): - return self._data + return self._data,dict(meta=self.meta) def __setstate__(self, state): # old DataFrame pickle + attrs = {} + if ( isinstance(state, tuple) + and isinstance(state[0],BlockManager) + and isinstance(state[1],dict)): + attrs=state[1] + + # put things back to the prev version and + # reuse the old path + state = state[0] + if isinstance(state, BlockManager): self._data = state elif isinstance(state[0], dict): # pragma: no cover @@ -1711,6 +1722,9 @@ def __setstate__(self, state): # ordinarily created in NDFrame self._item_cache = {} + for k,v in attrs.items(): + setattr(self,k,v) + # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array diff --git a/pandas/core/panel.py b/pandas/core/panel.py index d1f87e4e7c932..b55dd6d0dc59b 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -238,6 +238,7 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, self._init_data( data=data, items=items, major_axis=major_axis, minor_axis=minor_axis, copy=copy, dtype=dtype) + self.meta = {} def _init_data(self, data, copy, dtype, **kwargs): """ generate ND initialization; axes are passed as required objects to __init__ """ @@ -706,10 +707,21 @@ def pop(self, item): def __getstate__(self): "Returned pickled representation of the panel" - return self._data + return self._data,dict(meta=self.meta) def __setstate__(self, state): # old Panel pickle + attrs = {} + print( state) + if ( 
isinstance(state, tuple) + and isinstance(state[0],BlockManager) + and isinstance(state[1],dict)): + attrs = state[1] + + # put things back to the prev version and + # reuse the old path + state = state[0] + if isinstance(state, BlockManager): self._data = state elif len(state) == 4: # pragma: no cover @@ -718,6 +730,9 @@ def __setstate__(self, state): raise ValueError('unrecognized pickle') self._item_cache = {} + for k,v in attrs.items(): + setattr(self,k,v) + def _unpickle_panel_compat(self, state): # pragma: no cover "Unpickle the panel" _unpickle = com._unpickle_array diff --git a/pandas/core/series.py b/pandas/core/series.py index 8427274488cef..52e9193089445 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -509,6 +509,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, input data copy : boolean, default False """ + self.meta = {} pass @property @@ -539,7 +540,7 @@ def __contains__(self, key): def __reduce__(self): """Necessary for making this object picklable""" object_state = list(ndarray.__reduce__(self)) - subclass_state = (self.index, self.name) + subclass_state = (self.index, dict(name=self.name,meta=self.meta)) object_state[2] = (object_state[2], subclass_state) return tuple(object_state) @@ -548,6 +549,16 @@ def __setstate__(self, state): nd_state, own_state = state ndarray.__setstate__(self, nd_state) + attrs = {} + if len(own_state) > 1 and isinstance(own_state[1],dict): + attrs = own_state[1] + + # and put things back they the previous pickle + # schema worked + own_state = (own_state[0],attrs.get('name')) + + index, dict_or_name = own_state[0], None + # backwards compat index, name = own_state[0], None if len(own_state) > 1: @@ -556,6 +567,9 @@ def __setstate__(self, state): self.index = _handle_legacy_indexes([index])[0] self.name = name + for k,v in attrs.items(): + setattr(self,k,v) + # indexers @property def axes(self): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 
213547c4132b9..80f4f0aa23c37 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -27,7 +27,8 @@ from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, - ensure_clean) + ensure_clean, + makeCustomDataframe as mkdf ) from pandas.util import py3compat from pandas.util.compat import OrderedDict @@ -4616,7 +4617,6 @@ def test_to_csv_from_csv(self): @slow def test_to_csv_moar(self): - from pandas.util.testing import makeCustomDataframe as mkdf path = '__tmp_to_csv_moar__' chunksize=1000 @@ -6021,7 +6021,6 @@ def test_replace_mixed(self): assert_frame_equal(result,expected) # test case from - from pandas.util.testing import makeCustomDataframe as mkdf df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') }) result = df.replace(3, df.mean().to_dict()) expected = df.copy().astype('float64') @@ -9428,6 +9427,20 @@ def test_any_all(self): # df.any(1, bool_only=True) # df.all(1, bool_only=True) + def test_meta_serialization(self): + import pandas as pd + df=mkdf(10,5) + df.meta == {} + # create some kv pairs for serialization + df.meta['Im']="persistent" + # roundtrip + with ensure_clean() as path: + df.save(path) + dfrt =pd.load(path) + + # still here + self.assertEqual(dfrt.meta['Im'],'persistent') + def test_consolidate_datetime64(self): # numpy vstack bug diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 921097e3408fd..55632e9e8424f 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -40,6 +40,21 @@ def test_pickle(self): unpickled = cPickle.loads(pickled) assert_frame_equal(unpickled['ItemA'], self.panel['ItemA']) + def test_meta_serialization(self): + import pandas as pd + + p = self.panel + p.meta = {} + # create some kv pairs for serialization + p.meta['Im']="persistent" + # roundtrip + with ensure_clean() as path: + p.save(path) + prt =pd.load(path) + + # still here + self.assertEqual(prt.meta['Im'],'persistent') + def 
test_cumsum(self): cumsum = self.panel.cumsum() assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum()) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4f17135385748..82261027a7878 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -540,6 +540,20 @@ def test_fromDict(self): series = Series(data, dtype=float) self.assert_(series.dtype == np.float64) + def test_meta_serialization(self): + import pandas as pd + s=Series([np.random.randn(100)]) + s.meta == {} + # create some kv pairs for serialization + s.meta['Im']="persistent" + # roundtrip + with ensure_clean() as path: + s.save(path) + srt =pd.load(path) + + # still here + self.assertEqual(srt.meta['Im'],'persistent') + def test_from_json_to_json(self): raise nose.SkipTest
#2485, with much reduced scope. While collecting vbench data recently, it became painfully obvious how useful it would be to be able to attach metadata tags to serialized frames/series/panel. This is a rough draft for something I'd like to see happen in 0.12. It creates yet another version of pickle files, so much testing needs to be done there. There's a plan (#686, timeline unclear) for implementing a binary serialization format for pandas, which will need to replicate this functionality if this makes it in. @jreback, if you have something planned in this direction, I'm glad to withdraw this PR, it's just a statement of intent to prod us into getting something working during the 0.12 release cycle. The design choices I'm going for right now: - **no propagation** semantics through operations, the metadata tags are restored if present, and are guranteed on load, only. Example use case : store measurements as dataframes with data, location, etc'. Then, load a mess of them back up and (generally) you may use the metadata either as column/index labels, or just pick out a subset based on them, sort based on them, etc'. **edit:** Document this very clearly. - **only JSON-able** data allowed in dicts. Allowing anything else would bring us back to the pickle problem, might create issues if #686 gets implemented (difficulty in serializing arbitrary objects in metadata without using pickle), and is generally an unknown quantity. JSON can cover a lot of mileage. letting objects define their own way to be serialized for metadata purposes is reinventing pickle, so no. (JSONable not yet enforced in the code). - TODO: consider the binary blob case. - TODO: if there's no propogation, should the data live on the object at all? ( what about `o, meta = pd.load()`?) - reserved keywords/namespace for pandas' use? - establish naming convention (for reserved kwds or more broadly?) - make it minimal, no setdefault(), save/load -time hooks etc, users should roll their own there. 
- currently, it's just a dict `.meta` that gets pickled and unpickled with the object. - versioning/schema to be baked in as reserved keywords? the pickle code taught me that much. - The usual "I want tab completion/attr access/call it _foo" discussion to follow. ``` python In [8]: df=mkdf(10,5) ...: df.meta['Im']="persistent" ...: df.save('/tmp/1.pickle') ...: dfrt =pd.load('/tmp/1.pickle') ...: In [9]: dfrt.meta Out[9]: {'Im': 'persistent'} ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3297
2013-04-09T18:24:51Z
2013-05-18T15:05:08Z
null
2014-06-24T19:43:15Z
BUG: fix unsafe dtype conversion in Series
diff --git a/pandas/core/common.py b/pandas/core/common.py index 49d3015a5c871..60d3e86e185fe 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -760,12 +760,27 @@ def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None): def changeit(): - # our type is wrong here, need to upcast + # try to directly set by expanding our array to full + # length of the boolean + om = other[mask] + om_at = om.astype(result.dtype) + if (om == om_at).all(): + new_other = result.values.copy() + new_other[mask] = om_at + result[:] = new_other + return result, False + + # we are forced to change the dtype of the result as the input isn't compatible r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True) np.putmask(r, mask, other) # we need to actually change the dtype here if change is not None: + + # if we are trying to do something unsafe + # like put a bigger dtype in a smaller one, use the smaller one + if change.dtype.itemsize < r.dtype.itemsize: + raise Exception("cannot change dtype of input to smaller size") change.dtype = r.dtype change[:] = r diff --git a/pandas/core/series.py b/pandas/core/series.py index cd9ed90c57a43..8427274488cef 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -739,7 +739,13 @@ def where(self, cond, other=nan, inplace=False): if isinstance(other, Series): other = other.reindex(ser.index) elif isinstance(other, (tuple,list)): - other = np.array(other) + + # try to set the same dtype as ourselves + new_other = np.array(other,dtype=self.dtype) + if not (new_other == np.array(other)).all(): + other = np.array(other) + else: + other = new_other if len(other) != len(ser): icond = ~cond diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index dc036a933e2fe..4f17135385748 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1127,17 +1127,44 @@ def test_where(self): self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), [0,2,3]) 
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), []) + # unsafe dtype changes + for dtype in [ np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 ]: + s = Series(np.arange(10), dtype=dtype) + mask = s < 5 + s[mask] = range(2,7) + expected = Series(range(2,7) + range(5,10), dtype=dtype) + assert_series_equal(s, expected) + self.assertEquals(s.dtype, expected.dtype) + + # these are allowed operations, but are upcasted + for dtype in [ np.int64, np.float64 ]: + s = Series(np.arange(10), dtype=dtype) + mask = s < 5 + values = [2.5,3.5,4.5,5.5,6.5] + s[mask] = values + expected = Series(values + range(5,10), dtype='float64') + assert_series_equal(s, expected) + self.assertEquals(s.dtype, expected.dtype) + + # can't do these as we are forced to change the itemsize of the input to something we cannot + for dtype in [ np.int8, np.int16, np.int32, np.float16, np.float32 ]: + s = Series(np.arange(10), dtype=dtype) + mask = s < 5 + values = [2.5,3.5,4.5,5.5,6.5] + self.assertRaises(Exception, s.__setitem__, tuple(mask), values) + # GH3235 s = Series(np.arange(10)) mask = s < 5 - s[mask] = range(5) - expected = Series(np.arange(10),dtype='float64') - assert_series_equal(s,expected) + s[mask] = range(2,7) + expected = Series(range(2,7) + range(5,10)) + assert_series_equal(s, expected) + self.assertEquals(s.dtype, expected.dtype) s = Series(np.arange(10)) mask = s > 5 s[mask] = [0]*4 - expected = Series([0,1,2,3,4,5] + [0]*4,dtype='float64') + expected = Series([0,1,2,3,4,5] + [0]*4) assert_series_equal(s,expected) s = Series(np.arange(10)) @@ -3165,7 +3192,7 @@ def test_cast_on_putmask(self): # need to upcast s = Series([1,2],index=[1,2],dtype='int64') s[[True, False]] = Series([0],index=[1],dtype='int64') - expected = Series([0,2],index=[1,2],dtype='float64') + expected = Series([0,2],index=[1,2],dtype='int64') assert_series_equal(s, expected)
fixes assignment to smaller itemsizes on dtype upcasting
https://api.github.com/repos/pandas-dev/pandas/pulls/3292
2013-04-09T02:06:06Z
2013-04-09T17:18:25Z
2013-04-09T17:18:25Z
2013-04-09T19:44:07Z
BUG: test case showing why assigning to dtype is unsafe
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index dc036a933e2fe..3abcee52f181d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1127,12 +1127,21 @@ def test_where(self): self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), [0,2,3]) self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), []) + + s = Series(np.arange(10), dtype=np.int32) + mask = s < 5 + s[mask] = range(5) + expected = Series(np.arange(10), dtype=np.int32) + assert_series_equal(s, expected) + self.assertEquals(s.dtype, expected.dtype) + # GH3235 s = Series(np.arange(10)) mask = s < 5 s[mask] = range(5) - expected = Series(np.arange(10),dtype='float64') - assert_series_equal(s,expected) + expected = Series(np.arange(10)) + assert_series_equal(s, expected) + self.assertEquals(s.dtype, expected.dtype) s = Series(np.arange(10)) mask = s > 5
@jreback this is a test case that shows why assigning to `dtype` is unsafe, namely that the dtype item sizes have to be compatible obviously =P Could you take a look at this and fix? This came up when I booted the windows box back up because Windows uses int32 by default. There is a secondary bug here that the test case in question probably _should not_ cause the dtype of the resulting Series to change from int64 to float64, agreed?
https://api.github.com/repos/pandas-dev/pandas/pulls/3290
2013-04-09T01:15:53Z
2013-04-09T14:40:02Z
null
2013-05-18T15:13:52Z
BUG: suppress error raise by nonnumeric columns when plotting DataFrame ...
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index f18e862a30293..afe9065b4f9c2 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -234,6 +234,17 @@ def test_plot(self): index=index) _check_plot_works(df.plot, title=u'\u03A3') + @slow + def test_nonnumeric_exclude(self): + import matplotlib.pyplot as plt + plt.close('all') + + df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}) + ax = df.plot(raise_on_error=False) # it works + self.assert_(len(ax.get_lines()) == 1) #B was plotted + + self.assertRaises(Exception, df.plot) + @slow def test_label(self): import matplotlib.pyplot as plt diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index fc896492527e9..eb118e160e390 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -691,8 +691,10 @@ class MPLPlot(object): """ _default_rot = 0 - _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog'] - _attr_defaults = {'logy': False, 'logx': False, 'loglog': False} + _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog', + 'raise_on_error'] + _attr_defaults = {'logy': False, 'logx': False, 'loglog': False, + 'raise_on_error': True} def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, sharey=False, use_index=True, @@ -1170,17 +1172,27 @@ def _make_plot(self): else: args = (ax, x, y, style) - newline = plotf(*args, **kwds)[0] - lines.append(newline) - leg_label = label - if self.mark_right and self.on_right(i): - leg_label += ' (right)' - labels.append(leg_label) - ax.grid(self.grid) - - if self._is_datetype(): - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + try: + newline = plotf(*args, **kwds)[0] + lines.append(newline) + leg_label = label + if self.mark_right and self.on_right(i): + leg_label += ' (right)' + labels.append(leg_label) + ax.grid(self.grid) + + if self._is_datetype(): + left, right = _get_xlim(lines) + ax.set_xlim(left, right) + except AttributeError as inst: # non-numeric 
+ msg = ('Unable to plot data %s vs index %s,\n' + 'error was: %s' % (str(y), str(x), str(inst))) + if not self.raise_on_error: + print msg + else: + msg = msg + ('\nConsider setting raise_on_error=False' + 'to suppress') + raise Exception(msg) self._make_legend(lines, labels) @@ -1198,6 +1210,24 @@ def to_leg_label(label, i): return label + ' (right)' return label + def _plot(data, col_num, ax, label, style, **kwds): + try: + newlines = tsplot(data, plotf, ax=ax, label=label, + style=style, **kwds) + ax.grid(self.grid) + lines.append(newlines[0]) + leg_label = to_leg_label(label, col_num) + labels.append(leg_label) + except AttributeError as inst: #non-numeric + msg = ('Unable to plot %s,\n' + 'error was: %s' % (str(data), str(inst))) + if not self.raise_on_error: + print msg + else: + msg = msg + ('\nConsider setting raise_on_error=False' + 'to suppress') + raise Exception(msg) + if isinstance(data, Series): ax = self._get_ax(0) # self.axes[0] style = self.style or '' @@ -1205,12 +1235,7 @@ def to_leg_label(label, i): kwds = kwargs.copy() self._maybe_add_color(colors, kwds, style, 0) - newlines = tsplot(data, plotf, ax=ax, label=label, - style=self.style, **kwds) - ax.grid(self.grid) - lines.append(newlines[0]) - leg_label = to_leg_label(label, 0) - labels.append(leg_label) + _plot(data, 0, ax, label, self.style, **kwds) else: for i, col in enumerate(data.columns): label = com.pprint_thing(col) @@ -1220,13 +1245,7 @@ def to_leg_label(label, i): self._maybe_add_color(colors, kwds, style, i) - newlines = tsplot(data[col], plotf, ax=ax, label=label, - style=style, **kwds) - - lines.append(newlines[0]) - leg_label = to_leg_label(label, i) - labels.append(leg_label) - ax.grid(self.grid) + _plot(data[col], i, ax, label, style, **kwds) self._make_legend(lines, labels) diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 968883ca64afd..5d7dc880b2868 100644 --- a/pandas/tseries/tests/test_plotting.py +++ 
b/pandas/tseries/tests/test_plotting.py @@ -80,6 +80,26 @@ def test_frame_inferred(self): df = DataFrame(np.random.randn(len(idx), 3), index=idx) df.plot() + @slow + def test_nonnumeric_exclude(self): + import matplotlib.pyplot as plt + plt.close('all') + + idx = date_range('1/1/1987', freq='A', periods=3) + df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx) + self.assertRaises(Exception, df.plot) + + plt.close('all') + ax = df.plot(raise_on_error=False) # it works + self.assert_(len(ax.get_lines()) == 1) #B was plotted + + plt.close('all') + self.assertRaises(Exception, df.A.plot) + + plt.close('all') + ax = df['A'].plot(raise_on_error=False) # it works + self.assert_(len(ax.get_lines()) == 0) + @slow def test_tsplot(self): from pandas.tseries.plotting import tsplot
...#3108 suppressing AttributeError here so you can plot the DataFrame and automatically exclude non-numeric columns. @y-p @wesm @jreback any thoughts on 1) whether it should raise instead of exclude and 2) whether catching the AttributeError here is too dangerously broad?
https://api.github.com/repos/pandas-dev/pandas/pulls/3287
2013-04-08T16:16:53Z
2013-04-12T04:14:13Z
2013-04-12T04:14:13Z
2014-07-02T01:57:24Z
ENH/BUG: GH3283 allow pivot tables to downcast the output (e.g. float -> int) if possible
diff --git a/RELEASE.rst b/RELEASE.rst index 7b36b11371737..a02fcd1e3f83d 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -171,6 +171,8 @@ pandas 0.11.0 - added the method ``select_column`` to select a single column from a table as a Series. - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - Downcast on pivot if possible (GH3283_), adds argument ``downcast`` to ``fillna`` + **Bug Fixes** - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill`` @@ -376,6 +378,7 @@ pandas 0.11.0 .. _GH3222: https://github.com/pydata/pandas/issues/3222 .. _GH2641: https://github.com/pydata/pandas/issues/2641 .. _GH3238: https://github.com/pydata/pandas/issues/3238 +.. _GH3283: https://github.com/pydata/pandas/issues/3283 pandas 0.10.1 ============= diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8bc7bfe12aaee..0d7913819f115 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3231,7 +3231,7 @@ def reorder_levels(self, order, axis=0): # Filling NA's def fillna(self, value=None, method=None, axis=0, inplace=False, - limit=None): + limit=None, downcast=None): """ Fill NA/NaN values using the specified method @@ -3255,6 +3255,8 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, a reference to the filled object, which is self if inplace=True limit : int, default None Maximum size gap to forward or backward fill + downcast : dict, default is None, a dict of item->dtype of what to + downcast if possible See also -------- @@ -3300,7 +3302,8 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, result[k].fillna(v, inplace=True) return result else: - new_data = self._data.fillna(value, inplace=inplace) + new_data = self._data.fillna(value, inplace=inplace, + downcast=downcast) if inplace: self._data = new_data diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a47d747216f49..591e8adbe5c4a 100644 --- a/pandas/core/internals.py +++ 
b/pandas/core/internals.py @@ -28,6 +28,7 @@ class Block(object): is_bool = False is_object = False _can_hold_na = False + _downcast_dtype = None def __init__(self, values, items, ref_items, ndim=2): if issubclass(values.dtype.type, basestring): @@ -205,7 +206,7 @@ def split_block_at(self, item): self.items[s:e].copy(), self.ref_items) - def fillna(self, value, inplace=False): + def fillna(self, value, inplace=False, downcast=None): if not self._can_hold_na: if inplace: return self @@ -216,10 +217,32 @@ def fillna(self, value, inplace=False): mask = com.isnull(new_values) np.putmask(new_values, mask, value) - if inplace: - return self - else: - return make_block(new_values, self.items, self.ref_items) + block = make_block(new_values, self.items, self.ref_items) + if downcast: + block = block.downcast() + return block + + def downcast(self, dtypes = None): + """ try to downcast each item to the dict of dtypes if present """ + + if dtypes is None: + dtypes = dict() + + values = self.values + blocks = [] + for i, item in enumerate(self.items): + + dtype = dtypes.get(item,self._downcast_dtype) + if dtype is None: + nv = _block_shape(values[i]) + blocks.append(make_block(nv, [ item ], self.ref_items)) + continue + + nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype)) + nv = _block_shape(nv) + blocks.append(make_block(nv, [ item ], self.ref_items)) + + return blocks def astype(self, dtype, copy = True, raise_on_error = True): """ @@ -563,6 +586,7 @@ def _try_cast_result(self, result): return _possibly_downcast_to_dtype(result, self.dtype) class FloatBlock(NumericBlock): + _downcast_dtype = 'int64' def _can_hold_element(self, element): if isinstance(element, np.ndarray): @@ -974,6 +998,9 @@ def shift(self, *args, **kwargs): def fillna(self, *args, **kwargs): return self.apply('fillna', *args, **kwargs) + def downcast(self, *args, **kwargs): + return self.apply('downcast', *args, **kwargs) + def astype(self, *args, **kwargs): return self.apply('astype', *args, 
**kwargs) diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index bed1fe2212746..d920df1ca867a 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -111,7 +111,7 @@ def pivot_table(data, values=None, rows=None, cols=None, aggfunc='mean', table = table.sort_index(axis=1) if fill_value is not None: - table = table.fillna(value=fill_value) + table = table.fillna(value=fill_value, downcast=True) if margins: table = _add_margins(table, data, values, rows=rows, diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index 0e50d606d6e7e..e9383e26f148a 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -69,6 +69,26 @@ def test_pivot_table_multiple(self): expected = self.data.groupby(rows + [cols]).agg(np.mean).unstack() tm.assert_frame_equal(table, expected) + def test_pivot_dtypes(self): + + # can convert dtypes + f = DataFrame({'a' : ['cat', 'bat', 'cat', 'bat'], 'v' : [1,2,3,4], 'i' : ['a','b','a','b']}) + self.assert_(f.dtypes['v'] == 'int64') + + z = pivot_table(f, values='v', rows=['a'], cols=['i'], fill_value=0, aggfunc=np.sum) + result = z.get_dtype_counts() + expected = Series(dict(int64 = 2)) + tm.assert_series_equal(result, expected) + + # cannot convert dtypes + f = DataFrame({'a' : ['cat', 'bat', 'cat', 'bat'], 'v' : [1.5,2.5,3.5,4.5], 'i' : ['a','b','a','b']}) + self.assert_(f.dtypes['v'] == 'float64') + + z = pivot_table(f, values='v', rows=['a'], cols=['i'], fill_value=0, aggfunc=np.mean) + result = z.get_dtype_counts() + expected = Series(dict(float64 = 2)) + tm.assert_series_equal(result, expected) + def test_pivot_multi_values(self): result = pivot_table(self.data, values=['D', 'E'], rows='A', cols=['B', 'C'], fill_value=0)
closes #3283
https://api.github.com/repos/pandas-dev/pandas/pulls/3286
2013-04-08T14:43:28Z
2013-04-08T15:32:30Z
2013-04-08T15:32:30Z
2014-06-20T01:23:28Z
ENH: Allow read_frame to use parameterized queries
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 0caf83838fb2d..3002f2f620f5e 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -22,6 +22,15 @@ def execute(sql, con, retry=True, cur=None, params=None): ---------- sql: string Query to be executed + con: database connection instance + Database connection. Must implement PEP249 (Database API v2.0). + retry: bool + Not currently implemented + cur: database cursor, optional + Must implement PEP249 (Datbase API v2.0). If cursor is not provided, + one will be obtained from the database connection. + params: list or tuple, optional + List of parameters to pass to execute method. Returns ------- @@ -102,7 +111,7 @@ def tquery(sql, con=None, cur=None, retry=True): return result -def uquery(sql, con=None, cur=None, retry=True, params=()): +def uquery(sql, con=None, cur=None, retry=True, params=None): """ Does the same thing as tquery, but instead of returning results, it returns the number of rows affected. Good for update queries. @@ -124,7 +133,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=()): return result -def read_frame(sql, con, index_col=None, coerce_float=True): +def read_frame(sql, con, index_col=None, coerce_float=True, params=None): """ Returns a DataFrame corresponding to the result set of the query string. @@ -139,8 +148,10 @@ def read_frame(sql, con, index_col=None, coerce_float=True): con: DB connection object, optional index_col: string, optional column name to use for the returned DataFrame object. + params: list or tuple, optional + List of parameters to pass to execute method. """ - cur = execute(sql, con) + cur = execute(sql, con, params=params) rows = _safe_fetch(cur) columns = [col_desc[0] for col_desc in cur.description]
Added params argument to read_frame that is passed directly to the execute method. This allows users to use a parameterized query rather than having to pre-format the SQL themselves.
https://api.github.com/repos/pandas-dev/pandas/pulls/3285
2013-04-08T14:38:43Z
2013-04-08T16:54:21Z
2013-04-08T16:54:20Z
2013-04-08T16:54:29Z
ENH: add to_series() method to Index and subclasses GH3275
diff --git a/RELEASE.rst b/RELEASE.rst index 46f7c832ae149..291f1f8bf0f33 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -49,6 +49,8 @@ pandas 0.11.0 - Added support for expression evaluation using the ``numexpr`` library - Added ``convert=boolean`` to ``take`` routines to translate negative indices to positive, defaults to True + - Added to_series() method to indices, to facilitate the creation of indexeres + (GH3275_) **Improvements to existing features** @@ -277,6 +279,7 @@ pandas 0.11.0 .. _GH1893: https://github.com/pydata/pandas/issues/1893 .. _GH1978: https://github.com/pydata/pandas/issues/1978 .. _GH2758: https://github.com/pydata/pandas/issues/2758 +.. _GH3275: https://github.com/pydata/pandas/issues/3275 .. _GH2121: https://github.com/pydata/pandas/issues/2121 .. _GH3247: https://github.com/pydata/pandas/issues/3247 .. _GH2809: https://github.com/pydata/pandas/issues/2809 diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index c6553b909f7a6..331bbfaf1a2e8 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -226,6 +226,9 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicity converts ``NaT` API changes ~~~~~~~~~~~ + - Added to_series() method to indicies, to facilitate the creation of indexers + (GH3275_) + - In ``HDFStore``, added the method ``select_column`` to select a single column from a table as a Series. - In ``HDFStore``, deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` @@ -343,6 +346,7 @@ on GitHub for a complete list. .. _GH2807: https://github.com/pydata/pandas/issues/2807 .. _GH2918: https://github.com/pydata/pandas/issues/2918 .. _GH2758: https://github.com/pydata/pandas/issues/2758 +.. _GH3275: https://github.com/pydata/pandas/issues/3275 .. _GH2979: https://github.com/pydata/pandas/issues/2979 .. _GH3011: https://github.com/pydata/pandas/issues/3011 .. 
_GH3076: https://github.com/pydata/pandas/issues/3076 diff --git a/pandas/core/index.py b/pandas/core/index.py index ac352eef4acfe..aa0fd5b6b0351 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -188,6 +188,14 @@ def __repr__(self): """ return str(self) + def to_series(self): + """ + return a series with both index and values equal to the index keys + useful with map for returning an indexer based on an index + """ + import pandas as pd + return pd.Series(self.values,index=self,name=self.name) + def astype(self, dtype): return Index(self.values.astype(dtype), name=self.name, dtype=dtype)
#3275 ``` python In [10]: df=mkdf(10,5,r_idx_nlevels=2) In [11]: df.index Out[11]: MultiIndex [R_l0_g0 R_l1_g0, R_l0_g1 R_l1_g1, R_l0_g2 R_l1_g2, R_l0_g3 R_l1_g3, R_l0_g4 R_l1_g4, R_l0_g5 R_l1_g5, R_l0_g6 R_l1_g6, R_l0_g7 R_l1_g7, R_l0_g8 R_l1_g8, R_l0_g9 R_l1_g9] In [12]: # new method ...: df.index.to_series() Out[12]: R0 R1 R_l0_g0 R_l1_g0 (R_l0_g0, R_l1_g0) R_l0_g1 R_l1_g1 (R_l0_g1, R_l1_g1) R_l0_g2 R_l1_g2 (R_l0_g2, R_l1_g2) R_l0_g3 R_l1_g3 (R_l0_g3, R_l1_g3) R_l0_g4 R_l1_g4 (R_l0_g4, R_l1_g4) R_l0_g5 R_l1_g5 (R_l0_g5, R_l1_g5) R_l0_g6 R_l1_g6 (R_l0_g6, R_l1_g6) R_l0_g7 R_l1_g7 (R_l0_g7, R_l1_g7) R_l0_g8 R_l1_g8 (R_l0_g8, R_l1_g8) R_l0_g9 R_l1_g9 (R_l0_g9, R_l1_g9) dtype: object In [13]: # so now this is possible ...: df[df.index.to_series().map(lambda x: "4" in x[0])] Out[13]: C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 C_l0_g4 R0 R1 R_l0_g4 R_l1_g4 R4C0 R4C1 R4C2 R4C3 R4C4 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3280
2013-04-08T11:29:06Z
2013-04-09T20:05:09Z
2013-04-09T20:05:09Z
2014-06-21T14:58:26Z
API: raise TypeError out of GroupBy.agg
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index cb0a03d306c53..1954321e55a81 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -438,7 +438,7 @@ def _try_cast(self, result, obj): dtype = obj.dtype if _is_numeric_dtype(dtype): - + # need to respect a non-number here (e.g. Decimal) if len(result) and issubclass(type(result[0]),(np.number,float,int)): result = _possibly_downcast_to_dtype(result, dtype) @@ -494,7 +494,7 @@ def _python_agg_general(self, func, *args, **kwargs): values = result if _is_numeric_dtype(values.dtype): values = com.ensure_float(values) - + output[name] = self._try_cast(values[mask],result) return self._wrap_aggregated_output(output) @@ -794,7 +794,7 @@ def wrapper(*args, **kwargs): # need to curry our sub-function func = wrapper - + else: func = get_func(ftype) @@ -1778,7 +1778,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): colg = SeriesGroupBy(obj[item], selection=item, grouper=self.grouper) result[item] = colg.aggregate(func, *args, **kwargs) - except (ValueError, TypeError): + except ValueError: cannot_agg.append(item) continue @@ -1917,7 +1917,7 @@ def transform(self, func, *args, **kwargs): # if we make it here, test if we can use the fast path try: res_fast = fast_path(group) - + # compare that we get the same results if res.shape == res_fast.shape: res_r = res.values.ravel() @@ -1925,7 +1925,7 @@ def transform(self, func, *args, **kwargs): mask = notnull(res_r) if (res_r[mask] == res_fast_r[mask]).all(): path = fast_path - + except: pass except TypeError: diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 7aad2e0b734b1..3ca0c2d08c99f 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -121,7 +121,7 @@ def checkit(dtype): agged = grouped.aggregate([np.mean, np.std]) agged = grouped.aggregate({'one': np.mean, 'two': np.std}) - + group_constants = { 0: 10, 1: 20, @@ -425,10 +425,11 @@ def test_aggregate_item_by_item(self): df['E'] = 
['a'] * len(self.df) grouped = self.df.groupby('A') - def aggfun(ser): - return len(ser + 'a') - result = grouped.agg(aggfun) - self.assertEqual(len(result.columns), 1) + # API change in 0.11 + # def aggfun(ser): + # return len(ser + 'a') + # result = grouped.agg(aggfun) + # self.assertEqual(len(result.columns), 1) aggfun = lambda ser: ser.size result = grouped.agg(aggfun) @@ -444,6 +445,19 @@ def aggfun(ser): self.assert_(isinstance(result, DataFrame)) self.assertEqual(len(result), 0) + def test_agg_item_by_item_raise_typeerror(self): + from numpy.random import randint + + df = DataFrame(randint(10, size=(20, 10))) + + def raiseException(df): + print '----------------------------------------' + print df.to_string() + raise TypeError + + self.assertRaises(TypeError, df.groupby(0).agg, + raiseException) + def test_basic_regression(self): # regression T = [1.0 * x for x in range(1, 10) * 10][:1095] @@ -614,7 +628,7 @@ def f3(x): df = DataFrame({'a':[1,2,2,2], 'b':range(4), 'c':range(5,9)}) - + df2 = DataFrame({'a':[3,2,2,2], 'b':range(4), 'c':range(5,9)}) @@ -624,7 +638,7 @@ def f3(x): result1 = df.groupby('a').apply(f1) result2 = df2.groupby('a').apply(f1) assert_frame_equal(result1, result2) - + # should fail (not the same number of levels) self.assertRaises(AssertionError, df.groupby('a').apply, f2) self.assertRaises(AssertionError, df2.groupby('a').apply, f2)
Fixes #3238
https://api.github.com/repos/pandas-dev/pandas/pulls/3277
2013-04-08T06:51:51Z
2013-04-08T07:12:24Z
2013-04-08T07:12:24Z
2014-08-12T07:30:23Z
WIP: add df.dgrep, df.neighbours
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index 9c0a6d5a421c7..5e272fcb1fa48 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -233,6 +233,12 @@ API changes - In ``HDFStore``, deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + - an experimental new df.dgrep() for selecting rows by applying regex/predicate function to *data* values + can be made available via the "sandbox.dgrep" option. Note that this is not an official part of the API yet + so is subject to breaking change. Feedback is welcome (GH2460_). + +.. _GH2460: https://github.com/pydata/pandas/issues/2460 + Enhancements ~~~~~~~~~~~~ diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 9f599ffe908ba..2cb6abe154f5c 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -241,3 +241,76 @@ def use_inf_as_null_cb(key): with cf.config_prefix('mode'): cf.register_option('use_inf_as_null', False, use_inf_as_null_doc, cb=use_inf_as_null_cb) + + +sb_xp_dgrep_doc = """ +: boolean + Enables the experimental df.dgrep() method, for selecting series/DataFrame + rows by regex/predicate match against *data* values. + + Features exposed via the sandbox are subject to change or removal, and are not + yet part of the official API. + +""" + +def sandbox(gh_issue_num,msg=None): + def inner(cb): + def f(key): + + s = """ +This is an experimental feature being considered for inclusion in pandas core. +We'd appreciate your feedback on it in the Github issue page: + + http://github.com/pydata/pandas/issues/%d + +If you find this useful, lacking in major functionality or buggy please +take a moment to let us know, so we can make pandas (even) better. 
+ +Thank you, + +The Pandas dev team + +""" % gh_issue_num + + if msg: + s += "P.S.\n\n" + msg + + # don't print( the msessage on turn off + val = cf.get_option(key) + if val: + print(s) + + return cb(key) + + return f + return inner + +@sandbox(3276,msg=""" +Series/DataFrame now have the `dgrep` and `neighbours` methods. +See the docstrings for usage examples. +""") +def xp_dgrep_cb(key): + import pandas + val = cf.get_option(key) + if val: + from pandas.sandbox.dgrep import dgrep,neighbours + pandas.DataFrame.dgrep = dgrep + pandas.Series.dgrep = dgrep + pandas.DataFrame.neighbours = neighbours + pandas.Series.neighbours = neighbours + + else: + try: + del pandas.DataFrame.dgrep + del pandas.DataFrame.context + except: + pass + try: + del pandas.series.dgrep + del pandas.series.context + except: + pass + +with cf.config_prefix('sandbox'): + cf.register_option('dgrep', False, sb_xp_dgrep_doc, + validator=is_bool, cb=xp_dgrep_cb) diff --git a/pandas/sandbox/dgrep/__init__.py b/pandas/sandbox/dgrep/__init__.py new file mode 100644 index 0000000000000..ea69c317426ad --- /dev/null +++ b/pandas/sandbox/dgrep/__init__.py @@ -0,0 +1,141 @@ +import numpy as np +# TODO, add axis argument +def dgrep(self,pred,cols=None,C=0,B=0,A=0,split=False,keys=True): + """Select rows by regex match or predicate function, against *data*. + + This is an unindexed operation, and is substantially slower then + index-based selection for large datasets. + + cols : string or sequence of str + name or sequence of column named if running against a DataFrame, + ignored otherwise. + pred : string regex or f(val) -> bool or value to test equality against. + + if the predicate function expects *args or multiple unnamed + arguments, the row values for the specified columns will be passed + in to the the predicate function as a list, one call per row. 
+ + A/B,C : int, grep-like argument, context lines (A)fter/(B)efore or (C)entered (C)ontext + split: bool , False returns a slice of the current object, if context lines overlap + between matches, they will only appear once. a True value will return + a list of frames or (matched_index_label, self_sliced) pairs (default), + depending on the the value of `keys`. Similar to the groupby API. + keys: bool, if split==True, keys=False will make the function return + a list of frames, rather then a list of (label, dataframe) pairs. + + Usage examples: + + from pandas.util.testing import makeCustomDataframe as mkdf + + df=mkdf(30,4,r_idx_nlevels=3) + df.index=range(30) + df.iloc[5,0] = "supercool" + df.iloc[6,0] = "supercool" + df.iloc[29,0] = "supercool" + df.iloc[15,1] = "supercool" + df.iloc[17,2] = "supercool" + # accepts colname and regex string + df.dgrep(".cool$","C_l0_g0") + + df.dgrep(".cool$",["C_l0_g0","C_l0_g1"]) + # specifying C=2 (or A/B=) does a grep context , providing + # context lines around the hit + # NB overlapping context lines do not cause line duplication (*) + df.dgrep(".cool$",["C_l0_g0"],C=2) + # also accepts lambda + # NB, last match is at end, so only previous line of context displayed + df.dgrep(lambda x: bool(re.search(".cool$",x)),["C_l0_g0"],C=3) + # split=True returns a series of (index_label_matched, dataframe) + # pairs, similar to groupby + # NB some lines appear in more then one group in this case (*) + df.dgrep(".cool$",["C_l0_g0"],split=True,C=3) + + # works on series too + df.C_l0_g0.dgrep(".cool$",C=3) + + # can also get the values "applied" onto the function + # TODO?: df.dgrep(lambda c1,c2: "cool" in c1 or "cool" in c2,df.columns[:2]) + + # which also works with *args + df.dgrep(lambda *args: "supercool" in args,df.columns[:3]) + """ + from pandas import DataFrame + from pandas.core.common import _is_sequence + import inspect + + if _is_sequence(cols): + cols = list(cols) # convert index to list, from slice such as df.columns[:3] 
+ if not isinstance(cols,(list,tuple)): + cols = [cols] + + combine=False + if callable(pred): + fargs=inspect.getargspec(pred) + if fargs.varargs: + combine=True + + # elif len(fargs.args) > 1: + # if len(fargs.args) != len(cols): + # raise ValueError("predicate function argcount doesn't match num. of cols") + # combine=True + + elif isinstance(pred,basestring): + import re + _pat = pred + matcher = re.compile(_pat) + def f1(x): + return bool(matcher.search(unicode(x))) + pred=f1 + else: # can also match non-string values by equality + def f2(x): + return x == pred + pred=f2 + + indicies = set() + if isinstance(self,DataFrame): + if combine: + vals = self.ix[cols].apply(pred).sum(1) + indicies.update(np.where(vals)[0].tolist()) + + else: + for col in cols: + # print np.where(self[col].apply(pred)) + vals = np.where(self[col].apply(pred))[0] + indicies.update(vals.tolist()) + else: + + indicies.update(np.where(self.apply(pred))[0].tolist()) + + return self.neighbours(self.index[list(sorted(indicies))],C=C,B=B,A=A,split=split,keys=keys) + +def neighbours(self,labels,C=None,B=None,A=None,split=False,keys=True): + """Takes a list of labels and return one ore more frame/series with the indicated + rows + surrounding rows as determined by the (A)fter/(B)efore or + (C)entered (C)ontext. + + see the `dgrep` docstring for more details about the identical arguments. 
+ + """ + if C: + B = C//2 + A = C-B-1 + + indicies = map(self.index.get_loc,labels) + if split: + #list of (hit_label,sliced frame) + def g(x): + return (x,range(max(0,x-B),min(x+A+1,len(self.index)))) + + indicies_grps = map(g,indicies) + results = [] + for i,indicies in indicies_grps: + if keys: + results.append((self.index[i],self.iloc[indicies])) + else: + results.append(self.iloc[indicies]) + return results + else: + indicies=reduce(lambda acc,x: acc+range(max(0,x-B),min(x+A+1,len(self.index))), + indicies,[]) + # there's just one, and return just the sliced frame, not the hit label + return self.iloc[sorted(set(indicies))] diff --git a/pandas/sandbox/dgrep/test_dgrep.py b/pandas/sandbox/dgrep/test_dgrep.py new file mode 100644 index 0000000000000..d2944890f7fb0 --- /dev/null +++ b/pandas/sandbox/dgrep/test_dgrep.py @@ -0,0 +1,68 @@ +# pylint: disable-msg=W0612,E1101 + +import unittest +import nose + +from pandas.util.testing import assert_series_equal + +class TestDgrep(unittest.TestCase): + def test_dgrep(self): + import pandas as pd + from pandas import Series as Series + from pandas.util.testing import makeCustomDataframe as mkdf + + import re + pd.options.sandbox.dgrep=True # turn it on + df=mkdf(30,4,r_idx_nlevels=3) + df.index=range(30) + df.iloc[5,0] = "supercool" + df.iloc[6,0] = "supercool" + df.iloc[29,0] = "supercool" + df.iloc[15,1] = "supercool" + df.iloc[17,2] = "supercool" + # accepts colname and regex string + rs = df.dgrep(".cool$","C_l0_g0") + assert_series_equal(rs.C_l0_g0,Series(["supercool"]*3,index=[5,6,29])) + # accepts lists of cols, can include a series such as df.series_name + # (convenient for tab completion on columns) + rs = df.dgrep(".cool$",['C_l0_g0','C_l0_g1']) + xp = Series(["supercool","supercool","R15C0","supercool"],index=[5,6,15,29]) + assert_series_equal(rs.C_l0_g0,xp) + self.assertEqual(rs.iloc[2,1],"supercool") + + # accepts a single named series + rs = df.dgrep(".cool$",'C_l0_g1') + xp = 
Series(["supercool"],index=[15]) + assert_series_equal(rs.C_l0_g1,xp) + + + # specifying C=2 (or A/B=) does a grep context , providing + # context lines around the hit + # NB overlapping context lines do not cause line duplication (*) + rs = df.dgrep(".cool$",["C_l0_g0"],C=2) + xp = Series(['R4C0', 'supercool', 'supercool', 'R28C0', 'supercool'],index=[4,5,6,28,29]) + assert_series_equal(rs.C_l0_g0,xp) + + # also accepts lambda + # NB, last match is at end, so only previous line of context displayed + rs=df.dgrep(lambda x: bool(re.search(".cool$",x)),["C_l0_g0"],C=3) + xp = Series(['R4C0', 'supercool', 'supercool', 'R7C0', 'R28C0', 'supercool'],index=[4,5,6,7,28,29]) + assert_series_equal(xp,rs.C_l0_g0) + # split=True returns a series of (index_label_matched, dataframe) + # pairs, similar to groupby + # NB some lines appear in more then one group in this case (*) + rs = df.dgrep(".cool$",["C_l0_g0"],split=True,C=3) + self.assertEqual(len(rs),3) + xp = Series(['R4C0', 'supercool', 'supercool'],index=[4,5,6]) + assert_series_equal(xp,rs[0][1].C_l0_g0) + + # works on series too + s = df.C_l0_g0.dgrep(".cool$",C=3) + xp = Series(['R4C0', 'supercool', 'supercool', 'R7C0', 'R28C0', 'supercool'],index=[4,5,6,7,28,29]) + assert_series_equal(xp,s) + + # can also get the values "applied" onto the function + # TODO?: df.dgrep(lambda c1,c2: "cool" in c1 or "cool" in c2,df.columns[:2]) + + # which also works with *args + df.dgrep(lambda *args: any(["supercool" in x for x in args]),df.columns[:3])
#2460 partly fulfilles #3269, should be extended to an equivalent method on index labels? (vectorized? numexpr?). Just experimenting with the API, performance later (as much as possible with unindexed data). - [x] need to add option to interpret multiple columns as a predicate expecting a list of values corresponding to the row values on the specified columns, to allow for boolean expressions across columns, rather then single value at a time. - [x] tests - [ ] make lazy like groupby? - [ ] need to add `axis` argument, per jeff's suggestion. - [ ] documentation. ``` python In [7]: pd.options.sandbox.dgrep=True This is an experimental feature being considered for inclusion in pandas core. We'd appreciate your feedback on it in the Github issue page: http://github.com/pydata/pandas/issues/2460 If you find this useful, lacking in major functionality or buggy please take a moment to let us know, so we can make pandas (even) better. Thank you, The Pandas dev team P.S. Series/DataFrame now have a .dgrep method. See the docstring for usage examples. 
In [8]: df=mkdf(30,4,r_idx_nlevels=3) ...: df.index=range(30) ...: df.iloc[5,0] = "supercool" ...: df.iloc[6,0] = "supercool" ...: df.iloc[29,0] = "supercool" ...: df.iloc[15,1] = "supercool" ...: # accepts colname and regex string ...: print "\n" + str(df.dgrep(".cool$","C_l0_g0")) ...: # accepts lists of cols, ...: print "\n" + str(df.dgrep(".cool$",["C_l0_g0",'C_l0_g1'])) ...: # specifying C=2 (or A/B=) does a grep context , providing ...: # context lines around the hit ...: # NB overlapping context lines do not cause line duplication (*) ...: print "\n" + str(df.dgrep(".cool$",["C_l0_g0"],C=2)) ...: # also accepts lambda ...: # NB, last match is at end, so only previous line of context displayed ...: print "\n" + str(df.dgrep(lambda x: bool(re.search(".cool$",x)),["C_l0_g0"],C=3)) ...: # split=True returns a series of (index_label_matched, dataframe) ...: # pairs, similar to groupby ...: # NB some lines appear in more then one group in this case (*) ...: print "\n" + "\n".join(map(str,df.dgrep(".cool$",["C_l0_g0"],split=True,C=3))) ...: ...: # works on series too ...: print "\n" + str(df.C_l0_g0.dgrep(".cool$",C=3)) C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3 29 supercool R29C1 R29C2 R29C3 C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3 15 R15C0 supercool R15C2 R15C3 29 supercool R29C1 R29C2 R29C3 C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 4 R4C0 R4C1 R4C2 R4C3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3 28 R28C0 R28C1 R28C2 R28C3 29 supercool R29C1 R29C2 R29C3 C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 4 R4C0 R4C1 R4C2 R4C3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3 7 R7C0 R7C1 R7C2 R7C3 28 R28C0 R28C1 R28C2 R28C3 29 supercool R29C1 R29C2 R29C3 (5, C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 4 R4C0 R4C1 R4C2 R4C3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3) (6, C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 5 supercool R5C1 R5C2 R5C3 6 supercool R6C1 R6C2 R6C3 7 R7C0 R7C1 R7C2 
R7C3) (29, C0 C_l0_g0 C_l0_g1 C_l0_g2 C_l0_g3 28 R28C0 R28C1 R28C2 R28C3 29 supercool R29C1 R29C2 R29C3) 4 R4C0 5 supercool 6 supercool 7 R7C0 28 R28C0 29 supercool Name: C_l0_g0, dtype: object # can also get the values "applied" onto the function df.dgrep(lambda c1,c2: "cool" in c1 or "cool" in c2,df.columns[:2]) # which also works with *args df.dgrep(lambda *args: "supercool" in args,df.columns[:3]) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3276
2013-04-08T05:25:49Z
2013-07-22T20:05:13Z
null
2014-07-22T00:40:26Z
ENH: an example of a sandbox feature
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 70f3fb045376e..6a51ec731675c 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -238,3 +238,56 @@ def use_inf_as_null_cb(key): with cf.config_prefix('mode'): cf.register_option('use_inf_as_null', False, use_inf_as_null_doc, cb=use_inf_as_null_cb) + + +sb_xp_foo_doc = """ +: boolean + Enables a snazzy, but not yet stable feature that could use + some feedback before going into core. +""" + +def sandbox(gh_issue_num,msg=None): + def inner(cb): + def f(key): + + s = """ +This is an experimental feature being considered for inclusion in pandas core. +We'd appreciate your feedback on it in the Github issue page: + + http://github.com/pydata/pandas/issues/%d + +If you find this useful, lacking in major functionality or buggy please +take a moment to let us know, so we can make pandas (even) better. + +Thank you, + +The Pandas dev team + +""" % gh_issue_num + + if msg: + s += "P.S.\n\n" + msg + + # don't print( the msessage on turn off + val = cf.get_option(key) + if val: + print(s) + + return cb(key) + + return f + return inner + +@sandbox(3274,msg= "love the hair.") +def xp_foo_cb(key): + import pandas + val = cf.get_option(key) + if val: + from pandas.sandbox.xp import foo + pandas.DataFrame.foo = foo + else: + del pandas.DataFrame.foo + +with cf.config_prefix('sandbox'): + cf.register_option('experimental_feature', False, sb_xp_foo_doc, + validator=is_bool, cb=xp_foo_cb) diff --git a/pandas/sandbox/xp/__init__.py b/pandas/sandbox/xp/__init__.py new file mode 100644 index 0000000000000..5ec592fb98c79 --- /dev/null +++ b/pandas/sandbox/xp/__init__.py @@ -0,0 +1,4 @@ + +# monkey patched onto something, when the config options is enabled +def foo(self): + print "Hi, I'm an experimental new method. grrrr!" % type(self)
Earlier today I suggested allowing "sandbox" features into releases, to make it easier to get some more testing and feedback before commiting something into core proper. Here's an example of how I imaging this to work: ``` In [1]: pd.describe_option("sandbox") sandbox.experimental_feature: [default: False] [currently: False] : boolean Enables a snazzy, but not yet stable feature that could use some feedback before going into core. In [1]: pd.options.sandbox.experimental_feature=True This is an experimental feature being considered for inclusion in pandas core. We'd appreciate your feedback on it in the appropriate Github issue page: http://github.com/pydata/pandas/issues/3274 If you find this feature useful, lacking in major functionality or buggy please take a moment to let us know, so we can make pandas (even) better. Thank you, The Pandas dev team P.S. love the hair. In [2]: df=mkdf(3,2) ...: df.foo() Hi, I'm a new experimental method. grrrr! In [3]: pd.options.sandbox.experimental_feature=False In [4]: df=mkdf(3,2) ...: df.foo() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-4-66a4f195a54c> in <module>() 1 df=mkdf(3,2) ----> 2 df.foo() /home/user1/src/pandas/pandas/core/frame.pyc in __getattr__(self, name) 1965 return self[name] 1966 raise AttributeError("'%s' object has no attribute '%s'" % -> 1967 (type(self).__name__, name)) 1968 1969 def __setattr__(self, name, value): AttributeError: 'DataFrame' object has no attribute 'foo' ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3274
2013-04-08T01:37:45Z
2013-04-08T09:41:07Z
null
2022-10-13T00:14:54Z
CLN: delete sandbox/stats, that's all statsmodels now
diff --git a/pandas/sandbox/stats/__init__.py b/pandas/sandbox/stats/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/sandbox/stats/rls.py b/pandas/sandbox/stats/rls.py deleted file mode 100644 index 51166500c484f..0000000000000 --- a/pandas/sandbox/stats/rls.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Restricted least squares""" - -import numpy as np -from scikits.statsmodels.regression import WLS, GLS, RegressionResults - - -class RLS(GLS): - """ - Restricted general least squares model that handles linear constraints - - Parameters - ---------- - endog: array-like - n length array containing the dependent variable - exog: array-like - n-by-p array of independent variables - constr: array-like - k-by-p array of linear constraints - param (0.): array-like or scalar - p-by-1 array (or scalar) of constraint parameters - sigma (None): scalar or array-like - The weighting matrix of the covariance. No scaling by default (OLS). - If sigma is a scalar, then it is converted into an n-by-n diagonal - matrix with sigma as each diagonal element. - If sigma is an n-length array, then it is assumed to be a diagonal - matrix with the given sigma on the diagonal (WLS). - - Notes - ----- - endog = exog * beta + epsilon - weights' * constr * beta = param - - See Greene and Seaks, "The Restricted Least Squares Estimator: - A Pedagogical Note", The Review of Economics and Statistics, 1991. - """ - - def __init__(self, endog, exog, constr, param=0., sigma=None): - N, Q = exog.shape - if constr.ndim == 1: - K, P = 1, constr.shape[0] - else: - K, P = constr.shape - if Q != P: - raise Exception('Constraints and design do not align') - self.ncoeffs = Q - self.nconstraint = K - self.constraint = constr - if np.isscalar(param) and K > 1: - param = np.ones((K,)) * param - self.param = param - if sigma is None: - sigma = 1. 
- if np.isscalar(sigma): - sigma = np.ones(N) * sigma - sigma = np.squeeze(sigma) - if sigma.ndim == 1: - self.sigma = np.diag(sigma) - self.cholsigmainv = np.diag(np.sqrt(sigma)) - else: - self.sigma = sigma - self.cholsigmainv = np.linalg.cholesky( - np.linalg.pinv(self.sigma)).T - super(GLS, self).__init__(endog, exog) - - _rwexog = None - - @property - def rwexog(self): - """Whitened exogenous variables augmented with restrictions""" - if self._rwexog is None: - P = self.ncoeffs - K = self.nconstraint - design = np.zeros((P + K, P + K)) - design[:P, :P] = np.dot(self.wexog.T, self.wexog) # top left - constr = np.reshape(self.constraint, (K, P)) - design[:P, P:] = constr.T # top right partition - design[P:, :P] = constr # bottom left partition - design[P:, P:] = np.zeros((K, K)) # bottom right partition - self._rwexog = design - return self._rwexog - - _inv_rwexog = None - - @property - def inv_rwexog(self): - """Inverse of self.rwexog""" - if self._inv_rwexog is None: - self._inv_rwexog = np.linalg.inv(self.rwexog) - return self._inv_rwexog - - _rwendog = None - - @property - def rwendog(self): - """Whitened endogenous variable augmented with restriction parameters""" - if self._rwendog is None: - P = self.ncoeffs - K = self.nconstraint - response = np.zeros((P + K,)) - response[:P] = np.dot(self.wexog.T, self.wendog) - response[P:] = self.param - self._rwendog = response - return self._rwendog - - _ncp = None - - @property - def rnorm_cov_params(self): - """Parameter covariance under restrictions""" - if self._ncp is None: - P = self.ncoeffs - self._ncp = self.inv_rwexog[:P, :P] - return self._ncp - - _wncp = None - - @property - def wrnorm_cov_params(self): - """ - Heteroskedasticity-consistent parameter covariance - Used to calculate White standard errors. 
- """ - if self._wncp is None: - df = self.df_resid - pred = np.dot(self.wexog, self.coeffs) - eps = np.diag((self.wendog - pred) ** 2) - sigmaSq = np.sum(eps) - pinvX = np.dot(self.rnorm_cov_params, self.wexog.T) - self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq - return self._wncp - - _coeffs = None - - @property - def coeffs(self): - """Estimated parameters""" - if self._coeffs is None: - betaLambda = np.dot(self.inv_rwexog, self.rwendog) - self._coeffs = betaLambda[:self.ncoeffs] - return self._coeffs - - def fit(self): - rncp = self.wrnorm_cov_params - lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp) - return lfit
https://api.github.com/repos/pandas-dev/pandas/pulls/3273
2013-04-08T01:07:04Z
2013-04-08T05:06:40Z
2013-04-08T05:06:40Z
2013-04-08T05:06:40Z
CLN: remove stale TODO.rst
diff --git a/TODO.rst b/TODO.rst deleted file mode 100644 index fc87174fd63bd..0000000000000 --- a/TODO.rst +++ /dev/null @@ -1,63 +0,0 @@ -DOCS 0.7.0 ----------- -- ??? no sort in groupby -- DONE concat with dict -- Gotchas re: integer indexing - -DONE ----- -- SparseSeries name integration + tests -- Refactor Series.repr - -TODO ----- -- _consolidate, does it always copy? -- Series.align with fill method. Will have to generate more Cython code -- TYPE inference in Index-- more than just datetime! - -TODO docs ---------- - -- DONE read_csv / read_table - - auto-sniff delimiter - - MultiIndex - - generally more documentation -- DONE pivot_table -- DONE Set mixed-type values with .ix -- DONE get_dtype_counts / dtypes -- DONE save / load functions -- DONE isnull/notnull as instance methods -- DONE DataFrame.to_string -- DONE IPython tab complete hook -- DONE ignore_index in DataFrame.append -- DONE describe for Series with dtype=object -- DONE as_index=False in groupby -- DONOTWANT is_monotonic -- DONE DataFrame.to_csv: different delimiters -- DONE combine_first -- DONE groupby with level name -- DONE MultiIndex get_level_values -- DONE & and | for intersection / union -- DONE Update to reflect Python 3 support in intro -- DONE Index / MultiIndex names -- DONE Unstack / stack by level name -- DONE name attribute on Series -- DONE Multi-key joining -- DONE Inner join on key -- DONE align functions -- DONE df[col_list] -- DONE Panel.rename_axis - -Performance blog ----------------- -- Series / Time series data alignment -- DataFrame alignment -- Groupby -- joining -- Take - -git log v0.6.1..master --pretty=format:%aN | sort | uniq -c | sort -rn - -git log 7ddfbd4..master --pretty=format:%aN | sort | uniq -c | sort -rn -git log a0257f5..master --pretty=format:%aN | sort | uniq -c | sort -rn -
any sentimental value there?
https://api.github.com/repos/pandas-dev/pandas/pulls/3271
2013-04-07T20:20:33Z
2013-04-08T05:06:11Z
2013-04-08T05:06:11Z
2013-04-08T05:06:12Z
ENH: Fix for #1512, added StataReader and StataWriter to pandas.io.parsers
diff --git a/RELEASE.rst b/RELEASE.rst index 1f5bd2591470b..8a3ab284b26a6 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -33,6 +33,7 @@ pandas 0.11.1 - pd.read_html() can now parse HTML string, files or urls and return dataframes courtesy of @cpcloud. (GH3477_) - Support for reading Amazon S3 files. (GH3504_) + - Added module for reading and writing Stata files: pandas.io.stata (GH1512_) **Improvements to existing features** @@ -166,6 +167,7 @@ pandas 0.11.1 .. _GH3610: https://github.com/pydata/pandas/issues/3610 .. _GH3596: https://github.com/pydata/pandas/issues/3596 .. _GH3435: https://github.com/pydata/pandas/issues/3435 +.. _GH1512: https://github.com/pydata/pandas/issues/1512 pandas 0.11.0 diff --git a/doc/source/io.rst b/doc/source/io.rst index 39f860c63e0e6..d390f46fcd39d 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1829,3 +1829,44 @@ There are a few other available functions: For now, writing your DataFrame into a database works only with **SQLite**. Moreover, the **index** will currently be **dropped**. + + +Reading from STATA format +~~~~~~~~~~~~~~~~~~~~~~ + +.. _io.StataReader: + +.. versionadded:: 0.11.1 + +The class StataReader will read the header of the given dta file at +initialization. Its function :func:'~pandas.io.StataReader.data' will +read the observations, converting them to a DataFrame which is returned: + +.. ipython:: python + reader = StataReader(dta_filepath) + dataframe = reader.data() + +The parameter convert_categoricals indicates wheter value labels should be +read and used to create a Categorical variable from them. Value labels can +also be retrieved by the function variable_labels, which requires data to be +called before. +The StataReader supports .dta Formats 104, 105, 108, 113-115. + +Alternatively, the function :func:'~pandas.io.read_stata' can be used: + +.. ipython:: python + dataframe = read_stata(dta_filepath) + + +Writing to STATA format +~~~~~~~~~~~~~~~~~~~~~~ + +.. 
_io.StataWriter: + +The function :func:'~pandas.io.StataWriter.write_file' will write a DataFrame +into a .dta file. The format version of this file is always the latest one, +115. + +.. ipython:: python + writer = StataWriter(filename, dataframe) + writer.write_file() diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt index c89118298a675..4d983905f9aaa 100644 --- a/doc/source/v0.11.1.txt +++ b/doc/source/v0.11.1.txt @@ -54,6 +54,7 @@ Enhancements - support datelike columns with a timezone as data_columns (GH2852_) - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is a list or tuple. + - Added module for reading and writing Stata files: pandas.io.stata (GH1512_) See the `full release notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker @@ -68,3 +69,4 @@ on GitHub for a complete list. .. _GH3596: https://github.com/pydata/pandas/issues/3596 .. _GH3590: https://github.com/pydata/pandas/issues/3590 .. _GH3435: https://github.com/pydata/pandas/issues/3435 +.. _GH1512: https://github.com/pydata/pandas/issues/1512 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c1f2f38dabd8b..73f789a9425c6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1280,6 +1280,35 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=parse_dates, index_col=index_col, encoding=encoding) + @classmethod + def from_dta(dta, path, parse_dates=True, convert_categoricals=True, encoding=None, index_col=None): + """ + Read Stata file into DataFrame + + Parameters + ---------- + path : string file path or file handle / StringIO + parse_dates : boolean, default True + Convert date variables to DataFrame time values + convert_categoricals : boolean, default True + Read value labels and convert columns to Categorical/Factor variables + encoding : string, None or encoding, default None + Encoding used to parse the files. Note that Stata doesn't + support unicode. None defaults to cp1252. 
+ index_col : int or sequence, default None + Column to use for index. If a sequence is given, a MultiIndex + is used. Different default from read_table + + Notes + ----- + + Returns + ------- + y : DataFrame + """ + from pandas.io.stata import read_stata + return read_stata(path, parse_dates=parse_dates, convert_categoricals=convert_categoricals, encoding=encoding, index=index_col) + def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame diff --git a/pandas/io/stata.py b/pandas/io/stata.py new file mode 100644 index 0000000000000..3fc246c2ffbc7 --- /dev/null +++ b/pandas/io/stata.py @@ -0,0 +1,911 @@ +""" +Module contains tools for processing Stata files into DataFrames + +The StataReader below was originally written by Joe Presbrey as part of PyDTA. +It has been extended and improved by Skipper Seabold from the Statsmodels project +who also developed the StataWriter and was finally added to pandas in an once again +improved version. + +You can find more information on http://presbrey.mit.edu/PyDTA and +http://statsmodels.sourceforge.net/devel/ +""" + +from StringIO import StringIO +import numpy as np + +import sys +import struct +from pandas.core.frame import DataFrame +from pandas.core.series import Series +from pandas.core.categorical import Categorical +import datetime +from pandas.util import py3compat +from pandas import isnull +from pandas.io.parsers import _parser_params, _is_url, Appender + + +_read_stata_doc = """ +Read Stata file into DataFrame + +%s +""" % (_parser_params) + + +@Appender(_read_stata_doc) +def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True, encoding=None, index=None): + reader = StataReader(filepath_or_buffer, encoding) + + return reader.data(convert_dates, convert_categoricals, index) + + +_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"] + + +def _stata_elapsed_date_to_datetime(date, fmt): + """ + Convert from SIF to datetime. 
http://www.stata.com/help.cgi?datetime + + Parameters + ---------- + date : int + The Stata Internal Format date to convert to datetime according to fmt + fmt : str + The format to convert to. Can be, tc, td, tw, tm, tq, th, ty + + Examples + -------- + >>> _stata_elapsed_date_to_datetime(52, "%tw") datetime.datetime(1961, 1, 1, 0, 0) + + Notes + ----- + datetime/c - tc + milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day + datetime/C - tC - NOT IMPLEMENTED + milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds + date - td + days since 01jan1960 (01jan1960 = 0) + weekly date - tw + weeks since 1960w1 + This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. + The datetime value is the start of the week in terms of days in the + year, not ISO calendar weeks. + monthly date - tm + months since 1960m1 + quarterly date - tq + quarters since 1960q1 + half-yearly date - th + half-years since 1960h1 yearly + date - ty + years since 0000 + + If you don't have pandas with datetime support, then you can't do + milliseconds accurately. + """ + #NOTE: we could run into overflow / loss of precision situations here + # casting to int, but I'm not sure what to do. datetime won't deal with + # numpy types and numpy datetime isn't mature enough / we can't rely on + # pandas version > 0.7.1 + #TODO: IIRC relative delta doesn't play well with np.datetime? + if np.isnan(date): + return np.datetime64('nat') + + date = int(date) + stata_epoch = datetime.datetime(1960, 1, 1) + if fmt in ["%tc", "tc"]: + from dateutil.relativedelta import relativedelta + return stata_epoch + relativedelta(microseconds=date * 1000) + elif fmt in ["%tC", "tC"]: + from warnings import warn + warn("Encountered %tC format. 
Leaving in Stata Internal Format.") + return date + elif fmt in ["%td", "td"]: + return stata_epoch + datetime.timedelta(int(date)) + elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week + year = datetime.datetime(stata_epoch.year + date // 52, 1, 1) + day_delta = (date % 52) * 7 + return year + datetime.timedelta(int(day_delta)) + elif fmt in ["%tm", "tm"]: + year = stata_epoch.year + date // 12 + month_delta = (date % 12) + 1 + return datetime.datetime(year, month_delta, 1) + elif fmt in ["%tq", "tq"]: + year = stata_epoch.year + date // 4 + month_delta = (date % 4) * 3 + 1 + return datetime.datetime(year, month_delta, 1) + elif fmt in ["%th", "th"]: + year = stata_epoch.year + date // 2 + month_delta = (date % 2) * 6 + 1 + return datetime.datetime(year, month_delta, 1) + elif fmt in ["%ty", "ty"]: + if date > 0: + return datetime.datetime(date, 1, 1) + else: # don't do negative years bc can't mix dtypes in column + raise ValueError("Year 0 and before not implemented") + else: + raise ValueError("Date fmt %s not understood" % fmt) + + +def _datetime_to_stata_elapsed(date, fmt): + """ + Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime + + Parameters + ---------- + date : datetime.datetime + The date to convert to the Stata Internal Format given by fmt + fmt : str + The format to convert to. 
Can be, tc, td, tw, tm, tq, th, ty + """ + if not isinstance(date, datetime.datetime): + raise ValueError("date should be datetime.datetime format") + stata_epoch = datetime.datetime(1960, 1, 1) + if fmt in ["%tc", "tc"]: + delta = date - stata_epoch + return (delta.days * 86400000 + delta.seconds*1000 + + delta.microseconds/1000) + elif fmt in ["%tC", "tC"]: + from warnings import warn + warn("Stata Internal Format tC not supported.") + return date + elif fmt in ["%td", "td"]: + return (date - stata_epoch).days + elif fmt in ["%tw", "tw"]: + return (52*(date.year-stata_epoch.year) + + (date - datetime.datetime(date.year, 1, 1)).days / 7) + elif fmt in ["%tm", "tm"]: + return (12 * (date.year - stata_epoch.year) + date.month - 1) + elif fmt in ["%tq", "tq"]: + return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3) + elif fmt in ["%th", "th"]: + return 2 * (date.year - stata_epoch.year) + int(date.month > 6) + elif fmt in ["%ty", "ty"]: + return date.year + else: + raise ValueError("fmt %s not understood" % fmt) + + +class StataMissingValue(object): + """ + An observation's missing value. + + Parameters + ----------- + offset + value + + Attributes + ---------- + string + value + + Notes + ----- + More information: <http://www.stata.com/help.cgi?missing> + """ + + def __init__(self, offset, value): + self._value = value + if type(value) is int or type(value) is long: + self._str = value - offset is 1 and \ + '.' or ('.' + chr(value - offset + 96)) + else: + self._str = '.' + string = property(lambda self: self._str, doc="The Stata representation of the missing value: '.', '.a'..'.z'") + value = property(lambda self: self._value, doc='The binary representation of the missing value.') + + def __str__(self): + return self._str + + __str__.__doc__ = string.__doc__ + + +class StataParser(object): + def __init__(self, encoding): + if(encoding is None): + self._encoding = 'cp1252' + else: + self._encoding = encoding + + #type code. 
+ #-------------------- + #str1 1 = 0x01 + #str2 2 = 0x02 + #... + #str244 244 = 0xf4 + #byte 251 = 0xfb (sic) + #int 252 = 0xfc + #long 253 = 0xfd + #float 254 = 0xfe + #double 255 = 0xff + #-------------------- + #NOTE: the byte type seems to be reserved for categorical variables + # with a label, but the underlying variable is -127 to 100 + # we're going to drop the label and cast to int + self.DTYPE_MAP = \ + dict( + zip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) + + [ + (251, np.int16), + (252, np.int32), + (253, np.int64), + (254, np.float32), + (255, np.float64) + ] + ) + self.TYPE_MAP = range(251) + list('bhlfd') + #NOTE: technically, some of these are wrong. there are more numbers + # that can be represented. it's the 27 ABOVE and BELOW the max listed + # numeric data type in [U] 12.2.2 of the 11.2 manual + self.MISSING_VALUES = \ + { + 'b': (-127, 100), + 'h': (-32767, 32740), + 'l': (-2147483647, 2147483620), + 'f': (-1.701e+38, +1.701e+38), + 'd': (-1.798e+308, +8.988e+307) + } + + self.OLD_TYPE_MAPPING = \ + { + 'i': 252, + 'f': 254, + 'b': 251 + } + + def _decode_bytes(self, str, errors=None): + if py3compat.PY3: + return str.decode(self._encoding, errors) + else: + return str + + +class StataReader(StataParser): + """ + Class for working with a Stata dataset. There are two possibilities for usage: + + * The from_dta() method on the DataFrame class. + This will return a DataFrame with the Stata dataset. Note that when using the + from_dta() method, you will not have access to meta-information like variable + labels or the data label. + + * Work with this object directly. Upon instantiation, the header of the Stata data + file is read, giving you access to attributes like variable_labels(), data_label(), + nobs(), ... A DataFrame with the data is returned by the read() method; this will + also fill up the value_labels. Note that calling the value_labels() method will + result in an error if the read() method has not been called yet. 
This is because + the value labels are stored at the end of a Stata dataset, after the data. + + Parameters + ---------- + path_or_buf : string or file-like object + Path to .dta file or object implementing a binary read() functions + encoding : string, None or encoding + Encoding used to parse the files. Note that Stata doesn't + support unicode. None defaults to cp1252. + """ + def __init__(self, path_or_buf, encoding=None): + super(StataReader, self).__init__(encoding) + self.col_sizes = () + self._has_string_data = False + self._missing_values = False + self._data_read = False + self._value_labels_read = False + if isinstance(path_or_buf, str) and _is_url(path_or_buf): + from urllib.request import urlopen + path_or_buf = urlopen(path_or_buf) + if py3compat.PY3: # pragma: no cover + if self._encoding: + errors = 'strict' + else: + errors = 'replace' + self._encoding = 'cp1252' + bytes = path_or_buf.read() + self.path_or_buf = StringIO(self._decode_bytes(bytes, errors)) + elif type(path_or_buf) is str: + self.path_or_buf = open(path_or_buf, 'rb') + else: + self.path_or_buf = path_or_buf + + self._read_header() + + def _read_header(self): + # header + self.format_version = struct.unpack('b', self.path_or_buf.read(1))[0] + if self.format_version not in [104, 105, 108, 113, 114, 115]: + raise ValueError("Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11) or 115 (Stata 12)") + self.byteorder = self.path_or_buf.read(1) == 0x1 and '>' or '<' + self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0] + self.path_or_buf.read(1) # unused + + self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] + self.nobs = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] + if self.format_version > 105: + self.data_label = self.path_or_buf.read(81) + else: + self.data_label = self.path_or_buf.read(32) + if self.format_version > 104: + self.time_stamp = self.path_or_buf.read(18) + + # descriptors + if 
self.format_version > 108: + typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)] + else: + typlist = [self.OLD_TYPE_MAPPING[self._decode_bytes(self.path_or_buf.read(1))] for i in range(self.nvar)] + self.typlist = [self.TYPE_MAP[typ] for typ in typlist] + self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] + if self.format_version > 108: + self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)] + else: + self.varlist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)] + self.srtlist = struct.unpack(self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)))[:-1] + if self.format_version > 113: + self.fmtlist = [self._null_terminate(self.path_or_buf.read(49)) for i in range(self.nvar)] + elif self.format_version > 104: + self.fmtlist = [self._null_terminate(self.path_or_buf.read(12)) for i in range(self.nvar)] + else: + self.fmtlist = [self._null_terminate(self.path_or_buf.read(7)) for i in range(self.nvar)] + if self.format_version > 108: + self.lbllist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)] + else: + self.lbllist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)] + if self.format_version > 105: + self.vlblist = [self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)] + else: + self.vlblist = [self._null_terminate(self.path_or_buf.read(32)) for i in range(self.nvar)] + + # ignore expansion fields (Format 105 and later) + # When reading, read five bytes; the last four bytes now tell you the + # size of the next read, which you discard. You then continue like + # this until you read 5 bytes of zeros. 
+ + if self.format_version > 104: + while True: + data_type = struct.unpack(self.byteorder + 'b', self.path_or_buf.read(1))[0] + if self.format_version > 108: + data_len = struct.unpack(self.byteorder + 'i', self.path_or_buf.read(4))[0] + else: + data_len = struct.unpack(self.byteorder + 'h', self.path_or_buf.read(2))[0] + if data_type == 0: + break + self.path_or_buf.read(data_len) + + # necessary data to continue parsing + self.data_location = self.path_or_buf.tell() + self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0 + self._col_size() + + def _calcsize(self, fmt): + return type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt) + + def _col_size(self, k=None): + """Calculate size of a data record.""" + if len(self.col_sizes) == 0: + self.col_sizes = map(lambda x: self._calcsize(x), self.typlist) + if k is None: + return self.col_sizes + else: + return self.col_sizes[k] + + def _unpack(self, fmt, byt): + d = struct.unpack(self.byteorder + fmt, byt)[0] + if fmt[-1] in self.MISSING_VALUES: + nmin, nmax = self.MISSING_VALUES[fmt[-1]] + if d < nmin or d > nmax: + if self._missing_values: + return StataMissingValue(nmax, d) + else: + return None + return d + + def _null_terminate(self, s): + if py3compat.PY3: # have bytes not strings, so must decode + null_byte = b"\0" + try: + s = s[:s.index(null_byte)] + except: + pass + return s.decode(self._encoding) + else: + null_byte = "\0" + try: + return s.lstrip(null_byte)[:s.index(null_byte)] + except: + return s + + def _next(self): + typlist = self.typlist + if self._has_string_data: + data = [None] * self.nvar + for i in range(len(data)): + if type(typlist[i]) is int: + data[i] = self._null_terminate(self.path_or_buf.read(typlist[i])) + else: + data[i] = self._unpack(typlist[i], self.path_or_buf.read(self._col_size(i))) + return data + else: + return map(lambda i: self._unpack(typlist[i], + self.path_or_buf.read(self._col_size(i))), + range(self.nvar)) + + def _dataset(self): + """ + 
Returns a Python generator object for iterating over the dataset. + + + Parameters + ---------- + + Returns + ------- + Generator object for iterating over the dataset. Yields each row of + observations as a list by default. + + Notes + ----- + If missing_values is True during instantiation of StataReader then + observations with _StataMissingValue(s) are not filtered and should + be handled by your applcation. + """ + + try: + self._file.seek(self._data_location) + except Exception: + pass + + for i in range(self.nobs): + yield self._next() + + def _read_value_labels(self): + if not self._data_read: + raise Exception("Data has not been read. Because of the layout of Stata files, this is necessary before reading value labels.") + if self._value_labels_read: + raise Exception("Value labels have already been read.") + + self.value_label_dict = dict() + + if self.format_version <= 108: + return # Value labels are not supported in version 108 and earlier. + + while True: + slength = self.path_or_buf.read(4) + if not slength: + break # end of variable lable table + labname = self._null_terminate(self.path_or_buf.read(33)) + self.path_or_buf.read(3) # padding + + n = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] + txtlen = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] + off = [] + for i in range(n): + off.append(struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]) + val = [] + for i in range(n): + val.append(struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]) + txt = self.path_or_buf.read(txtlen) + self.value_label_dict[labname] = dict() + for i in range(n): + self.value_label_dict[labname][val[i]] = self._null_terminate(txt[off[i]:]) + self._value_labels_read = True + + def data(self, convert_dates=True, convert_categoricals=True, index=None): + """ + Reads observations from Stata file, converting them into a dataframe + + Parameters + ---------- + convert_dates : boolean, defaults to True + Convert date 
variables to DataFrame time values + convert_categoricals : boolean, defaults to True + Read value labels and convert columns to Categorical/Factor variables + index : identifier of index column + identifier of column that should be used as index of the DataFrame + + Returns + ------- + y : DataFrame instance + """ + if self._data_read: + raise Exception("Data has already been read.") + self._data_read = True + + stata_dta = self._dataset() + + data = [] + for rownum, line in enumerate(stata_dta): + # doesn't handle missing value objects, just casts + # None will only work without missing value object. + for i, val in enumerate(line): + #NOTE: This will only be scalar types because missing strings + # are empty not None in Stata + if val is None: + line[i] = np.nan + data.append(tuple(line)) + + if convert_categoricals: + self._read_value_labels() + + data = DataFrame(data, columns=self.varlist, index=index) + + cols_ = np.where(self.dtyplist)[0] + for i in cols_: + if self.dtyplist[i] is not None: + col = data.columns[i] + data[col] = Series(data[col], data[col].index, self.dtyplist[i]) + + if convert_dates: + cols = np.where(map(lambda x: x in _date_formats, self.fmtlist))[0] + for i in cols: + col = data.columns[i] + data[col] = data[col].apply(_stata_elapsed_date_to_datetime, args=(self.fmtlist[i],)) + + if convert_categoricals: + cols = np.where(map(lambda x: x in self.value_label_dict.iterkeys(), self.lbllist))[0] + for i in cols: + col = data.columns[i] + labeled_data = np.copy(data[col]) + labeled_data = labeled_data.astype(object) + for k, v in self.value_label_dict[self.lbllist[i]].iteritems(): + labeled_data[data[col] == k] = v + data[col] = Categorical.from_array(labeled_data) + + return data + + def data_label(self): + """Returns data label of Stata file""" + return self.data_label + + def variable_labels(self): + """Returns variable labels as a dict, associating each variable name with corresponding label""" + return dict(zip(self.varlist, 
self.vlblist)) + + def value_labels(self): + """Returns a dict, associating each variable name a dict, associating each value its corresponding label""" + if not self._value_labels_read: + self._read_value_labels() + + return self.value_label_dict + + +def _open_file_binary_write(fname, encoding): + if hasattr(fname, 'write'): + #if 'b' not in fname.mode: + return fname + return open(fname, "wb") + + +def _set_endianness(endianness): + if endianness.lower() in ["<", "little"]: + return "<" + elif endianness.lower() in [">", "big"]: + return ">" + else: # pragma : no cover + raise ValueError("Endianness %s not understood" % endianness) + + +def _pad_bytes(name, length): + """ + Takes a char string and pads it wih null bytes until it's length chars + """ + return name + "\x00" * (length - len(name)) + + +def _default_names(nvar): + """ + Returns default Stata names v1, v2, ... vnvar + """ + return ["v%d" % i for i in range(1, nvar+1)] + + +def _convert_datetime_to_stata_type(fmt): + """ + Converts from one of the stata date formats to a type in TYPE_MAP + """ + if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", + "%tq", "th", "%th", "ty", "%ty"]: + return np.float64 # Stata expects doubles for SIFs + else: + raise ValueError("fmt %s not understood" % fmt) + + +def _maybe_convert_to_int_keys(convert_dates, varlist): + new_dict = {} + for key in convert_dates: + if not convert_dates[key].startswith("%"): # make sure proper fmts + convert_dates[key] = "%" + convert_dates[key] + if key in varlist: + new_dict.update({varlist.index(key): convert_dates[key]}) + else: + if not isinstance(key, int): + raise ValueError("convery_dates key is not in varlist and is not an int") + new_dict.update({key: convert_dates[key]}) + return new_dict + + +def _dtype_to_stata_type(dtype): + """ + Converts dtype types to stata types. Returns the byte of the given ordinal. + See TYPE_MAP and comments for an explanation. This is also explained in + the dta spec. 
+ 1 - 244 are strings of this length + 251 - chr(251) - for int8 and int16, byte + 252 - chr(252) - for int32, int + 253 - chr(253) - for int64, long + 254 - chr(254) - for float32, float + 255 - chr(255) - double, double + + If there are dates to convert, then dtype will already have the correct + type inserted. + """ + #TODO: expand to handle datetime to integer conversion + if dtype.type == np.string_: + return chr(dtype.itemsize) + elif dtype.type == np.object_: # try to coerce it to the biggest string + # not memory efficient, what else could we do? + return chr(244) + elif dtype == np.float64: + return chr(255) + elif dtype == np.float32: + return chr(254) + elif dtype == np.int64: + return chr(253) + elif dtype == np.int32: + return chr(252) + elif dtype == np.int8 or dtype == np.int16: + return chr(251) + else: # pragma : no cover + raise ValueError("Data type %s not currently understood. " + "Please report an error to the developers." % dtype) + + +def _dtype_to_default_stata_fmt(dtype): + """ + Maps numpy dtype to stata's default format for this type. Not terribly + important since users can change this in Stata. Semantics are + + string -> "%DDs" where DD is the length of the string + float64 -> "%10.0g" + float32 -> "%9.0g" + int64 -> "%9.0g" + int32 -> "%12.0g" + int16 -> "%8.0g" + int8 -> "%8.0g" + """ + #TODO: expand this to handle a default datetime format? + if dtype.type == np.string_: + return "%" + str(dtype.itemsize) + "s" + elif dtype.type == np.object_: + return "%244s" + elif dtype == np.float64: + return "%10.0g" + elif dtype == np.float32: + return "%9.0g" + elif dtype == np.int64: + return "%9.0g" + elif dtype == np.int32: + return "%12.0g" + elif dtype == np.int8 or dtype == np.int16: + return "%8.0g" + else: # pragma : no cover + raise ValueError("Data type %s not currently understood. " + "Please report an error to the developers." 
% dtype) + + +class StataWriter(StataParser): + """ + A class for writing Stata binary dta files from array-like objects + + Parameters + ---------- + fname : file path or buffer + Where to save the dta file. + data : array-like + Array-like input to save. Pandas objects are also accepted. + convert_dates : dict + Dictionary mapping column of datetime types to the stata internal + format that you want to use for the dates. Options are + 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a + number or a name. + encoding : str + Default is latin-1. Note that Stata does not support unicode. + byteorder : str + Can be ">", "<", "little", or "big". The default is None which uses + `sys.byteorder` + + Returns + ------- + writer : StataWriter instance + The StataWriter instance has a write_file method, which will + write the file to the given `fname`. + + Examples + -------- + >>> writer = StataWriter('./data_file.dta', data) + >>> writer.write_file() + + Or with dates + + >>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'}) + >>> writer.write_file() + """ + def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1", + byteorder=None): + super(StataWriter, self).__init__(encoding) + self._convert_dates = convert_dates + self._write_index = write_index + # attach nobs, nvars, data, varlist, typlist + self._prepare_pandas(data) + + if byteorder is None: + byteorder = sys.byteorder + self._byteorder = _set_endianness(byteorder) + self._file = _open_file_binary_write(fname, self._encoding) + self.type_converters = {253: np.long, 252: int} + + def _write(self, to_write): + """ + Helper to call encode before writing to file for Python 3 compat. 
+ """ + if py3compat.PY3: + self._file.write(to_write.encode(self._encoding)) + else: + self._file.write(to_write) + + def _prepare_pandas(self, data): + #NOTE: we might need a different API / class for pandas objects so + # we can set different semantics - handle this with a PR to pandas.io + class DataFrameRowIter(object): + def __init__(self, data): + self.data = data + + def __iter__(self): + for i, row in data.iterrows(): + yield row + + if self._write_index: + data = data.reset_index() + self.datarows = DataFrameRowIter(data) + self.nobs, self.nvar = data.shape + self.data = data + self.varlist = data.columns.tolist() + dtypes = data.dtypes + if self._convert_dates is not None: + self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates, self.varlist) + for key in self._convert_dates: + new_type = _convert_datetime_to_stata_type(self._convert_dates[key]) + dtypes[key] = np.dtype(new_type) + self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes] + self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes] + # set the given format for the datetime cols + if self._convert_dates is not None: + for key in self._convert_dates: + self.fmtlist[key] = self._convert_dates[key] + + def write_file(self): + self._write_header() + self._write_descriptors() + self._write_variable_labels() + # write 5 zeros for expansion fields + self._write(_pad_bytes("", 5)) + if self._convert_dates is None: + self._write_data_nodates() + else: + self._write_data_dates() + #self._write_value_labels() + self._file.close() + + def _write_header(self, data_label=None, time_stamp=None): + byteorder = self._byteorder + # ds_format - just use 114 + self._file.write(struct.pack("b", 114)) + # byteorder + self._write(byteorder == ">" and "\x01" or "\x02") + # filetype + self._write("\x01") + # unused + self._write("\x00") + # number of vars, 2 bytes + self._file.write(struct.pack(byteorder+"h", self.nvar)[:2]) + # number of obs, 4 bytes + 
self._file.write(struct.pack(byteorder+"i", self.nobs)[:4]) + # data label 81 bytes, char, null terminated + if data_label is None: + self._file.write(self._null_terminate(_pad_bytes("", 80))) + else: + self._file.write(self._null_terminate(_pad_bytes(data_label[:80], 80))) + # time stamp, 18 bytes, char, null terminated + # format dd Mon yyyy hh:mm + if time_stamp is None: + time_stamp = datetime.datetime.now() + elif not isinstance(time_stamp, datetime): + raise ValueError("time_stamp should be datetime type") + self._file.write(self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))) + + def _write_descriptors(self, typlist=None, varlist=None, srtlist=None, + fmtlist=None, lbllist=None): + nvar = self.nvar + # typlist, length nvar, format byte array + for typ in self.typlist: + self._write(typ) + + # varlist, length 33*nvar, char array, null terminated + for name in self.varlist: + name = self._null_terminate(name, True) + name = _pad_bytes(name[:32], 33) + self._write(name) + + # srtlist, 2*(nvar+1), int array, encoded by byteorder + srtlist = _pad_bytes("", (2*(nvar+1))) + self._write(srtlist) + + # fmtlist, 49*nvar, char array + for fmt in self.fmtlist: + self._write(_pad_bytes(fmt, 49)) + + # lbllist, 33*nvar, char array + #NOTE: this is where you could get fancy with pandas categorical type + for i in range(nvar): + self._write(_pad_bytes("", 33)) + + def _write_variable_labels(self, labels=None): + nvar = self.nvar + if labels is None: + for i in range(nvar): + self._write(_pad_bytes("", 81)) + + def _write_data_nodates(self): + data = self.datarows + byteorder = self._byteorder + TYPE_MAP = self.TYPE_MAP + typlist = self.typlist + for row in data: + #row = row.squeeze().tolist() # needed for structured arrays + for i, var in enumerate(row): + typ = ord(typlist[i]) + if typ <= 244: # we've got a string + if len(var) < typ: + var = _pad_bytes(self._decode_bytes(var), len(var) + 1) + self._write(var) + else: + try: + 
self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var)) + except struct.error: + # have to be strict about type pack won't do any + # kind of casting + self._file.write(struct.pack(byteorder+TYPE_MAP[typ], + self.type_converters[typ](var))) + + def _write_data_dates(self): + convert_dates = self._convert_dates + data = self.datarows + byteorder = self._byteorder + TYPE_MAP = self.TYPE_MAP + MISSING_VALUES = self.MISSING_VALUES + typlist = self.typlist + for row in data: + #row = row.squeeze().tolist() # needed for structured arrays + for i, var in enumerate(row): + typ = ord(typlist[i]) + #NOTE: If anyone finds this terribly slow, there is + # a vectorized way to convert dates, see genfromdta for going + # from int to datetime and reverse it. will copy data though + if i in convert_dates: + var = _datetime_to_stata_elapsed(var, self.fmtlist[i]) + if typ <= 244: # we've got a string + if isnull(var): + var = "" # missing string + if len(var) < typ: + var = _pad_bytes(var, len(var) + 1) + self._write(var) + else: + if isnull(var): # this only matters for floats + var = MISSING_VALUES[typ] + self._write(struct.pack(byteorder+TYPE_MAP[typ], var)) + + def _null_terminate(self, s, as_string=False): + null_byte = '\x00' + if py3compat.PY3 and not as_string: + s += null_byte + return s.encode(self._encoding) + else: + s += null_byte + return s diff --git a/pandas/io/tests/data/stata1.dta b/pandas/io/tests/data/stata1.dta new file mode 100644 index 0000000000000..7df75d0d0cded Binary files /dev/null and b/pandas/io/tests/data/stata1.dta differ diff --git a/pandas/io/tests/data/stata2.dta b/pandas/io/tests/data/stata2.dta new file mode 100644 index 0000000000000..c60cf480ad5dd Binary files /dev/null and b/pandas/io/tests/data/stata2.dta differ diff --git a/pandas/io/tests/data/stata3.csv b/pandas/io/tests/data/stata3.csv new file mode 100644 index 0000000000000..25175f7f706ed --- /dev/null +++ b/pandas/io/tests/data/stata3.csv @@ -0,0 +1,204 @@ 
+"year","quarter","realgdp","realcons","realinv","realgovt","realdpi","cpi","m1","tbilrate","unemp","pop","infl","realint" +1959,1,2710.349,1707.4,286.898,470.045,1886.9,28.980,139.7,2.82,5.8,177.146,0,0 +1959,2,2778.801,1733.7,310.859,481.301,1919.7,29.150,141.7,3.08,5.1,177.830,2.34,0.74 +1959,3,2775.488,1751.8,289.226,491.260,1916.4,29.350,140.5,3.82,5.3,178.657,2.74,1.09 +1959,4,2785.204,1753.7,299.356,484.052,1931.3,29.370,140,4.33,5.6,179.386,0.27,4.06 +1960,1,2847.699,1770.5,331.722,462.199,1955.5,29.540,139.6,3.50,5.2,180.007,2.31,1.19 +1960,2,2834.390,1792.9,298.152,460.400,1966.1,29.550,140.2,2.68,5.2,180.671,0.14,2.55 +1960,3,2839.022,1785.8,296.375,474.676,1967.8,29.750,140.9,2.36,5.6,181.528,2.7,-0.34 +1960,4,2802.616,1788.2,259.764,476.434,1966.6,29.840,141.1,2.29,6.3,182.287,1.21,1.08 +1961,1,2819.264,1787.7,266.405,475.854,1984.5,29.810,142.1,2.37,6.8,182.992,-0.4,2.77 +1961,2,2872.005,1814.3,286.246,480.328,2014.4,29.920,142.9,2.29,7,183.691,1.47,0.81 +1961,3,2918.419,1823.1,310.227,493.828,2041.9,29.980,144.1,2.32,6.8,184.524,0.8,1.52 +1961,4,2977.830,1859.6,315.463,502.521,2082.0,30.040,145.2,2.60,6.2,185.242,0.8,1.8 +1962,1,3031.241,1879.4,334.271,520.960,2101.7,30.210,146.4,2.73,5.6,185.874,2.26,0.47 +1962,2,3064.709,1902.5,331.039,523.066,2125.2,30.220,146.5,2.78,5.5,186.538,0.13,2.65 +1962,3,3093.047,1917.9,336.962,538.838,2137.0,30.380,146.7,2.78,5.6,187.323,2.11,0.67 +1962,4,3100.563,1945.1,325.650,535.912,2154.6,30.440,148.3,2.87,5.5,188.013,0.79,2.08 +1963,1,3141.087,1958.2,343.721,522.917,2172.5,30.480,149.7,2.90,5.8,188.580,0.53,2.38 +1963,2,3180.447,1976.9,348.730,518.108,2193.1,30.690,151.3,3.03,5.7,189.242,2.75,0.29 +1963,3,3240.332,2003.8,360.102,546.893,2217.9,30.750,152.6,3.38,5.5,190.028,0.78,2.6 +1963,4,3264.967,2020.6,364.534,532.383,2254.6,30.940,153.7,3.52,5.6,190.668,2.46,1.06 +1964,1,3338.246,2060.5,379.523,529.686,2299.6,30.950,154.8,3.51,5.5,191.245,0.13,3.38 
+1964,2,3376.587,2096.7,377.778,526.175,2362.1,31.020,156.8,3.47,5.2,191.889,0.9,2.57 +1964,3,3422.469,2135.2,386.754,522.008,2392.7,31.120,159.2,3.53,5,192.631,1.29,2.25 +1964,4,3431.957,2141.2,389.910,514.603,2420.4,31.280,160.7,3.76,5,193.223,2.05,1.71 +1965,1,3516.251,2188.8,429.145,508.006,2447.4,31.380,162,3.93,4.9,193.709,1.28,2.65 +1965,2,3563.960,2213.0,429.119,508.931,2474.5,31.580,163.1,3.84,4.7,194.303,2.54,1.3 +1965,3,3636.285,2251.0,444.444,529.446,2542.6,31.650,166,3.93,4.4,194.997,0.89,3.04 +1965,4,3724.014,2314.3,446.493,544.121,2594.1,31.880,169.1,4.35,4.1,195.539,2.9,1.46 +1966,1,3815.423,2348.5,484.244,556.593,2618.4,32.280,171.8,4.62,3.9,195.999,4.99,-0.37 +1966,2,3828.124,2354.5,475.408,571.371,2624.7,32.450,170.3,4.65,3.8,196.560,2.1,2.55 +1966,3,3853.301,2381.5,470.697,594.514,2657.8,32.850,171.2,5.23,3.8,197.207,4.9,0.33 +1966,4,3884.520,2391.4,472.957,599.528,2688.2,32.900,171.9,5.00,3.7,197.736,0.61,4.39 +1967,1,3918.740,2405.3,460.007,640.682,2728.4,33.100,174.2,4.22,3.8,198.206,2.42,1.8 +1967,2,3919.556,2438.1,440.393,631.430,2750.8,33.400,178.1,3.78,3.8,198.712,3.61,0.17 +1967,3,3950.826,2450.6,453.033,641.504,2777.1,33.700,181.6,4.42,3.8,199.311,3.58,0.84 +1967,4,3980.970,2465.7,462.834,640.234,2797.4,34.100,184.3,4.90,3.9,199.808,4.72,0.18 +1968,1,4063.013,2524.6,472.907,651.378,2846.2,34.400,186.6,5.18,3.7,200.208,3.5,1.67 +1968,2,4131.998,2563.3,492.026,646.145,2893.5,34.900,190.5,5.50,3.5,200.706,5.77,-0.28 +1968,3,4160.267,2611.5,476.053,640.615,2899.3,35.300,194,5.21,3.5,201.290,4.56,0.65 +1968,4,4178.293,2623.5,480.998,636.729,2918.4,35.700,198.7,5.85,3.4,201.760,4.51,1.34 +1969,1,4244.100,2652.9,512.686,633.224,2923.4,36.300,200.7,6.08,3.4,202.161,6.67,-0.58 +1969,2,4256.460,2669.8,508.601,623.160,2952.9,36.800,201.7,6.49,3.4,202.677,5.47,1.02 +1969,3,4283.378,2682.7,520.360,623.613,3012.9,37.300,202.9,7.02,3.6,203.302,5.4,1.63 +1969,4,4263.261,2704.1,492.334,606.900,3034.9,37.900,206.2,7.64,3.6,203.849,6.38,1.26 
+1970,1,4256.573,2720.7,476.925,594.888,3050.1,38.500,206.7,6.76,4.2,204.401,6.28,0.47 +1970,2,4264.289,2733.2,478.419,576.257,3103.5,38.900,208,6.66,4.8,205.052,4.13,2.52 +1970,3,4302.259,2757.1,486.594,567.743,3145.4,39.400,212.9,6.15,5.2,205.788,5.11,1.04 +1970,4,4256.637,2749.6,458.406,564.666,3135.1,39.900,215.5,4.86,5.8,206.466,5.04,-0.18 +1971,1,4374.016,2802.2,517.935,542.709,3197.3,40.100,220,3.65,5.9,207.065,2,1.65 +1971,2,4398.829,2827.9,533.986,534.905,3245.3,40.600,224.9,4.76,5.9,207.661,4.96,-0.19 +1971,3,4433.943,2850.4,541.010,532.646,3259.7,40.900,227.2,4.70,6,208.345,2.94,1.75 +1971,4,4446.264,2897.8,524.085,516.140,3294.2,41.200,230.1,3.87,6,208.917,2.92,0.95 +1972,1,4525.769,2936.5,561.147,518.192,3314.9,41.500,235.6,3.55,5.8,209.386,2.9,0.64 +1972,2,4633.101,2992.6,595.495,526.473,3346.1,41.800,238.8,3.86,5.7,209.896,2.88,0.98 +1972,3,4677.503,3038.8,603.970,498.116,3414.6,42.200,245,4.47,5.6,210.479,3.81,0.66 +1972,4,4754.546,3110.1,607.104,496.540,3550.5,42.700,251.5,5.09,5.3,210.985,4.71,0.38 +1973,1,4876.166,3167.0,645.654,504.838,3590.7,43.700,252.7,5.98,5,211.420,9.26,-3.28 +1973,2,4932.571,3165.4,675.837,497.033,3626.2,44.200,257.5,7.19,4.9,211.909,4.55,2.64 +1973,3,4906.252,3176.7,649.412,475.897,3644.4,45.600,259,8.06,4.8,212.475,12.47,-4.41 +1973,4,4953.050,3167.4,674.253,476.174,3688.9,46.800,263.8,7.68,4.8,212.932,10.39,-2.71 +1974,1,4909.617,3139.7,631.230,491.043,3632.3,48.100,267.2,7.80,5.1,213.361,10.96,-3.16 +1974,2,4922.188,3150.6,628.102,490.177,3601.1,49.300,269.3,7.89,5.2,213.854,9.86,-1.96 +1974,3,4873.520,3163.6,592.672,492.586,3612.4,51.000,272.3,8.16,5.6,214.451,13.56,-5.4 +1974,4,4854.340,3117.3,598.306,496.176,3596.0,52.300,273.9,6.96,6.6,214.931,10.07,-3.11 +1975,1,4795.295,3143.4,493.212,490.603,3581.9,53.000,276.2,5.53,8.2,215.353,5.32,0.22 +1975,2,4831.942,3195.8,476.085,486.679,3749.3,54.000,283.7,5.57,8.9,215.973,7.48,-1.91 +1975,3,4913.328,3241.4,516.402,498.836,3698.6,54.900,285.4,6.27,8.5,216.587,6.61,-0.34 
+1975,4,4977.511,3275.7,530.596,500.141,3736.0,55.800,288.4,5.26,8.3,217.095,6.5,-1.24 +1976,1,5090.663,3341.2,585.541,495.568,3791.0,56.100,294.7,4.91,7.7,217.528,2.14,2.77 +1976,2,5128.947,3371.8,610.513,494.532,3822.2,57.000,297.2,5.28,7.6,218.035,6.37,-1.09 +1976,3,5154.072,3407.5,611.646,493.141,3856.7,57.900,302,5.05,7.7,218.644,6.27,-1.22 +1976,4,5191.499,3451.8,615.898,494.415,3884.4,58.700,308.3,4.57,7.8,219.179,5.49,-0.92 +1977,1,5251.762,3491.3,646.198,498.509,3887.5,60.000,316,4.60,7.5,219.684,8.76,-4.16 +1977,2,5356.131,3510.6,696.141,506.695,3931.8,60.800,320.2,5.06,7.1,220.239,5.3,-0.24 +1977,3,5451.921,3544.1,734.078,509.605,3990.8,61.600,326.4,5.82,6.9,220.904,5.23,0.59 +1977,4,5450.793,3597.5,713.356,504.584,4071.2,62.700,334.4,6.20,6.6,221.477,7.08,-0.88 +1978,1,5469.405,3618.5,727.504,506.314,4096.4,63.900,339.9,6.34,6.3,221.991,7.58,-1.24 +1978,2,5684.569,3695.9,777.454,518.366,4143.4,65.500,347.6,6.72,6,222.585,9.89,-3.18 +1978,3,5740.300,3711.4,801.452,520.199,4177.1,67.100,353.3,7.64,6,223.271,9.65,-2.01 +1978,4,5816.222,3741.3,819.689,524.782,4209.8,68.500,358.6,9.02,5.9,223.865,8.26,0.76 +1979,1,5825.949,3760.2,819.556,525.524,4255.9,70.600,368,9.42,5.9,224.438,12.08,-2.66 +1979,2,5831.418,3758.0,817.660,532.040,4226.1,73.000,377.2,9.30,5.7,225.055,13.37,-4.07 +1979,3,5873.335,3794.9,801.742,531.232,4250.3,75.200,380.8,10.49,5.9,225.801,11.88,-1.38 +1979,4,5889.495,3805.0,786.817,531.126,4284.3,78.000,385.8,11.94,5.9,226.451,14.62,-2.68 +1980,1,5908.467,3798.4,781.114,548.115,4296.2,80.900,383.8,13.75,6.3,227.061,14.6,-0.85 +1980,2,5787.373,3712.2,710.640,561.895,4236.1,82.600,394,7.90,7.3,227.726,8.32,-0.42 +1980,3,5776.617,3752.0,656.477,554.292,4279.7,84.700,409,10.34,7.7,228.417,10.04,0.3 +1980,4,5883.460,3802.0,723.220,556.130,4368.1,87.200,411.3,14.75,7.4,228.937,11.64,3.11 +1981,1,6005.717,3822.8,795.091,567.618,4358.1,89.100,427.4,13.95,7.4,229.403,8.62,5.32 
+1981,2,5957.795,3822.8,757.240,584.540,4358.6,91.500,426.9,15.33,7.4,229.966,10.63,4.69 +1981,3,6030.184,3838.3,804.242,583.890,4455.4,93.400,428.4,14.58,7.4,230.641,8.22,6.36 +1981,4,5955.062,3809.3,773.053,590.125,4464.4,94.400,442.7,11.33,8.2,231.157,4.26,7.07 +1982,1,5857.333,3833.9,692.514,591.043,4469.6,95.000,447.1,12.95,8.8,231.645,2.53,10.42 +1982,2,5889.074,3847.7,691.900,596.403,4500.8,97.500,448,11.97,9.4,232.188,10.39,1.58 +1982,3,5866.370,3877.2,683.825,605.370,4520.6,98.100,464.5,8.10,9.9,232.816,2.45,5.65 +1982,4,5871.001,3947.9,622.930,623.307,4536.4,97.900,477.2,7.96,10.7,233.322,-0.82,8.77 +1983,1,5944.020,3986.6,645.110,630.873,4572.2,98.800,493.2,8.22,10.4,233.781,3.66,4.56 +1983,2,6077.619,4065.7,707.372,644.322,4605.5,99.800,507.8,8.69,10.1,234.307,4.03,4.66 +1983,3,6197.468,4137.6,754.937,662.412,4674.7,100.800,517.2,8.99,9.4,234.907,3.99,5.01 +1983,4,6325.574,4203.2,834.427,639.197,4771.1,102.100,525.1,8.89,8.5,235.385,5.13,3.76 +1984,1,6448.264,4239.2,921.763,644.635,4875.4,103.300,535,9.43,7.9,235.839,4.67,4.76 +1984,2,6559.594,4299.9,952.841,664.839,4959.4,104.100,540.9,9.94,7.5,236.348,3.09,6.85 +1984,3,6623.343,4333.0,974.989,662.294,5036.6,105.100,543.7,10.19,7.4,236.976,3.82,6.37 +1984,4,6677.264,4390.1,958.993,684.282,5084.5,105.700,557,8.14,7.3,237.468,2.28,5.87 +1985,1,6740.275,4464.6,927.375,691.613,5072.0,107.000,570.4,8.25,7.3,237.900,4.89,3.36 +1985,2,6797.344,4505.2,943.383,708.524,5172.7,107.700,589.1,7.17,7.3,238.466,2.61,4.56 +1985,3,6903.523,4590.8,932.959,732.305,5140.7,108.500,607.8,7.13,7.2,239.113,2.96,4.17 +1985,4,6955.918,4600.9,969.434,732.026,5193.9,109.900,621.4,7.14,7,239.638,5.13,2.01 +1986,1,7022.757,4639.3,967.442,728.125,5255.8,108.700,641,6.56,7,240.094,-4.39,10.95 +1986,2,7050.969,4688.7,945.972,751.334,5315.5,109.500,670.3,6.06,7.2,240.651,2.93,3.13 +1986,3,7118.950,4770.7,916.315,779.770,5343.3,110.200,694.9,5.31,7,241.274,2.55,2.76 
+1986,4,7153.359,4799.4,917.736,767.671,5346.5,111.400,730.2,5.44,6.8,241.784,4.33,1.1 +1987,1,7193.019,4792.1,945.776,772.247,5379.4,112.700,743.9,5.61,6.6,242.252,4.64,0.97 +1987,2,7269.510,4856.3,947.100,782.962,5321.0,113.800,743,5.67,6.3,242.804,3.89,1.79 +1987,3,7332.558,4910.4,948.055,783.804,5416.2,115.000,756.2,6.19,6,243.446,4.2,1.99 +1987,4,7458.022,4922.2,1021.980,795.467,5493.1,116.000,756.2,5.76,5.9,243.981,3.46,2.29 +1988,1,7496.600,5004.4,964.398,773.851,5562.1,117.200,768.1,5.76,5.7,244.445,4.12,1.64 +1988,2,7592.881,5040.8,987.858,765.980,5614.3,118.500,781.4,6.48,5.5,245.021,4.41,2.07 +1988,3,7632.082,5080.6,994.204,760.245,5657.5,119.900,783.3,7.22,5.5,245.693,4.7,2.52 +1988,4,7733.991,5140.4,1007.371,783.065,5708.5,121.200,785.7,8.03,5.3,246.224,4.31,3.72 +1989,1,7806.603,5159.3,1045.975,767.024,5773.4,123.100,779.2,8.67,5.2,246.721,6.22,2.44 +1989,2,7865.016,5182.4,1033.753,784.275,5749.8,124.500,777.8,8.15,5.2,247.342,4.52,3.63 +1989,3,7927.393,5236.1,1021.604,791.819,5787.0,125.400,786.6,7.76,5.3,248.067,2.88,4.88 +1989,4,7944.697,5261.7,1011.119,787.844,5831.3,127.500,795.4,7.65,5.4,248.659,6.64,1.01 +1990,1,8027.693,5303.3,1021.070,799.681,5875.1,128.900,806.2,7.80,5.3,249.306,4.37,3.44 +1990,2,8059.598,5320.8,1021.360,800.639,5913.9,130.500,810.1,7.70,5.3,250.132,4.93,2.76 +1990,3,8059.476,5341.0,997.319,793.513,5918.1,133.400,819.8,7.33,5.7,251.057,8.79,-1.46 +1990,4,7988.864,5299.5,934.248,800.525,5878.2,134.700,827.2,6.67,6.1,251.889,3.88,2.79 +1991,1,7950.164,5284.4,896.210,806.775,5896.3,135.100,843.2,5.83,6.6,252.643,1.19,4.65 +1991,2,8003.822,5324.7,891.704,809.081,5941.1,136.200,861.5,5.54,6.8,253.493,3.24,2.29 +1991,3,8037.538,5345.0,913.904,793.987,5953.6,137.200,878,5.18,6.9,254.435,2.93,2.25 +1991,4,8069.046,5342.6,948.891,778.378,5992.4,138.300,910.4,4.14,7.1,255.214,3.19,0.95 +1992,1,8157.616,5434.5,927.796,778.568,6082.9,139.400,943.8,3.88,7.4,255.992,3.17,0.71 
+1992,2,8244.294,5466.7,988.912,777.762,6129.5,140.500,963.2,3.50,7.6,256.894,3.14,0.36 +1992,3,8329.361,5527.1,999.135,786.639,6160.6,141.700,1003.8,2.97,7.6,257.861,3.4,-0.44 +1992,4,8417.016,5594.6,1030.758,787.064,6248.2,142.800,1030.4,3.12,7.4,258.679,3.09,0.02 +1993,1,8432.485,5617.2,1054.979,762.901,6156.5,143.800,1047.6,2.92,7.2,259.414,2.79,0.13 +1993,2,8486.435,5671.1,1063.263,752.158,6252.3,144.500,1084.5,3.02,7.1,260.255,1.94,1.08 +1993,3,8531.108,5732.7,1062.514,744.227,6265.7,145.600,1113,3.00,6.8,261.163,3.03,-0.04 +1993,4,8643.769,5783.7,1118.583,748.102,6358.1,146.300,1131.6,3.05,6.6,261.919,1.92,1.13 +1994,1,8727.919,5848.1,1166.845,721.288,6332.6,147.200,1141.1,3.48,6.6,262.631,2.45,1.02 +1994,2,8847.303,5891.5,1234.855,717.197,6440.6,148.400,1150.5,4.20,6.2,263.436,3.25,0.96 +1994,3,8904.289,5938.7,1212.655,736.890,6487.9,149.400,1150.1,4.68,6,264.301,2.69,2 +1994,4,9003.180,5997.3,1269.190,716.702,6574.0,150.500,1151.4,5.53,5.6,265.044,2.93,2.6 +1995,1,9025.267,6004.3,1282.090,715.326,6616.6,151.800,1149.3,5.72,5.5,265.755,3.44,2.28 +1995,2,9044.668,6053.5,1247.610,712.492,6617.2,152.600,1145.4,5.52,5.7,266.557,2.1,3.42 +1995,3,9120.684,6107.6,1235.601,707.649,6666.8,153.500,1137.3,5.32,5.7,267.456,2.35,2.97 +1995,4,9184.275,6150.6,1270.392,681.081,6706.2,154.700,1123.5,5.17,5.6,268.151,3.11,2.05 +1996,1,9247.188,6206.9,1287.128,695.265,6777.7,156.100,1124.8,4.91,5.5,268.853,3.6,1.31 +1996,2,9407.052,6277.1,1353.795,705.172,6850.6,157.000,1112.4,5.09,5.5,269.667,2.3,2.79 +1996,3,9488.879,6314.6,1422.059,692.741,6908.9,158.200,1086.1,5.04,5.3,270.581,3.05,2 +1996,4,9592.458,6366.1,1418.193,690.744,6946.8,159.400,1081.5,4.99,5.3,271.360,3.02,1.97 +1997,1,9666.235,6430.2,1451.304,681.445,7008.9,159.900,1063.8,5.10,5.2,272.083,1.25,3.85 +1997,2,9809.551,6456.2,1543.976,693.525,7061.5,160.400,1066.2,5.01,5,272.912,1.25,3.76 +1997,3,9932.672,6566.0,1571.426,691.261,7142.4,161.500,1065.5,5.02,4.9,273.852,2.73,2.29 
+1997,4,10008.874,6641.1,1596.523,690.311,7241.5,162.000,1074.4,5.11,4.7,274.626,1.24,3.88 +1998,1,10103.425,6707.2,1672.732,668.783,7406.2,162.200,1076.1,5.02,4.6,275.304,0.49,4.53 +1998,2,10194.277,6822.6,1652.716,687.184,7512.0,163.200,1075,4.98,4.4,276.115,2.46,2.52 +1998,3,10328.787,6913.1,1700.071,681.472,7591.0,163.900,1086,4.49,4.5,277.003,1.71,2.78 +1998,4,10507.575,7019.1,1754.743,688.147,7646.5,164.700,1097.8,4.38,4.4,277.790,1.95,2.43 +1999,1,10601.179,7088.3,1809.993,683.601,7698.4,165.900,1101.9,4.39,4.3,278.451,2.9,1.49 +1999,2,10684.049,7199.9,1803.674,683.594,7716.0,166.700,1098.7,4.54,4.3,279.295,1.92,2.62 +1999,3,10819.914,7286.4,1848.949,697.936,7765.9,168.100,1102.3,4.75,4.2,280.203,3.35,1.41 +1999,4,11014.254,7389.2,1914.567,713.445,7887.7,169.300,1121.9,5.20,4.1,280.976,2.85,2.35 +2000,1,11043.044,7501.3,1887.836,685.216,8053.4,170.900,1113.5,5.63,4,281.653,3.76,1.87 +2000,2,11258.454,7571.8,2018.529,712.641,8135.9,172.700,1103,5.81,3.9,282.385,4.19,1.62 +2000,3,11267.867,7645.9,1986.956,698.827,8222.3,173.900,1098.7,6.07,4,283.190,2.77,3.3 +2000,4,11334.544,7713.5,1987.845,695.597,8234.6,175.600,1097.7,5.70,3.9,283.900,3.89,1.81 +2001,1,11297.171,7744.3,1882.691,710.403,8296.5,176.400,1114.9,4.39,4.2,284.550,1.82,2.57 +2001,2,11371.251,7773.5,1876.650,725.623,8273.7,177.400,1139.7,3.54,4.4,285.267,2.26,1.28 +2001,3,11340.075,7807.7,1837.074,730.493,8484.5,177.600,1166,2.72,4.8,286.047,0.45,2.27 +2001,4,11380.128,7930.0,1731.189,739.318,8385.5,177.700,1190.9,1.74,5.5,286.728,0.23,1.51 +2002,1,11477.868,7957.3,1789.327,756.915,8611.6,179.300,1185.9,1.75,5.7,287.328,3.59,-1.84 +2002,2,11538.770,7997.8,1810.779,774.408,8658.9,180.000,1199.5,1.70,5.8,288.028,1.56,0.14 +2002,3,11596.430,8052.0,1814.531,786.673,8629.2,181.200,1204,1.61,5.7,288.783,2.66,-1.05 +2002,4,11598.824,8080.6,1813.219,799.967,8649.6,182.600,1226.8,1.20,5.8,289.421,3.08,-1.88 +2003,1,11645.819,8122.3,1813.141,800.196,8681.3,183.200,1248.4,1.14,5.9,290.019,1.31,-0.17 
+2003,2,11738.706,8197.8,1823.698,838.775,8812.5,183.700,1287.9,0.96,6.2,290.704,1.09,-0.13 +2003,3,11935.461,8312.1,1889.883,839.598,8935.4,184.900,1297.3,0.94,6.1,291.449,2.6,-1.67 +2003,4,12042.817,8358.0,1959.783,845.722,8986.4,186.300,1306.1,0.90,5.8,292.057,3.02,-2.11 +2004,1,12127.623,8437.6,1970.015,856.570,9025.9,187.400,1332.1,0.94,5.7,292.635,2.35,-1.42 +2004,2,12213.818,8483.2,2055.580,861.440,9115.0,189.100,1340.5,1.21,5.6,293.310,3.61,-2.41 +2004,3,12303.533,8555.8,2082.231,876.385,9175.9,190.800,1361,1.63,5.4,294.066,3.58,-1.95 +2004,4,12410.282,8654.2,2125.152,865.596,9303.4,191.800,1366.6,2.20,5.4,294.741,2.09,0.11 +2005,1,12534.113,8719.0,2170.299,869.204,9189.6,193.800,1357.8,2.69,5.3,295.308,4.15,-1.46 +2005,2,12587.535,8802.9,2131.468,870.044,9253.0,194.700,1366.6,3.01,5.1,295.994,1.85,1.16 +2005,3,12683.153,8865.6,2154.949,890.394,9308.0,199.200,1375,3.52,5,296.770,9.14,-5.62 +2005,4,12748.699,8888.5,2232.193,875.557,9358.7,199.400,1380.6,4.00,4.9,297.435,0.4,3.6 +2006,1,12915.938,8986.6,2264.721,900.511,9533.8,200.700,1380.5,4.51,4.7,298.061,2.6,1.91 +2006,2,12962.462,9035.0,2261.247,892.839,9617.3,202.700,1369.2,4.82,4.7,298.766,3.97,0.85 +2006,3,12965.916,9090.7,2229.636,892.002,9662.5,201.900,1369.4,4.90,4.7,299.593,-1.58,6.48 +2006,4,13060.679,9181.6,2165.966,894.404,9788.8,203.574,1373.6,4.92,4.4,300.320,3.3,1.62 +2007,1,13099.901,9265.1,2132.609,882.766,9830.2,205.920,1379.7,4.95,4.5,300.977,4.58,0.36 +2007,2,13203.977,9291.5,2162.214,898.713,9842.7,207.338,1370,4.72,4.5,301.714,2.75,1.97 +2007,3,13321.109,9335.6,2166.491,918.983,9883.9,209.133,1379.2,4.00,4.7,302.509,3.45,0.55 +2007,4,13391.249,9363.6,2123.426,925.110,9886.2,212.495,1377.4,3.01,4.8,303.204,6.38,-3.37 +2008,1,13366.865,9349.6,2082.886,943.372,9826.8,213.997,1384,1.56,4.9,303.803,2.82,-1.26 +2008,2,13415.266,9351.0,2026.518,961.280,10059.0,218.610,1409.3,1.74,5.4,304.483,8.53,-6.79 +2008,3,13324.600,9267.7,1990.693,991.551,9838.3,216.889,1474.7,1.17,6,305.270,-3.16,4.33 
+2008,4,13141.920,9195.3,1857.661,1007.273,9920.4,212.174,1576.5,0.12,6.9,305.952,-8.79,8.91 +2009,1,12925.410,9209.2,1558.494,996.287,9926.4,212.671,1592.8,0.22,8.1,306.547,0.94,-0.71 +2009,2,12901.504,9189.0,1456.678,1023.528,10077.5,214.469,1653.6,0.18,9.2,307.226,3.37,-3.19 +2009,3,12990.341,9256.0,1486.398,1044.088,10040.6,216.385,1673.9,0.12,9.6,308.013,3.56,-3.44 diff --git a/pandas/io/tests/data/stata3.dta b/pandas/io/tests/data/stata3.dta new file mode 100644 index 0000000000000..265fbcc3a8187 Binary files /dev/null and b/pandas/io/tests/data/stata3.dta differ diff --git a/pandas/io/tests/data/stata4.dta b/pandas/io/tests/data/stata4.dta new file mode 100644 index 0000000000000..c5d7de8b42295 Binary files /dev/null and b/pandas/io/tests/data/stata4.dta differ diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py new file mode 100644 index 0000000000000..79cec2870d687 --- /dev/null +++ b/pandas/io/tests/test_stata.py @@ -0,0 +1,193 @@ +# pylint: disable=E1101 + +from datetime import datetime +import os +import unittest + +import warnings +import nose + +import numpy as np + +from pandas.core.frame import DataFrame +from pandas.io.parsers import read_csv +from pandas.io.stata import read_stata, StataReader, StataWriter +import pandas.util.testing as tm + + +def curpath(): + pth, _ = os.path.split(os.path.abspath(__file__)) + return pth + + +class StataTests(unittest.TestCase): + + def setUp(self): + # Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from: + # http://stata-press.com/data/glmext.html + self.dirpath = tm.get_data_path() + self.dta1 = os.path.join(self.dirpath, 'stata1.dta') + self.dta2 = os.path.join(self.dirpath, 'stata2.dta') + self.dta3 = os.path.join(self.dirpath, 'stata3.dta') + self.csv3 = os.path.join(self.dirpath, 'stata3.csv') + self.dta4 = os.path.join(self.dirpath, 'stata4.dta') + self.dta5 = os.path.join(self.dirpath, 'stata5.dta') + self.dta6 = os.path.join(self.dirpath, 
'stata6.dta') + self.dta7 = os.path.join(self.dirpath, 'cancer.dta') + self.csv7 = os.path.join(self.dirpath, 'cancer.csv') + self.dta8 = os.path.join(self.dirpath, 'tbl19-3.dta') + self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv') + self.dta9 = os.path.join(self.dirpath, 'lbw.dta') + self.csv9 = os.path.join(self.dirpath, 'lbw.csv') + + def read_dta(self, file): + return read_stata(file, convert_dates=True) + + def read_csv(self, file): + return read_csv(file, parse_dates=True) + + def test_read_dta1(self): + reader = StataReader(self.dta1) + parsed = reader.data() + # Pandas uses np.nan as missing value. Thus, all columns will be of type float, regardless of their name. + expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) + + for i, col in enumerate(parsed.columns): + np.testing.assert_almost_equal( + parsed[col], + expected[expected.columns[i]] + ) + + def test_read_dta2(self): + expected = DataFrame.from_records( + [ + ( + datetime(2006, 11, 19, 23, 13, 20), + 1479596223000, + datetime(2010, 1, 20), + datetime(2010, 1, 8), + datetime(2010, 1, 1), + datetime(1974, 7, 1), + datetime(2010, 1, 1), + datetime(2010, 1, 1) + ), + ( + datetime(1959, 12, 31, 20, 3, 20), + -1479590, + datetime(1953, 10, 2), + datetime(1948, 6, 10), + datetime(1955, 1, 1), + datetime(1955, 7, 1), + datetime(1955, 1, 1), + datetime(2, 1, 1) + ), + ( + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('NaT') + ) + ], + columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date', 'monthly_date', 'quarterly_date', 'half_yearly_date', 'yearly_date'] + ) + + with warnings.catch_warnings(record=True) as w: + parsed = self.read_dta(self.dta2) + np.testing.assert_equal( + len(w), 1) # should get a warning for that format. 
+ + tm.assert_frame_equal(parsed, expected) + + def test_read_dta3(self): + parsed = self.read_dta(self.dta3) + expected = self.read_csv(self.csv3) + for i, col in enumerate(parsed.columns): + np.testing.assert_almost_equal( + parsed[col], + expected[expected.columns[i]], + decimal=3 + ) + + def test_read_dta4(self): + parsed = self.read_dta(self.dta4) + expected = DataFrame.from_records( + [ + ["one", "ten", "one", "one", "one"], + ["two", "nine", "two", "two", "two"], + ["three", "eight", "three", "three", "three"], + ["four", "seven", 4, "four", "four"], + ["five", "six", 5, np.nan, "five"], + ["six", "five", 6, np.nan, "six"], + ["seven", "four", 7, np.nan, "seven"], + ["eight", "three", 8, np.nan, "eight"], + ["nine", "two", 9, np.nan, "nine"], + ["ten", "one", "ten", np.nan, "ten"] + ], + columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled', 'labeled_with_missings', 'float_labelled']) + + tm.assert_frame_equal(parsed, expected) + + def test_write_dta5(self): + original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) + + writer = StataWriter(self.dta5, original, None, False) + writer.write_file() + + written_and_read_again = self.read_dta(self.dta5) + tm.assert_frame_equal(written_and_read_again, original) + + def test_write_dta6(self): + original = self.read_csv(self.csv3) + + writer = StataWriter(self.dta6, original, None, False) + writer.write_file() + + written_and_read_again = self.read_dta(self.dta6) + tm.assert_frame_equal(written_and_read_again, original) + + @nose.tools.nottest + def test_read_dta7(self): + expected = read_csv(self.csv7, parse_dates=True, sep='\t') + parsed = self.read_dta(self.dta7) + + for i, col in enumerate(parsed.columns): + np.testing.assert_almost_equal( + parsed[col], + expected[expected.columns[i]], + decimal=3 + ) + + @nose.tools.nottest + def test_read_dta8(self): + expected = read_csv(self.csv8, parse_dates=True, sep='\t') 
+ parsed = self.read_dta(self.dta8) + + for i, col in enumerate(parsed.columns): + np.testing.assert_almost_equal( + parsed[col], + expected[expected.columns[i]], + decimal=3 + ) + + @nose.tools.nottest + def test_read_dta9(self): + expected = read_csv(self.csv9, parse_dates=True, sep='\t') + parsed = self.read_dta(self.dta9) + + for i, col in enumerate(parsed.columns): + np.testing.assert_equal( + parsed[col], + expected[expected.columns[i]], + decimal=3 + ) + + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/setup.py b/setup.py index 3e56144e25378..d1b1382e26dbc 100755 --- a/setup.py +++ b/setup.py @@ -506,6 +506,7 @@ def pxd(name): 'tests/data/legacy_pickle/0.10.1/*.pickle', 'tests/data/legacy_pickle/0.11.0/*.pickle', 'tests/data/*.csv', + 'tests/data/*.dta', 'tests/data/*.txt', 'tests/data/*.xls', 'tests/data/*.xlsx',
This pull request aims at fixing ticket #1512 and contains both a reader and a writer for Stata .dta files. The code basically comes from th statsmodels project, however, I adapted it to the needs of pandas and implemented support for reading out stata value labels. The writer does not write those labels back.
https://api.github.com/repos/pandas-dev/pandas/pulls/3270
2013-04-07T19:57:24Z
2013-05-16T11:31:47Z
2013-05-16T11:31:47Z
2014-06-13T17:39:52Z
DOC: fix docstring for date_parser option
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 164422f7572bd..9b1be2a658a6a 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -87,8 +87,9 @@ class DateConversionError(Exception): If True and parse_dates specifies combining multiple columns then keep the original columns. date_parser : function - Function to use for converting dates to strings. Defaults to - dateutil.parser + Function to use for converting a sequence of string columns to an + array of datetime instances. The default uses dateutil.parser.parser + to do the conversion. dayfirst : boolean, default False DD/MM format dates, international and European format thousands : str, default None
Just a small doc string fix, hopefully trivial to merge.
https://api.github.com/repos/pandas-dev/pandas/pulls/3266
2013-04-07T13:34:50Z
2013-04-08T23:54:20Z
2013-04-08T23:54:20Z
2014-06-21T21:02:26Z
DOC: update boxplot/truncate #2916
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 540fb1a721c3d..25e55f472b495 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5619,7 +5619,12 @@ def boxplot(self, column=None, by=None, ax=None, fontsize=None, Can be any valid input to groupby by : string or sequence Column in the DataFrame to group by + ax : matplotlib axis object, default None fontsize : int or string + rot : int, default None + Rotation for ticks + grid : boolean, default None (matlab style default) + Axis grid lines Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fdb12a79e0e08..ed90aab715cfd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1057,6 +1057,7 @@ def truncate(self, before=None, after=None, copy=True): Truncate before date after : date Truncate after date + copy : boolean, default True Returns -------
https://api.github.com/repos/pandas-dev/pandas/pulls/3265
2013-04-07T03:25:05Z
2013-04-07T03:39:59Z
null
2013-04-07T03:39:59Z
ENH: update DataFrame to_latex for nicer typesetting
diff --git a/pandas/core/format.py b/pandas/core/format.py index 862b09f5e84e3..f18a449fb604b 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -347,6 +347,12 @@ def to_latex(self, force_unicode=None, column_format=None): """ Render a DataFrame to a LaTeX tabular environment output. """ + def get_col_type(dtype): + if issubclass(dtype.type, np.number): + return 'r' + else: + return 'l' + import warnings if force_unicode is not None: # pragma: no cover warnings.warn( @@ -362,27 +368,28 @@ def to_latex(self, force_unicode=None, column_format=None): strcols = [[info_line]] else: strcols = self._to_str_columns() - + if column_format is None: - column_format = '|l|%s|' % '|'.join('c' for _ in strcols) + dtypes = self.frame.dtypes.values + column_format = 'l%s' % ''.join(map(get_col_type, dtypes)) elif not isinstance(column_format, basestring): raise AssertionError(('column_format must be str or unicode, not %s' % type(column_format))) self.buf.write('\\begin{tabular}{%s}\n' % column_format) - self.buf.write('\\hline\n') + self.buf.write('\\toprule\n') nlevels = frame.index.nlevels for i, row in enumerate(izip(*strcols)): if i == nlevels: - self.buf.write('\\hline\n') # End of header + self.buf.write('\\midrule\n') # End of header crow = [(x.replace('_', '\\_') .replace('%', '\\%') .replace('&', '\\&') if x else '{}') for x in row] self.buf.write(' & '.join(crow)) self.buf.write(' \\\\\n') - self.buf.write('\\hline\n') + self.buf.write('\\bottomrule\n') self.buf.write('\\end{tabular}\n') def _format_col(self, i):
https://api.github.com/repos/pandas-dev/pandas/pulls/3264
2013-04-06T22:38:24Z
2013-04-12T04:13:19Z
2013-04-12T04:13:19Z
2014-02-03T12:47:12Z
DOC: update scipy.py #2916
diff --git a/pandas/compat/scipy.py b/pandas/compat/scipy.py index aab8bd89c5af4..59a9bbdfbdb9e 100644 --- a/pandas/compat/scipy.py +++ b/pandas/compat/scipy.py @@ -30,13 +30,13 @@ def scoreatpercentile(a, per, limit=(), interpolation_method='fraction'): limit : tuple, optional Tuple of two scalars, the lower and upper limits within which to compute the percentile. - interpolation : {'fraction', 'lower', 'higher'}, optional + interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. - -lower: `i`. + - lower: `i`. - higher: `j`. Returns
renamed interpolation -> interpolation_method.
https://api.github.com/repos/pandas-dev/pandas/pulls/3262
2013-04-06T05:46:04Z
2013-04-06T06:02:40Z
2013-04-06T06:02:40Z
2013-04-06T06:03:03Z
ENH: In HDFStore, add select_column method, deprecate unique method
diff --git a/RELEASE.rst b/RELEASE.rst index 87acddb74df8a..46f7c832ae149 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -163,6 +163,11 @@ pandas 0.11.0 when invalid shapes are passed - Methods return None when inplace=True (GH1893_) + - ``HDFStore`` + + - added the method ``select_column`` to select a single column from a table as a Series. + - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + **Bug Fixes** - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill`` diff --git a/doc/source/io.rst b/doc/source/io.rst index 8440f6f566659..25c42780afd65 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1352,16 +1352,17 @@ then the ``nrows`` of the table are considered. Advanced Queries ~~~~~~~~~~~~~~~~ -**Unique** +**Select a Single Column** -To retrieve the *unique* values of an indexable or data column, use the -method ``unique``. This will, for example, enable you to get the index -very quickly. Note ``nan`` are excluded from the result set. +To retrieve a single indexable or data column, use the +method ``select_column``. This will, for example, enable you to get the index +very quickly. These return a ``Series`` of the result, indexed by the row number. +These do not currently accept the ``where`` selector (coming soon) .. ipython:: python - store.unique('df_dc', 'index') - store.unique('df_dc', 'string') + store.select_column('df_dc', 'index') + store.select_column('df_dc', 'string') **Replicating or** diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index 4c460849c0588..c6553b909f7a6 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -226,6 +226,10 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicity converts ``NaT` API changes ~~~~~~~~~~~ + - In ``HDFStore``, added the method ``select_column`` to select a single column from a table as a Series. 
+ + - In ``HDFStore``, deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()`` + Enhancements ~~~~~~~~~~~~ diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 1c223f58471f0..0568ee7f7f8bf 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -423,8 +423,13 @@ def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs) def unique(self, key, column, **kwargs): + warnings.warn("unique(key,column) is deprecated\n" + "use select_column(key,column).unique() instead") + return self.get_storer(key).read_column(column = column, **kwargs).unique() + + def select_column(self, key, column, **kwargs): """ - return a single column uniquely from the table. This is generally only useful to select an indexable + return a single column from the table. This is generally only useful to select an indexable Parameters ---------- @@ -2525,7 +2530,7 @@ def read_coordinates(self, where=None, start=None, stop=None, **kwargs): self.selection = Selection(self, where=where, start=start, stop=stop, **kwargs) return Coordinates(self.selection.select_coords(), group=self.group, where=where) - def read_column(self, column, **kwargs): + def read_column(self, column, where = None, **kwargs): """ return a single column from the table, generally only indexables are interesting """ # validate the version @@ -2535,6 +2540,9 @@ def read_column(self, column, **kwargs): if not self.infer_axes(): return False + if where is not None: + raise Exception("read_column does not currently accept a where clause") + # find the axes for a in self.axes: if column == a.name: @@ -2544,7 +2552,7 @@ def read_column(self, column, **kwargs): # column must be an indexable or a data column c = getattr(self.table.cols, column) - return Categorical.from_array(a.convert(c[:], nan_rep=self.nan_rep).take_data()).levels + return Series(a.convert(c[:], 
nan_rep=self.nan_rep).take_data()) raise KeyError("column [%s] not found in the table" % column) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 071bcfbb8b3e9..1973c578cb9e6 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -2068,6 +2068,7 @@ def test_string_select(self): expected = df2[isnull(df2.x)] assert_frame_equal(result,expected) + # int ==/!= df['int'] = 1 df.ix[2:7,'int'] = 2 @@ -2083,42 +2084,44 @@ def test_string_select(self): assert_frame_equal(result,expected) - def test_unique(self): + def test_read_column(self): df = tm.makeTimeDataFrame() - def check(x, y): - self.assert_((np.unique(x) == np.unique(y)).all() == True) - with ensure_clean(self.path) as store: store.remove('df') store.append('df', df) # error - self.assertRaises(KeyError, store.unique, 'df', 'foo') + self.assertRaises(KeyError, store.select_column, 'df', 'foo') + + def f(): + store.select_column('df', 'index', where = ['index>5']) + self.assertRaises(Exception, f) # valid - result = store.unique('df', 'index') - check(result.values, df.index.values) - + result = store.select_column('df', 'index') + tm.assert_almost_equal(result.values, Series(df.index).values) + self.assert_(isinstance(result,Series)) + # not a data indexable column self.assertRaises( - ValueError, store.unique, 'df', 'values_block_0') + ValueError, store.select_column, 'df', 'values_block_0') # a data column df2 = df.copy() df2['string'] = 'foo' store.append('df2', df2, data_columns=['string']) - result = store.unique('df2', 'string') - check(result.values, df2['string'].unique()) + result = store.select_column('df2', 'string') + tm.assert_almost_equal(result.values, df2['string'].values) # a data column with NaNs, result excludes the NaNs df3 = df.copy() df3['string'] = 'foo' df3.ix[4:6, 'string'] = np.nan store.append('df3', df3, data_columns=['string']) - result = store.unique('df3', 'string') - check(result.values, 
df3['string'].valid().unique()) + result = store.select_column('df3', 'string') + tm.assert_almost_equal(result.values, df3['string'].values) def test_coordinates(self): df = tm.makeTimeDataFrame()
https://api.github.com/repos/pandas-dev/pandas/pulls/3256
2013-04-04T11:49:15Z
2013-04-04T12:11:15Z
2013-04-04T12:11:15Z
2014-06-15T00:21:31Z
BUG: honor repr() of proper subclasses of dict/ODict in pprint_thing
diff --git a/pandas/core/common.py b/pandas/core/common.py index 22fad2aed4e25..4231e9101e1ec 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -18,6 +18,7 @@ import pandas.tslib as tslib from pandas.util import py3compat +from pandas.util import compat import codecs import csv @@ -763,7 +764,7 @@ def changeit(): # our type is wrong here, need to upcast r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True) np.putmask(r, mask, other) - + # we need to actually change the dtype here if change is not None: change.dtype = r.dtype @@ -778,7 +779,7 @@ def changeit(): new_dtype, fill_value = _maybe_promote(result.dtype,other) if new_dtype != result.dtype: - # we have a scalar or len 0 ndarray + # we have a scalar or len 0 ndarray # and its nan and we are changing some values if np.isscalar(other) or (isinstance(other,np.ndarray) and other.ndim < 1): if isnull(other): @@ -1471,9 +1472,31 @@ def is_list_like(arg): def _is_sequence(x): try: - iter(x) + if isinstance(x, basestring): + return False + iter(x) # it's iterable len(x) # it has a length - return not isinstance(x, basestring) and True + + return True + except Exception: + return False + +def _is_pprint_sequence(x): + try: + if isinstance(x, basestring): + return False + + # proper subclass of stdlib dicts, let them format themselves + if (isinstance(x, dict) and + (not type(x) == dict) and + (not type(x) == compat.OrderedDict)): + return False + + iter(x) # it's iterable + len(x) # it has a length + + return True + except Exception: return False @@ -1818,9 +1841,16 @@ def _pprint_dict(seq, _nest_lvl=0): pairs = [] pfmt = u"%s: %s" - for k, v in seq.items(): - pairs.append(pfmt % (repr(k), repr(v))) - return fmt % ", ".join(pairs) + + nitems = get_option("max_seq_items") or len(seq) + + for k, v in seq.items()[:nitems]: + pairs.append(pfmt % (pprint_thing(k,_nest_lvl+1), pprint_thing(v,_nest_lvl+1))) + + if nitems < len(seq): + return fmt % (", ".join(pairs) + ", ...") + else: 
+ return fmt % ", ".join(pairs) def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False): @@ -1849,15 +1879,17 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False): """ + if thing is None: result = '' elif (py3compat.PY3 and hasattr(thing, '__next__')) or \ hasattr(thing, 'next'): return unicode(thing) - elif (isinstance(thing, dict) and + elif ((type(thing) == dict) or + (type(thing) == compat.OrderedDict) and _nest_lvl < get_option("display.pprint_nest_depth")): result = _pprint_dict(thing, _nest_lvl) - elif _is_sequence(thing) and _nest_lvl < \ + elif _is_pprint_sequence(thing) and _nest_lvl < \ get_option("display.pprint_nest_depth"): result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars) else: @@ -1876,7 +1908,7 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False): result = str(thing).decode('utf-8', "replace") translate = {'\t': r'\t', - '\n': r'\n', + '\n': r'\n', '\r': r'\r', } if isinstance(escape_chars, dict): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 89f6859a39bb0..5d4b24ad7359f 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -290,6 +290,8 @@ def test_ensure_platform_int(): def test_pprint_thing(): + from pandas.util.compat import OrderedDict + if py3compat.PY3: raise nose.SkipTest @@ -309,6 +311,20 @@ def test_pprint_thing(): # GH #2038 assert not "\t" in pp_t("a\tb", escape_chars=("\t",)) + assert u'{a: 1}' == pp_t(dict(a=1)) + assert u'{a: 1}' == pp_t(OrderedDict(a=1)) + + # allow proper subclasses of dict/OrderedDict to format themselves + class SubDict(dict): + def __unicode__(self): + return "kagles" + + class SubODict(OrderedDict): + def __unicode__(self): + return "kagles" + + assert u'kagles' == pp_t(SubDict()) + assert u'kagles' == pp_t(SubODict()) class TestTake(unittest.TestCase): @@ -684,7 +700,7 @@ def test_2d_float32(self): expected = arr.take(indexer, axis=1) expected[:, [2, 4]] = 
np.nan tm.assert_almost_equal(result, expected) - + def test_2d_datetime64(self): # 2005/01/01 - 2006/01/01 arr = np.random.randint(11045376L, 11360736L, (5,3))*100000000000 diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 0ae8934c898b0..f058333ace294 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1192,8 +1192,9 @@ def test_dict_entries(self): df = DataFrame({'A': [{'a': 1, 'b': 2}]}) val = df.to_string() - self.assertTrue("'a': 1" in val) - self.assertTrue("'b': 2" in val) + # to be fixed ot 'a': 1 when #3038 comes to town + self.assertTrue("a: 1" in val) + self.assertTrue("b: 2" in val) def test_to_latex(self): # it works!
#3251 Have to consider corner cases before merging, rebased on top of #3252
https://api.github.com/repos/pandas-dev/pandas/pulls/3253
2013-04-03T19:58:42Z
2013-12-22T20:33:37Z
null
2014-06-22T16:48:52Z
BUG: pprint_thing should pprint and limit nested sequences in dicts GH3251
diff --git a/pandas/core/common.py b/pandas/core/common.py index 01b6dde7d1ecc..f4485cbf1154c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1867,9 +1867,16 @@ def _pprint_dict(seq, _nest_lvl=0): pairs = [] pfmt = u"%s: %s" - for k, v in seq.items(): - pairs.append(pfmt % (repr(k), repr(v))) - return fmt % ", ".join(pairs) + + nitems = get_option("max_seq_items") or len(seq) + + for k, v in seq.items()[:nitems]: + pairs.append(pfmt % (pprint_thing(k,_nest_lvl+1), pprint_thing(v,_nest_lvl+1))) + + if nitems < len(seq): + return fmt % (", ".join(pairs) + ", ...") + else: + return fmt % ", ".join(pairs) def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False): diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index e7c5d0201ca1d..2e44a7c1fef2f 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1316,8 +1316,9 @@ def test_dict_entries(self): df = DataFrame({'A': [{'a': 1, 'b': 2}]}) val = df.to_string() - self.assertTrue("'a': 1" in val) - self.assertTrue("'b': 2" in val) + # to be fixed ot 'a': 1 when #3038 comes to town + self.assertTrue("a: 1" in val) + self.assertTrue("b: 2" in val) def test_to_latex(self): # it works!
#3251 **before** ``` python pd.options.display.max_seq_items= 5 print pd.core.common.pprint_thing(dict(foo=range(100))) foo = range(5) foo[0] = foo print pd.core.common.pprint_thing(dict(foo=foo)) {'foo': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]} {'foo': [[...], 1, 2, 3, 4]} ``` **After**: ``` python In [16]: pd.options.display.max_seq_items= 5 ...: pd.core.common.pprint_thing(dict(foo=range(100))) ...: Out[16]: u'{foo: [0, 1, 2, 3, 4, ...]}' In [17]: foo = range(5) ...: foo[0] = foo ...: pd.core.common.pprint_thing(dict(foo=foo)) ...: Out[17]: u'{foo: [[[[...], 1, 2, 3, 4], 1, 2, 3, 4], 1, 2, 3, 4]}' ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3252
2013-04-03T19:58:04Z
2013-04-23T02:12:54Z
2013-04-23T02:12:54Z
2014-07-04T22:23:57Z
BOOK: keep code examples in wes's book current, until the 2nd ed.
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 692f5cfb85890..70f3fb045376e 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -184,7 +184,7 @@ def mpl_style_cb(key): cf.register_option('precision', 7, pc_precision_doc, validator=is_int) cf.register_option('float_format', None, float_format_doc) cf.register_option('column_space', 12, validator=is_int) - cf.register_option('max_info_rows', 1000000, pc_max_info_rows_doc, + cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None)))) cf.register_option('max_rows', 100, pc_max_rows_doc, validator=is_int) cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
;)
https://api.github.com/repos/pandas-dev/pandas/pulls/3250
2013-04-03T15:15:24Z
2013-04-03T15:15:46Z
2013-04-03T15:15:46Z
2013-04-03T15:15:46Z
DOC: io.rst cleanups
diff --git a/doc/source/io.rst b/doc/source/io.rst index 28572ae14d15e..8440f6f566659 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -35,7 +35,7 @@ CSV & Text files The two workhorse functions for reading text files (a.k.a. flat files) are :func:`~pandas.io.parsers.read_csv` and :func:`~pandas.io.parsers.read_table`. They both use the same parsing code to intelligently convert tabular -data into a DataFrame object. See the :ref:`cookbook<cookbook.csv>` +data into a DataFrame object. See the :ref:`cookbook<cookbook.csv>` for some advanced strategies They can take a number of arguments: @@ -917,7 +917,7 @@ Excel files The ``ExcelFile`` class can read an Excel 2003 file using the ``xlrd`` Python module and use the same parsing code as the above to convert tabular data into -a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some +a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some advanced strategies To use it, create the ``ExcelFile`` object: @@ -1248,9 +1248,8 @@ greater than the date 20000102 and the minor_axis must be A or B` store store.select('wp', [ Term('major_axis>20000102'), Term('minor_axis', '=', ['A', 'B']) ]) -The ``columns`` keyword can be supplied to select to filter a list of -the return columns, this is equivalent to passing a -``Term('columns', list_of_columns_to_filter)`` +The ``columns`` keyword can be supplied to select a list of columns to be returned, +this is equivalent to passing a ``Term('columns', list_of_columns_to_filter)``: .. 
ipython:: python @@ -1323,7 +1322,7 @@ be data_columns # this is in-memory version of this type of selection df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')] - # we have automagically created this index and that the B/C/string/string2 + # we have automagically created this index and the B/C/string/string2 # columns are stored separately as ``PyTables`` columns store.root.df_dc.table @@ -1395,9 +1394,9 @@ New in 0.10.1 are the methods ``append_to_multple`` and ``select_as_multiple``, that can perform appending/selecting from multiple tables at once. The idea is to have one table (call it the selector table) that you index most/all of the columns, and perform your -queries. The other table(s) are data tables that are indexed the same as -the selector table. You can then perform a very fast query on the -selector table, yet get lots of data back. This method works similar to +queries. The other table(s) are data tables with an index matching the +selector table's index. You can then perform a very fast query +on the selector table, yet get lots of data back. This method works similar to having a very wide table, but is more efficient in terms of queries. Note, **THE USER IS RESPONSIBLE FOR SYNCHRONIZING THE TABLES**. This @@ -1437,8 +1436,8 @@ deleting rows, it is important to understand the ``PyTables`` deletes rows by erasing the rows, then **moving** the following data. Thus deleting can potentially be a very expensive operation depending on the orientation of your data. This is especially true in higher dimensional -objects (``Panel`` and ``Panel4D``). To get optimal deletion speed, it -pays to have the dimension you are deleting be the first of the +objects (``Panel`` and ``Panel4D``). To get optimal performance, it's +worthwhile to have the dimension you are deleting be the first of the ``indexables``. Data is ordered (on the disk) in terms of the ``indexables``. 
Here's a @@ -1500,8 +1499,8 @@ off file compression for a specific table by passing ``complevel=0`` **ptrepack** -``PyTables`` offer better write performance when compressed after -writing them, as opposed to turning on compression at the very +``PyTables`` offers better write performance when tables are compressed after +they are written, as opposed to turning on compression at the very beginning. You can use the supplied ``PyTables`` utility ``ptrepack``. In addition, ``ptrepack`` can change compression levels after the fact. @@ -1615,10 +1614,10 @@ format store like this: Backwards Compatibility ~~~~~~~~~~~~~~~~~~~~~~~ -0.10.1 of ``HDFStore`` is backwards compatible for reading tables -created in a prior version of pandas however, query terms using the +0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas, +however query terms using the prior (undocumented) methodology are unsupported. ``HDFStore`` will -issue a warning if you try to use a prior-version format file. You must +issue a warning if you try to use a legacy-format file. You must read in the entire file and write it out using the new format, using the method ``copy`` to take advantage of the updates. The group attribute ``pandas_version`` contains the version information. ``copy`` takes a @@ -1658,11 +1657,11 @@ Performance query (potentially very large amounts of data). Write times are generally longer as compared with regular stores. Query times can be quite fast, especially on an indexed axis. - - You can pass ``chunksize=an integer`` to ``append``, to change the - writing chunksize (default is 50000). This will signficantly lower + - You can pass ``chunksize=<int>`` to ``append``, specifying the + write chunksize (default is 50000). This will signficantly lower your memory usage on writing. 
- - You can pass ``expectedrows=an integer`` to the first ``append``, - to set the TOTAL number of expectedrows that ``PyTables`` will + - You can pass ``expectedrows=<int>`` to the first ``append``, + to set the TOTAL number of expected rows that ``PyTables`` will expected. This will optimize read/write performance. - Duplicate rows can be written to tables, but are filtered out in selection (with the last items being selected; thus a table is @@ -1688,7 +1687,7 @@ HDFStore supports ``Panel4D`` storage. These, by default, index the three axes ``items, major_axis, minor_axis``. On an ``AppendableTable`` it is possible to setup with the first append a different indexing scheme, depending on how you want to -store your data. Pass the ``axes`` keyword with a list of dimension +store your data. Pass the ``axes`` keyword with a list of dimensions (currently must by exactly 1 less than the total dimensions of the object). This cannot be changed after table creation. @@ -1712,12 +1711,14 @@ SQL Queries ----------- The :mod:`pandas.io.sql` module provides a collection of query wrappers to both -facilitate data retrieval and to reduce dependency on DB-specific API. There +facilitate data retrieval and to reduce dependency on DB-specific API. These wrappers only support the Python database adapters which respect the `Python -DB-API <http://www.python.org/dev/peps/pep-0249/>`_. See some +DB-API <http://www.python.org/dev/peps/pep-0249/>`_. 
See some :ref:`cookbook examples <cookbook.sql>` for some advanced strategies -Suppose you want to query some data with different types from a table such as: +For example, suppose you want to query some data with different types from a +table such as: + +-----+------------+-------+-------+-------+ | id | Date | Col_1 | Col_2 | Col_3 | @@ -1729,8 +1730,9 @@ Suppose you want to query some data with different types from a table such as: | 63 | 2012-10-20 | Z | 5.73 | True | +-----+------------+-------+-------+-------+ + Functions from :mod:`pandas.io.sql` can extract some data into a DataFrame. In -the following example, we use `SQlite <http://www.sqlite.org/>`_ SQL database +the following example, we use the `SQlite <http://www.sqlite.org/>`_ SQL database engine. You can use a temporary SQLite database where data are stored in "memory". Just do: @@ -1779,7 +1781,7 @@ You can also specify the name of the column as the DataFrame index: sql.read_frame("SELECT * FROM data;", cnx, index_col='id') sql.read_frame("SELECT * FROM data;", cnx, index_col='date') -Of course, you can specify more "complex" query. +Of course, you can specify a more "complex" query. .. ipython:: python @@ -1794,8 +1796,8 @@ Of course, you can specify more "complex" query. There are a few other available functions: - - ``tquery`` returns list of tuples corresponding to each row. - - ``uquery`` does the same thing as tquery, but instead of returning results, + - ``tquery`` returns a list of tuples corresponding to each row. + - ``uquery`` does the same thing as tquery, but instead of returning results it returns the number of related rows. - ``write_frame`` writes records stored in a DataFrame into the SQL table. - ``has_table`` checks if a given SQLite table exists.
@jreback, I read through this today and did some tweaks as I went along. Could you look this over and merge if these changes look ok to you?
https://api.github.com/repos/pandas-dev/pandas/pulls/3248
2013-04-03T11:41:55Z
2013-04-03T12:24:36Z
2013-04-03T12:24:36Z
2013-04-03T12:24:36Z
BUG: setitem with list elements in the indexer broken
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5230bf20e60b4..b6a9034593a94 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -215,9 +215,9 @@ def _align_frame(self, indexer, df): ax = self.obj.axes[i] if com._is_sequence(ix) or isinstance(ix, slice): if idx is None: - idx = ax[ix] + idx = ax[ix].ravel() elif cols is None: - cols = ax[ix] + cols = ax[ix].ravel() else: break diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 297c744b96f28..c3d6faf6e71b7 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -735,6 +735,17 @@ def test_setitem_dtype_upcast(self): expected = DataFrame([{"a": 1, "c" : 'foo'}, {"a": 3, "b": 2, "c" : np.nan}]) assert_frame_equal(df,expected) + def test_setitem_iloc(self): + + + # setitem with an iloc list + df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) + df.iloc[[0,1],[1,2]] + df.iloc[[0,1],[1,2]] += 100 + + expected = DataFrame(np.array([0,101,102,3,104,105,6,7,8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) + assert_frame_equal(df,expected) + if __name__ == '__main__': import nose
``` In [4]: df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) In [5]: df.iloc[[0,1],[1,2]] += 100 In [6]: df.iloc[[0,1],[1,2]] Out[6]: B C A 101 102 B 104 105 In [7]: df Out[7]: A B C A 0 101 102 B 3 104 105 C 6 7 8 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3246
2013-04-02T21:25:16Z
2013-04-02T22:03:03Z
2013-04-02T22:03:03Z
2014-06-13T20:33:26Z
BUG: GH3243 accept list of DataFrames as constructor input
diff --git a/pandas/core/common.py b/pandas/core/common.py index 7a78539c10a98..e016d6f7ef3fc 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1028,7 +1028,7 @@ def _possibly_convert_platform(values): if isinstance(values, (list,tuple)): values = lib.list_to_object_array(values) - if values.dtype == np.object_: + if getattr(values,'dtype',None) == np.object_: values = lib.maybe_convert_objects(values) return values diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index f4f04d5a53579..2d5183191c20b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2040,6 +2040,15 @@ def test_constructor_dtype_list_data(self): self.assert_(df.ix[1, 0] is None) self.assert_(df.ix[0, 1] == '2') + def test_constructor_list_frames(self): + + # GH 3243 + result = DataFrame([DataFrame([])]) + self.assert_(result.shape == (1,0)) + + result = DataFrame([DataFrame(dict(A = range(5)))]) + self.assert_(type(result.iloc[0,0]) == DataFrame) + def test_constructor_mixed_dtypes(self): def _make_mixed_dtypes_df(typ, ad = None):
closes #3243
https://api.github.com/repos/pandas-dev/pandas/pulls/3245
2013-04-02T20:35:16Z
2013-04-03T14:35:43Z
2013-04-03T14:35:43Z
2013-04-03T14:35:43Z
ENH: support top-level read/write API for HDFStore using read_hdf/to_hdf
diff --git a/RELEASE.rst b/RELEASE.rst index f870932aa0936..a4ccf64223da8 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -77,6 +77,7 @@ pandas 0.11.0 - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_) + - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` (GH3222_) - Add ``squeeze`` method to possibly remove length 1 dimensions from an object. @@ -355,6 +356,7 @@ pandas 0.11.0 .. _GH2747: https://github.com/pydata/pandas/issues/2747 .. _GH2816: https://github.com/pydata/pandas/issues/2816 .. _GH3216: https://github.com/pydata/pandas/issues/3216 +.. _GH3222: https://github.com/pydata/pandas/issues/3222 .. _GH2641: https://github.com/pydata/pandas/issues/2641 pandas 0.10.1 diff --git a/doc/source/io.rst b/doc/source/io.rst index 1bcaf047561a5..28572ae14d15e 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1043,10 +1043,10 @@ Deletion of the object specified by the key del store['wp'] store -Closing a Store -.. ipython:: python +Closing a Store, Context Manager +.. ipython:: python # closing a store store.close() @@ -1068,6 +1068,24 @@ These stores are **not** appendable once written (though you can simply remove them and rewrite). Nor are they **queryable**; they must be retrieved in their entirety. +Read/Write API +~~~~~~~~~~~~~~ + +``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing, +similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0) + +.. ipython:: python + + df_tl = DataFrame(dict(A=range(5), B=range(5))) + df_tl.to_hdf('store_tl.h5','table',append=True) + read_hdf('store_tl.h5', 'table', where = ['index>2']) + +.. ipython:: python + :suppress: + :okexcept: + + os.remove('store_tl.h5') + .. 
_io.hdf5-table: Storing in Table format diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index e299ba43ad9ee..4c460849c0588 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -237,6 +237,20 @@ Enhancements - Bottleneck is now a :ref:`Recommended Dependencies <install.recommended_dependencies>`, to accelerate certain types of ``nan`` operations + - For ``HDFStore``, support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` + + .. ipython:: python + + df = DataFrame(dict(A=range(5), B=range(5))) + df.to_hdf('store.h5','table',append=True) + read_hdf('store.h5', 'table', where = ['index>2']) + + .. ipython:: python + :suppress: + :okexcept: + + os.remove('store.h5') + - In ``HDFStore``, provide dotted attribute access to ``get`` from stores (e.g. ``store.df == store['df']``) diff --git a/pandas/__init__.py b/pandas/__init__.py index 5780ddfbe1fdc..3c06db57a54ae 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -32,7 +32,7 @@ from pandas.io.parsers import (read_csv, read_table, read_clipboard, read_fwf, to_clipboard, ExcelFile, ExcelWriter) -from pandas.io.pytables import HDFStore, Term, get_store +from pandas.io.pytables import HDFStore, Term, get_store, read_hdf from pandas.util.testing import debug from pandas.tools.describe import value_range diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 580148e11cc7c..fdb12a79e0e08 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -482,6 +482,11 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, np.putmask(rs.values, mask, np.nan) return rs + def to_hdf(self, path_or_buf, key, **kwargs): + """ activate the HDFStore """ + from pandas.io import pytables + return pytables.to_hdf(path_or_buf, key, self, **kwargs) + # install the indexerse for _name, _indexer in indexing.get_indexers_list(): PandasObject._create_indexer(_name,_indexer) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b68a34c2af972..1c223f58471f0 
100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -147,6 +147,27 @@ def get_store(path, mode='a', complevel=None, complib=None, store.close() +### interface to/from ### + +def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, **kwargs): + """ store this object, close it if we opened it """ + f = lambda store: store.put(key, value, **kwargs) + + if isinstance(path_or_buf, basestring): + with get_store(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: + f(store) + else: + f(path_or_buf) + +def read_hdf(path_or_buf, key, **kwargs): + """ read from the store, closeit if we opened it """ + f = lambda store: store.select(key, **kwargs) + + if isinstance(path_or_buf, basestring): + with get_store(path_or_buf) as store: + return f(store) + f(path_or_buf) + class HDFStore(object): """ dict-like IO interface for storing pandas objects in PyTables @@ -190,7 +211,7 @@ class HDFStore(object): """ _quiet = False - def __init__(self, path, mode='a', complevel=None, complib=None, + def __init__(self, path, mode=None, complevel=None, complib=None, fletcher32=False): try: import tables as _ @@ -198,6 +219,8 @@ def __init__(self, path, mode='a', complevel=None, complib=None, raise Exception('HDFStore requires PyTables') self._path = path + if mode is None: + mode = 'a' self._mode = mode self._handle = None self._complevel = complevel diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 2cc80e7b399cf..071bcfbb8b3e9 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -94,6 +94,37 @@ def test_factory_fun(self): finally: safe_remove(self.path) + def test_conv_read_write(self): + + try: + + from pandas import read_hdf + + def roundtrip(key, obj,**kwargs): + obj.to_hdf(self.path, key,**kwargs) + return read_hdf(self.path, key) + + o = tm.makeTimeSeries() + assert_series_equal(o, roundtrip('series',o)) + + o = tm.makeStringSeries() + assert_series_equal(o, 
roundtrip('string_series',o)) + + o = tm.makeDataFrame() + assert_frame_equal(o, roundtrip('frame',o)) + + o = tm.makePanel() + tm.assert_panel_equal(o, roundtrip('panel',o)) + + # table + df = DataFrame(dict(A=range(5), B=range(5))) + df.to_hdf(self.path,'table',append=True) + result = read_hdf(self.path, 'table', where = ['index>2']) + assert_frame_equal(df[df.index>2],result) + + finally: + safe_remove(self.path) + def test_keys(self): with ensure_clean(self.path) as store:
closes #3222
https://api.github.com/repos/pandas-dev/pandas/pulls/3244
2013-04-02T19:09:01Z
2013-04-02T19:30:18Z
2013-04-02T19:30:18Z
2014-07-02T22:43:16Z
ENH: groupby().apply(f) accepts combine=0 arg, to return results unmolested #3241
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index cb0a03d306c53..63946feb02575 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -287,7 +287,7 @@ def apply(self, func, *args, **kwargs): """ Apply function and combine results together in an intelligent way. The split-apply-combine combination rules attempt to be as common sense - based as possible. For example: + based as possible. For example (overridable with combine=False): case 1: group DataFrame @@ -307,6 +307,9 @@ def apply(self, func, *args, **kwargs): Parameters ---------- func : function + combine : (default: True), You may pass in a combine=True argument to get back + the values exactly as returned by func, as long as func doesn't itself + use a `combine` keyword or capture all kwd args using **kwds. Notes ----- @@ -320,14 +323,34 @@ def apply(self, func, *args, **kwargs): ------- applied : type depending on grouped object and function """ + import inspect + func = _intercept_function(func) + + # make sure f doesn't expect a "combine" keyword + # and if not, hijack it if specified + combine = True + try: + fargs=inspect.getargspec(func) + if not fargs.keywords and 'combine' not in fargs.args[len(fargs.defaults or []):]: + combine = kwargs.pop('combine',True) + except TypeError: # func is not a python function? 
+ pass + f = lambda g: func(g, *args, **kwargs) - return self._python_apply_general(f) - def _python_apply_general(self, f): + return self._python_apply_general(f,combine=combine) + + def _python_apply_general(self, f,combine=True): keys, values, mutated = self.grouper.apply(f, self.obj, self.axis) - return self._wrap_applied_output(keys, values, + if not combine: + if len(keys) == 0: + return Series([]) + else: + return zip(keys,values) + else: + return self._wrap_applied_output(keys, values, not_indexed_same=mutated) def aggregate(self, func, *args, **kwargs): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 7aad2e0b734b1..cab4231bfe67c 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -11,7 +11,8 @@ from pandas.core.groupby import GroupByError, SpecificationError, DataError from pandas.core.series import Series from pandas.util.testing import (assert_panel_equal, assert_frame_equal, - assert_series_equal, assert_almost_equal) + assert_series_equal, assert_almost_equal, + makeCustomDataframe as mkdf) from pandas.core.panel import Panel from pandas.tools.merge import concat from collections import defaultdict @@ -2369,6 +2370,35 @@ def noddy(value, weight): # don't die no_toes = df_grouped.apply(lambda x: noddy(x.value, x.weight )) + def test_groupby_apply_raw(self): + from random import randint + df=mkdf(10,2,data_gen_f=lambda x,y: randint(1,10)) + df + def f1(g): + return g.sort('C_l0_g0') + def f2(g,combine=None): + return g.sort('C_l0_g0') + def f3(g,**kwds): + return g.sort('C_l0_g0') + + g=df.groupby(lambda key: int(key.split("g")[-1]) >= 5) + r=g.apply(f1) # default result without using combine + + r1=g.apply(f1,combine=False) + r2=g.apply(f2,combine=False) + r3=g.apply(f3,combine=False) + + # if the combine keyword is in the transformer signature, don't mess with it + assert_frame_equal(r,r2) + # if the transformer catches all keywords, don't mess with it + assert_frame_equal(r,r3) + + # else, 
make sure we get a kv pair, with the values + # being exactly what the transformer returns + self.assertEqual(len(r1),2) + assert_frame_equal(r1[0][1],f1(list(g)[0][1])) + assert_frame_equal(r1[1][1],f1(list(g)[1][1])) + def assert_fp_equal(a, b): assert((np.abs(a - b) < 1e-12).all())
#3241 Right now: ``` python In [16]: df=mkdf(10,2,data_gen_f=lambda x,y: randint(1,10)) ...: df ...: ...: ...: Out[16]: C0 C_l0_g0 C_l0_g1 R0 R_l0_g0 9 1 R_l0_g1 3 7 R_l0_g2 8 1 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g5 7 2 R_l0_g6 4 1 R_l0_g7 5 4 R_l0_g8 9 7 R_l0_g9 4 8 In [17]: def f1(g): ...: return g.sort('C_l0_g0') ...: # group on the suffix of the running index ...: g=df.groupby(lambda key: int(key.split("g")[-1]) >= 5) ...: r=g.apply(f1) ...: # we want to return each group dataframe sorted, but we get concatted against our will In [18]: r Out[18]: C0 C_l0_g0 C_l0_g1 R0 False R_l0_g1 3 7 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g2 8 1 R_l0_g0 9 1 True R_l0_g6 4 1 R_l0_g9 4 8 R_l0_g7 5 4 R_l0_g5 7 2 R_l0_g8 9 7 # what we want really, is a couple of sorted dataframes: In [20]: map(lambda r: r[1].sort('C_l0_g0'),g) Out[20]: [C0 C_l0_g0 C_l0_g1 R0 R_l0_g1 3 7 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g2 8 1 R_l0_g0 9 1, C0 C_l0_g0 C_l0_g1 R0 R_l0_g6 4 1 R_l0_g9 4 8 R_l0_g7 5 4 R_l0_g5 7 2 R_l0_g8 9 7] ``` #### With this PR: ``` python In [21]: def f1(g): # same f1 as above ...: return g.sort('C_l0_g0') ...: def f2(g,raw=None): ...: return g.sort('C_l0_g0') ...: def f3(g,**kwds): ...: return g.sort('C_l0_g0') ...: # the `raw` keyword is the new bit ...: r1=g.apply(f1,raw=True) ...: r2=g.apply(f2,raw=True) ...: r3=g.apply(f2,raw=True) ...: # a bunch of sorted frames In [22]: print r1 [(False, C0 C_l0_g0 C_l0_g1 R0 R_l0_g1 3 7 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g2 8 1 R_l0_g0 9 1), (True, C0 C_l0_g0 C_l0_g1 R0 R_l0_g6 4 1 R_l0_g9 4 8 R_l0_g7 5 4 R_l0_g5 7 2 R_l0_g8 9 7)] # but not if the transformer function signature uses **kwds, or 'raw' already In [23]: print r2 C0 C_l0_g0 C_l0_g1 R0 False R_l0_g1 3 7 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g2 8 1 R_l0_g0 9 1 True R_l0_g6 4 1 R_l0_g9 4 8 R_l0_g7 5 4 R_l0_g5 7 2 R_l0_g8 9 7 In [24]: print r3 C0 C_l0_g0 C_l0_g1 R0 False R_l0_g1 3 7 R_l0_g3 4 3 R_l0_g4 5 3 R_l0_g2 8 1 R_l0_g0 9 1 True R_l0_g6 4 1 R_l0_g9 4 8 R_l0_g7 5 4 R_l0_g5 7 2 R_l0_g8 9 7 ``` ``` ```
https://api.github.com/repos/pandas-dev/pandas/pulls/3242
2013-04-02T17:42:34Z
2014-01-26T21:32:08Z
null
2014-06-18T13:12:19Z