title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: GH3216 Upcast when needed to DataFrame when setitem with indexer | diff --git a/RELEASE.rst b/RELEASE.rst
index fa19e8c9eb475..c672fb65ee96f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -122,6 +122,8 @@ pandas 0.11.0
- Handle "ragged" CSV files missing trailing delimiters in rows with missing
fields when also providing explicit list of column names (so the parser
knows how many columns to expect in the result) (GH2981_)
+ - On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame
+ on rhs (GH3216_)
**API Changes**
@@ -249,9 +251,11 @@ pandas 0.11.0
- Add comparison operators to Period object (GH2781_)
- Fix bug when concatenating two Series into a DataFrame when they have the
same name (GH2797_)
- - fix automatic color cycling when plotting consecutive timeseries
+ - Fix automatic color cycling when plotting consecutive timeseries
without color arguments (GH2816_)
- fixed bug in the pickling of PeriodIndex (GH2891_)
+ - Upcast/split blocks when needed in a mixed DataFrame when setitem
+ with an indexer (GH3216_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -340,6 +344,7 @@ pandas 0.11.0
.. _GH2751: https://github.com/pydata/pandas/issues/2751
.. _GH2747: https://github.com/pydata/pandas/issues/2747
.. _GH2816: https://github.com/pydata/pandas/issues/2816
+.. _GH3216: https://github.com/pydata/pandas/issues/2816
pandas 0.10.1
=============
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 28f3a19ab5298..7a78539c10a98 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -694,6 +694,11 @@ def _maybe_promote(dtype, fill_value=np.nan):
if issubclass(fill_value.dtype.type, (np.datetime64,np.timedelta64)):
fill_value = tslib.iNaT
else:
+
+ # we need to change to object type as our
+ # fill_value is of object type
+ if fill_value.dtype == np.object_:
+ dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
@@ -763,7 +768,7 @@ def changeit():
if change is not None:
change.dtype = r.dtype
change[:] = r
-
+
return r, True
# we want to decide whether putmask will work
@@ -792,6 +797,34 @@ def changeit():
return result, False
+def _maybe_upcast_indexer(result, indexer, other, dtype=None):
+ """ a safe version of setitem that (potentially upcasts the result
+ return the result and a changed flag
+ """
+
+ def changeit():
+ # our type is wrong here, need to upcast
+ r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
+ try:
+ r[indexer] = other
+ except:
+
+ # if we hit this then we still have an incompatible type
+ r[indexer] = fill_value
+
+ return r, True
+
+ new_dtype, fill_value = _maybe_promote(result.dtype,other)
+ if new_dtype != result.dtype:
+ return changeit()
+
+ try:
+ result[indexer] = other
+ except:
+ return changeit()
+
+ return result, False
+
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicty type promotion and coercion
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 790fe87af364a..5230bf20e60b4 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -119,24 +119,54 @@ def _setitem_with_indexer(self, indexer, value):
plane_indexer = indexer[:het_axis] + indexer[het_axis + 1:]
item_labels = self.obj._get_axis(het_axis)
- if isinstance(value, (np.ndarray, DataFrame)) and value.ndim > 1:
- raise ValueError('Setting mixed-type DataFrames with '
- 'array/DataFrame pieces not yet supported')
+ def setter(item, v):
+ data = self.obj[item]
+ values = data.values
+ if np.prod(values.shape):
+ result, changed = com._maybe_upcast_indexer(values,plane_indexer,v,dtype=getattr(data,'dtype',None))
+ if changed:
+ self.obj[item] = result
- try:
- for item in item_labels[het_idx]:
- data = self.obj[item]
- values = data.values
- if np.prod(values.shape):
- value = com._possibly_cast_to_datetime(
- value, getattr(data, 'dtype', None))
- values[plane_indexer] = value
- except ValueError:
- for item, v in zip(item_labels[het_idx], value):
- data = self.obj[item]
- values = data.values
- if np.prod(values.shape):
- values[plane_indexer] = v
+ labels = item_labels[het_idx]
+
+ if _is_list_like(value):
+
+ # we have an equal len Frame
+ if isinstance(value, DataFrame) and value.ndim > 1:
+
+ for item in labels:
+
+ # align to
+ if item in value:
+ v = value[item]
+ v = v.reindex(self.obj[item].reindex(v.index).dropna().index)
+ setter(item, v.values)
+ else:
+ setter(item, np.nan)
+
+ # we have an equal len ndarray
+ elif isinstance(value, np.ndarray) and value.ndim > 1:
+ if len(labels) != len(value):
+ raise ValueError('Must have equal len keys and value when'
+ ' setting with an ndarray')
+
+ for i, item in enumerate(labels):
+ setter(item, value[:,i])
+
+ # we have an equal len list/ndarray
+ elif len(labels) == 1 and len(self.obj[labels[0]]) == len(value):
+ setter(labels[0], value)
+
+ # per label values
+ else:
+
+ for item, v in zip(labels, value):
+ setter(item, v)
+ else:
+
+ # scalar
+ for item in labels:
+ setter(item, value)
else:
if isinstance(indexer, tuple):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index acfc875fa45a1..4656054344ddb 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2065,7 +2065,7 @@ def update(self, other):
"""
other = other.reindex_like(self)
mask = notnull(other)
- np.putmask(self.values, mask, other.values)
+ com._maybe_upcast_putmask(self.values,mask,other,change=self.values)
#----------------------------------------------------------------------
# Reindexing, sorting
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ced4b23b7e4fa..586b6030ec2da 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1275,9 +1275,10 @@ def test_setitem_single_column_mixed_datetime(self):
df.ix['d', :] = nan
self.assert_(com.isnull(df.ix['c', :]).all() == False)
+ # as of GH 3216 this will now work!
# try to set with a list like item
- self.assertRaises(
- Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
+ #self.assertRaises(
+ # Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
piece = self.frame.ix[:2, ['A', 'B']]
@@ -1285,10 +1286,50 @@ def test_setitem_frame(self):
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
piece.values)
+ # GH 3216
+
+ # already aligned
+ f = self.mixed_frame.copy()
+ piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])
+ key = (slice(None,2), ['A', 'B'])
+ f.ix[key] = piece
+ assert_almost_equal(f.ix[0:2, ['A', 'B']].values,
+ piece.values)
+
+ # rows unaligned
+ f = self.mixed_frame.copy()
+ piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])
+ key = (slice(None,2), ['A', 'B'])
+ f.ix[key] = piece
+ assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,
+ piece.values[0:2])
+
+ # key is unaligned with values
+ f = self.mixed_frame.copy()
+ piece = f.ix[:2, ['A']]
+ key = (slice(-2, None), ['A', 'B'])
+ f.ix[key] = piece
+ piece['B'] = np.nan
+ assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
+ piece.values)
+
+ # ndarray
+ f = self.mixed_frame.copy()
piece = self.mixed_frame.ix[:2, ['A', 'B']]
- f = self.mixed_frame.ix.__setitem__
key = (slice(-2, None), ['A', 'B'])
- self.assertRaises(ValueError, f, key, piece)
+ f.ix[key] = piece.values
+ assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
+ piece.values)
+
+
+ # needs upcasting
+ df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
+ df2 = df.copy()
+ df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
+ expected = df.reindex(columns=['A','B'])
+ expected += 0.5
+ expected['C'] = df['C']
+ assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.ix[:2, ['A', 'B']]
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 75aa208e0c6b2..297c744b96f28 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -724,6 +724,18 @@ def test_xs_multiindex(self):
expected = df.iloc[:,0:2].loc[:,'a']
assert_frame_equal(result,expected)
+ def test_setitem_dtype_upcast(self):
+
+ # GH3216
+ df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
+ df['c'] = np.nan
+ self.assert_(df['c'].dtype == np.float64)
+
+ df.ix[0,'c'] = 'foo'
+ expected = DataFrame([{"a": 1, "c" : 'foo'}, {"a": 3, "b": 2, "c" : np.nan}])
+ assert_frame_equal(df,expected)
+
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index bd9e61ba5d89b..830c8c07c24da 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2314,6 +2314,13 @@ def test_update(self):
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
+ # GH 3217
+ df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
+ df['c'] = np.nan
+
+ # this will fail as long as series is a sub-class of ndarray
+ ##### df['c'].update(Series(['foo'],index=[0])) #####
+
def test_corr(self):
_skip_if_no_scipy()
| closes #3216
BUG: GH3216 Upcast when needed to DataFrame when setitem with indexer
The following would raise previously
```
In [8]: df = pd.DataFrame([{"a": 1}, {"a": 3, "b": 2}])
In [9]: df['c'] = np.nan
In [10]: df.ix[0,'c'] = 'foo'
In [11]: df
Out[11]:
a b c
0 1 NaN foo
1 3 2 NaN
In [12]: df.dtypes
Out[12]:
a int64
b float64
c object
dtype: object
```
ENH: On a mixed DataFrame, allow setting with indexers with
ndarray/DataFrame on rhs (this was disallowed in the code previously)
```
In [8]: df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
In [9]: df2 = df.copy()
In [10]: df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
In [11]: df2
Out[11]:
A B C
0 1.5 2.5 foo
1 3.5 4.5 bar
In [12]: df2.dtypes
Out[12]:
A float64
B float64
C object
dtype: object
In [13]: df.dtypes
Out[13]:
A int64
B int64
C object
dtype: object
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3219 | 2013-03-29T17:47:58Z | 2013-03-31T00:12:48Z | 2013-03-31T00:12:48Z | 2014-06-20T15:07:41Z |
BUG: stacking with MultiIndex column with some unused level uniques fail... | diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 2e7ec3ad9c280..adc824544b8c7 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -17,7 +17,7 @@
import pandas.algos as algos
-from pandas.core.index import MultiIndex
+from pandas.core.index import MultiIndex, Index
class ReshapeError(Exception):
@@ -159,7 +159,7 @@ def get_new_values(self):
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
-
+
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
@@ -506,11 +506,16 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
new_data = {}
level_vals = this.columns.levels[-1]
levsize = len(level_vals)
+ drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
-
+ slice_len = loc.stop - loc.start
# can make more efficient?
- if loc.stop - loc.start != levsize:
+
+ if slice_len == 0:
+ drop_cols.append(key)
+ continue
+ elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals).values
@@ -522,6 +527,9 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
new_data[key] = value_slice.ravel()
+ if len(drop_cols) > 0:
+ new_columns = new_columns - drop_cols
+
N = len(this)
if isinstance(this.index, MultiIndex):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index c93dcf386e1c9..08d214570d9cc 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -930,6 +930,22 @@ def test_stack_unstack_multiple(self):
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
+ def test_stack_multiple_bug(self):
+ """ bug when some uniques are not present in the data #3170"""
+ id_col = ([1] * 3) + ([2] * 3)
+ name = (['a'] * 3) + (['b'] * 3)
+ date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
+ var1 = np.random.randint(0, 100, 6)
+ df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
+
+ multi = df.set_index(['DATE', 'ID'])
+ unst = multi.unstack('ID')
+ down = unst.resample('W-THU')
+
+ rs = down.stack('ID')
+ xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
+ assert_frame_equal(rs, xp)
+
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
| ...s #3170
| https://api.github.com/repos/pandas-dev/pandas/pulls/3218 | 2013-03-29T16:36:17Z | 2013-03-31T15:07:17Z | 2013-03-31T15:07:17Z | 2014-06-21T02:55:24Z |
API: return None when inplace=True. re #1893 | diff --git a/RELEASE.rst b/RELEASE.rst
index d26b599de05b1..a8769cd4bbc91 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -153,6 +153,7 @@ pandas 0.11.0
- util.testing.assert_frame_equal now checks the column and index names (GH2964_)
- Constructors will now return a more informative ValueError on failures
when invalid shapes are passed
+ - Methods return None when inplace=True (GH1893_)
**Bug Fixes**
@@ -251,6 +252,7 @@ pandas 0.11.0
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
+.. _GH1893: https://github.com/pydata/pandas/issues/1893
.. _GH1978: https://github.com/pydata/pandas/issues/1978
.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2121: https://github.com/pydata/pandas/issues/2121
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bd9c609f6ef38..530ac3f539d3f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2708,16 +2708,9 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
frame.index = index
- if inplace:
- import warnings
- warnings.warn("set_index with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
- else:
+ if not inplace:
return frame
- return frame if not inplace else None
-
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
@@ -2815,12 +2808,7 @@ def _maybe_cast(values):
new_obj.insert(0, name, _maybe_cast(values))
new_obj.index = new_index
- if inplace:
- import warnings
- warnings.warn("reset_index with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
- else:
+ if not inplace:
return new_obj
delevel = deprecate('delevel', reset_index)
@@ -2988,10 +2976,6 @@ def drop_duplicates(self, cols=None, take_last=False, inplace=False):
inds, = (-duplicated).nonzero()
self._data = self._data.take(inds)
self._clear_item_cache()
- import warnings
- warnings.warn("drop_duplicates with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
else:
return self[-duplicated]
@@ -3147,10 +3131,6 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False):
self._data = self._data.take(indexer)
self._clear_item_cache()
- import warnings
- warnings.warn("sort/sort_index with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
else:
return self.take(indexer, axis=axis, convert=False)
@@ -3194,10 +3174,6 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
self._data = self._data.take(indexer)
self._clear_item_cache()
- import warnings
- warnings.warn("sortlevel with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
else:
return self.take(indexer, axis=axis, convert=False)
@@ -3328,10 +3304,6 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
if inplace:
self._data = new_data
- import warnings
- warnings.warn("fillna with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
else:
return self._constructor(new_data)
@@ -3380,10 +3352,6 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
self._consolidate_inplace()
axis = self._get_axis_number(axis)
- if inplace:
- import warnings
- warnings.warn("replace with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
if value is None:
return self._interpolate(to_replace, method, axis, inplace, limit)
@@ -3397,13 +3365,17 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
new_data = self._data
for c, src in to_replace.iteritems():
if c in value and c in self:
- new_data = new_data.replace(src, value[c], filter = [ c ], inplace=inplace)
+ new_data = new_data.replace(src, value[c],
+ filter=[ c ],
+ inplace=inplace)
elif not isinstance(value, (list, np.ndarray)):
new_data = self._data
for k, src in to_replace.iteritems():
if k in self:
- new_data = new_data.replace(src, value, filter = [ k ], inplace=inplace)
+ new_data = new_data.replace(src, value,
+ filter = [ k ],
+ inplace=inplace)
else:
raise ValueError('Fill value must be scalar or dict or Series')
@@ -3430,7 +3402,9 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
new_data = self._data
for k, v in value.iteritems():
if k in self:
- new_data = new_data.replace(to_replace, v, filter = [ k ], inplace=inplace)
+ new_data = new_data.replace(to_replace, v,
+ filter=[ k ],
+ inplace=inplace)
elif not isinstance(value, (list, np.ndarray)): # NA -> 0
new_data = self._data.replace(to_replace, value,
@@ -3442,7 +3416,6 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
if inplace:
self._data = new_data
- return self
else:
return self._constructor(new_data)
@@ -3525,12 +3498,7 @@ def rename(self, index=None, columns=None, copy=True, inplace=False):
if columns is not None:
result._rename_columns_inplace(columns_f)
- if inplace:
- import warnings
- warnings.warn("rename with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
- else:
+ if not inplace:
return result
def _rename_index_inplace(self, mapper):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index acfc875fa45a1..8e4d75af43fc9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -122,7 +122,7 @@ def convert_to_array(values):
# 2 datetimes or 2 timedeltas
if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and is_datetime_rhs):
-
+
dtype = 'timedelta64[ns]'
# we may have to convert to object unfortunately here
@@ -601,7 +601,7 @@ def _is_mixed_type(self):
def _slice(self, slobj, axis=0, raise_on_error=False):
if raise_on_error:
_check_slice_bounds(slobj, self.values)
-
+
return self._constructor(self.values[slobj], index=self.index[slobj])
def __getitem__(self, key):
@@ -1047,11 +1047,6 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
- import warnings
- warnings.warn("Series.reset_index with inplace=True will "
- "return None from pandas 0.11 onward",
- FutureWarning)
- return self
else:
return Series(self.values.copy(), index=new_index,
name=self.name)
@@ -2615,13 +2610,8 @@ def fillna(self, value=None, method=None, inplace=False,
-------
filled : Series
"""
- if inplace:
- import warnings
- warnings.warn("Series.fillna with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
-
if not self._can_hold_na:
- return self.copy() if not inplace else self
+ return self.copy() if not inplace else None
if value is not None:
if method is not None:
@@ -2647,9 +2637,7 @@ def fillna(self, value=None, method=None, inplace=False,
else:
result = Series(values, index=self.index, name=self.name)
- if inplace:
- return self
- else:
+ if not inplace:
return result
def ffill(self, inplace=False, limit=None):
@@ -2756,12 +2744,7 @@ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
raise ValueError('Unrecognized to_replace type %s' %
type(to_replace))
- if inplace:
- import warnings
- warnings.warn("Series.replace with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
- else:
+ if not inplace:
return result
def isin(self, values):
@@ -3110,12 +3093,7 @@ def rename(self, mapper, inplace=False):
result = self if inplace else self.copy()
result.index = Index([mapper_f(x) for x in self.index], name=self.index.name)
- if inplace:
- import warnings
- warnings.warn("Series.rename with inplace=True will return None"
- " from pandas 0.11 onward", FutureWarning)
- return self
- else:
+ if not inplace:
return result
@property
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ced4b23b7e4fa..c17a39dd55d18 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9427,7 +9427,7 @@ def test_strange_column_corruption_issue(self):
self.assertTrue(first == second == 0)
def test_inplace_return_self(self):
- # re #1893, TODO: remove in 0.11
+ # re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
@@ -9435,7 +9435,7 @@ def test_inplace_return_self(self):
def _check_f(base, f):
result = f(base)
- self.assertTrue(result is base)
+ self.assertTrue(result is None)
# -----DataFrame-----
| @jseabold et al, speak now or forever hold your peace!
| https://api.github.com/repos/pandas-dev/pandas/pulls/3212 | 2013-03-29T04:25:46Z | 2013-03-31T20:37:45Z | 2013-03-31T20:37:45Z | 2013-05-06T07:56:08Z |
BUG: take into account adjoin width, closes #3201 | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 862b09f5e84e3..c98d2649c20f7 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -317,10 +317,11 @@ def to_string(self, force_unicode=None):
def _join_multiline(self, *strcols):
lwidth = self.line_width
+ adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
- lwidth -= np.array([len(x) for x in idx]).max()
+ lwidth -= np.array([len(x) for x in idx]).max() + adjoin_width
col_widths = [np.array([len(x) for x in col]).max()
if len(col) > 0 else 0
@@ -339,7 +340,7 @@ def _join_multiline(self, *strcols):
else:
row.append([' '] * len(self.frame))
- str_lst.append(adjoin(1, *row))
+ str_lst.append(adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
@@ -1765,14 +1766,21 @@ def _put_lines(buf, lines):
buf.write('\n'.join(lines))
-def _binify(cols, width):
+def _binify(cols, line_width):
+ adjoin_width = 1
bins = []
curr_width = 0
+ i_last_column = len(cols) - 1
for i, w in enumerate(cols):
- curr_width += w
- if curr_width + 2 > width and i > 0:
+ w_adjoined = w + adjoin_width
+ curr_width += w_adjoined
+ if i_last_column == i:
+ wrap = curr_width + 1 > line_width and i > 0
+ else:
+ wrap = curr_width + 2 > line_width and i > 0
+ if wrap:
bins.append(i)
- curr_width = w
+ curr_width = w_adjoined
bins.append(len(cols))
return bins
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 0ae8934c898b0..42743c49b1e8a 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -909,6 +909,11 @@ def test_to_string_format_na(self):
'4 4 bar')
self.assertEqual(result, expected)
+ def test_to_string_line_width(self):
+ df = pd.DataFrame(123, range(10, 15), range(30))
+ s = df.to_string(line_width=80)
+ self.assertEqual(max(len(l) for l in s.split('\n')), 80)
+
def test_to_html(self):
# big mixed
biggie = DataFrame({'A': randn(200),
| https://api.github.com/repos/pandas-dev/pandas/pulls/3208 | 2013-03-28T21:56:29Z | 2013-04-10T06:21:13Z | 2013-04-10T06:21:13Z | 2013-04-10T06:21:14Z | |
Gh2836 resolve merge conflict | diff --git a/RELEASE.rst b/RELEASE.rst
index 7a8e8d583f8e3..d26b599de05b1 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -246,6 +246,8 @@ pandas 0.11.0
- Add comparison operators to Period object (GH2781_)
- Fix bug when concatenating two Series into a DataFrame when they have the
same name (GH2797_)
+ - fix automatic color cycling when plotting consecutive timeseries
+ without color arguments (GH2816_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -329,6 +331,9 @@ pandas 0.11.0
.. _GH3178: https://github.com/pydata/pandas/issues/3178
.. _GH3179: https://github.com/pydata/pandas/issues/3179
.. _GH3189: https://github.com/pydata/pandas/issues/3189
+.. _GH2751: https://github.com/pydata/pandas/issues/2751
+.. _GH2747: https://github.com/pydata/pandas/issues/2747
+.. _GH2816: https://github.com/pydata/pandas/issues/2816
pandas 0.10.1
=============
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index b264e8696ec4b..2a8a772c1db93 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -678,6 +678,18 @@ def test_time_series_plot_color_kwargs(self):
line = ax.get_lines()[0]
self.assert_(line.get_color() == 'green')
+ @slow
+ def test_time_series_plot_color_with_empty_kwargs(self):
+ import matplotlib.pyplot as plt
+
+ plt.close('all')
+ for i in range(3):
+ ax = Series(np.arange(12) + 1, index=date_range(
+ '1/1/2000', periods=12)).plot()
+
+ line_colors = [ l.get_color() for l in ax.get_lines() ]
+ self.assert_(line_colors == ['b', 'g', 'r'])
+
@slow
def test_grouped_hist(self):
import matplotlib.pyplot as plt
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 137eab65620c6..2b471cbf13192 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -807,6 +807,8 @@ def _maybe_right_yaxis(self, ax):
if (sec_true or has_sec) and not hasattr(ax, 'right_ax'):
orig_ax, new_ax = ax, ax.twinx()
+ new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
+
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if len(orig_ax.get_lines()) == 0: # no data on left y
@@ -1122,13 +1124,12 @@ def _get_colors(self):
cycle = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
if isinstance(cycle, basestring):
cycle = list(cycle)
- has_colors = 'color' in self.kwds
colors = self.kwds.get('color', cycle)
return colors
def _maybe_add_color(self, colors, kwds, style, i):
- kwds.pop('color', None)
- if style is None or re.match('[a-z]+', style) is None:
+ has_color = 'color' in kwds
+ if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _make_plot(self):
@@ -2147,6 +2148,8 @@ def on_right(i):
if on_right(0):
orig_ax = ax0
ax0 = ax0.twinx()
+ ax0._get_lines.color_cycle = orig_ax._get_lines.color_cycle
+
orig_ax.get_yaxis().set_visible(False)
orig_ax.right_ax = ax0
ax0.left_ax = orig_ax
@@ -2164,6 +2167,8 @@ def on_right(i):
if on_right(i):
orig_ax = ax
ax = ax.twinx()
+ ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
+
orig_ax.get_yaxis().set_visible(False)
axarr[i] = ax
| Rebased and resolved merge conflict of #2836.
(Is the correct thing to do here to set up a new branch? I wasn't brave enough to try and force directly to the pr...)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3207 | 2013-03-28T21:52:41Z | 2013-03-28T22:17:44Z | 2013-03-28T22:17:44Z | 2013-03-28T22:17:44Z |
TST: fix tests in master | diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index b2dbca70f3b77..383b98bfc440d 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -329,7 +329,23 @@ def _set_columns(self, cols):
if len(cols) != len(self._series):
raise Exception('Columns length %d did not match data %d!' %
(len(cols), len(self._series)))
- self._columns = _ensure_index(cols)
+
+ cols = _ensure_index(cols)
+
+ # rename the _series if needed
+ existing = getattr(self,'_columns',None)
+ if existing is not None and len(existing) == len(cols):
+
+ new_series = {}
+ for i, col in enumerate(existing):
+ new_col = cols[i]
+ if new_col in new_series: # pragma: no cover
+ raise Exception('Non-unique mapping!')
+ new_series[new_col] = self._series.get(col)
+
+ self._series = new_series
+
+ self._columns = cols
index = property(fget=_get_index, fset=_set_index)
columns = property(fget=_get_columns, fset=_set_columns)
@@ -619,7 +635,7 @@ def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer,
def _rename_index_inplace(self, mapper):
self.index = [mapper(x) for x in self.index]
-
+
def _rename_columns_inplace(self, mapper):
new_series = {}
new_columns = []
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 9ce7850857d50..947a2ffac6039 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -136,6 +136,9 @@ def __eq__(self, other):
raise TypeError(other)
return False
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
def __hash__(self):
return hash((self.ordinal, self.freq))
| ENH: add **ne** method to period comparisons
BUG: fix in spare_frame for issue with running apply on a SparseFrame didn't
allow the setting of the columns with a rename to work properly
something broke this......
| https://api.github.com/repos/pandas-dev/pandas/pulls/3205 | 2013-03-28T19:10:59Z | 2013-03-28T19:27:59Z | 2013-03-28T19:27:59Z | 2013-03-28T19:27:59Z |
TST: fix for tseries/test_period for py3k | diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 7fbce6106dbb8..3bb9008f7b863 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -99,7 +99,7 @@ def test_period_constructor(self):
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
- self.assert_(i1 != i4)
+ self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
| https://api.github.com/repos/pandas-dev/pandas/pulls/3204 | 2013-03-28T18:03:31Z | 2013-03-28T18:03:36Z | 2013-03-28T18:03:36Z | 2013-03-28T20:35:02Z | |
TST fix broken tests (which were using fancy asserts, broke travis py2.6... | diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 1db06a072fbbc..7fbce6106dbb8 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2039,7 +2039,7 @@ def test_notEqual(self):
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
- self.assertGreater(self.february, self.january1)
+ self.assert_(self.february > self.january1)
def test_greater_Raises_Value(self):
self.assertRaises(ValueError, self.january1.__gt__, self.day)
@@ -2048,7 +2048,7 @@ def test_greater_Raises_Type(self):
self.assertRaises(TypeError, self.january1.__gt__, 1)
def test_greaterEqual(self):
- self.assertGreaterEqual(self.january1, self.january2)
+ self.assert_(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
self.assertRaises(ValueError, self.january1.__ge__, self.day)
@@ -2057,7 +2057,7 @@ def test_greaterEqual_Raises_Value(self):
self.assertRaises(TypeError, self.january1.__ge__, 1)
def test_smallerEqual(self):
- self.assertLessEqual(self.january1, self.january2)
+ self.assert_(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
self.assertRaises(ValueError, self.january1.__le__, self.day)
@@ -2066,7 +2066,7 @@ def test_smallerEqual_Raises_Type(self):
self.assertRaises(TypeError, self.january1.__le__, 1)
def test_smaller(self):
- self.assertLess(self.january1, self.february)
+ self.assert_(self.january1 < self.february)
def test_smaller_Raises_Value(self):
self.assertRaises(ValueError, self.january1.__lt__, self.day)
@@ -2077,7 +2077,7 @@ def test_smaller_Raises_Type(self):
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
- self.assertListEqual(sorted(periods), correctPeriods)
+ self.assertEqual(sorted(periods), correctPeriods)
if __name__ == '__main__':
import nose
| ... and py3)
Master is not passing travis.
For py2.6: cleared up assertGreater etc. which don't work in py2.6 :(.
For py3: one test is broken in py3 (not sure how to fix):
```
ERROR: test_period_constructor (pandas.tseries.tests.test_period.TestPeriodProperties)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/virtualenv/python3.3_with_system_site_packages/lib/python3.3/site-packages/pandas-0.11.0.dev_4b61349-py3.3-linux-x86_64.egg/pandas/tseries/tests/test_period.py", line 102, in test_period_constructor
self.assert_(i1 != i4)
File "/home/travis/virtualenv/python3.3_with_system_site_packages/lib/python3.3/site-packages/pandas-0.11.0.dev_4b61349-py3.3-linux-x86_64.egg/pandas/tseries/period.py", line 132, in __eq__
raise ValueError("Cannot compare non-conforming periods")
ValueError: Cannot compare non-conforming periods
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3200 | 2013-03-28T09:03:29Z | 2013-03-28T17:15:16Z | 2013-03-28T17:15:16Z | 2013-03-28T17:15:16Z |
BUG MultiIndex sometimes tupled on apply, 2902 | diff --git a/RELEASE.rst b/RELEASE.rst
index 0a6d9b7c474ec..7a8e8d583f8e3 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -292,6 +292,7 @@ pandas 0.11.0
.. _GH2850: https://github.com/pydata/pandas/issues/2850
.. _GH2898: https://github.com/pydata/pandas/issues/2898
.. _GH2892: https://github.com/pydata/pandas/issues/2892
+.. _GH2902: https://github.com/pydata/pandas/issues/2902
.. _GH2903: https://github.com/pydata/pandas/issues/2903
.. _GH2909: https://github.com/pydata/pandas/issues/2909
.. _GH2922: https://github.com/pydata/pandas/issues/2922
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8a16d6b714dfb..bd9c609f6ef38 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4162,7 +4162,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
successes.append(i)
except Exception:
pass
- # so will work with MultiIndex, need test
+ # so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
@@ -4181,6 +4181,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
pass
raise e
+
if len(results) > 0 and _is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
@@ -4188,8 +4189,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
index = None
result = self._constructor(data=results, index=index)
- result.rename(columns=dict(zip(range(len(res_index)), res_index)),
- inplace=True)
+ result.columns = res_index
if axis == 1:
result = result.T
@@ -4199,6 +4199,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
else:
s = Series(results)
s.index = res_index
+
return s
def _apply_broadcast(self, func, axis):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4e8bd892b3c2d..a7dd96fd2ce2b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7262,6 +7262,13 @@ def test_apply_attach_name(self):
columns=self.frame.columns)
assert_frame_equal(result, expected)
+ def test_apply_multi_index(self):
+ s = DataFrame([[1,2], [3,4], [5,6]])
+ s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
+ s.columns = ['col1','col2']
+ res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
+ self.assert_(isinstance(res.index, MultiIndex))
+
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
assert_frame_equal(applied, self.frame * 2)
| Fixes #2902.
(broken tests stem from master I branched from, will rebase later to check.)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3199 | 2013-03-28T07:38:07Z | 2013-03-28T17:33:50Z | 2013-03-28T17:33:50Z | 2013-03-28T17:33:50Z |
CLN: refactor core/index and tseries/index,period to have their format, to_native_types methods consistent | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 0c6e490f3eb50..38a97af572b1c 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -428,12 +428,10 @@ def take(self, indexer, axis=0):
taken = self.view(np.ndarray).take(indexer)
return self._constructor(taken, name=self.name)
- def format(self, name=False, formatter=None, na_rep='NaN'):
+ def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
- from pandas.core.format import format_array
-
header = []
if name:
header.append(com.pprint_thing(self.name,
@@ -443,11 +441,13 @@ def format(self, name=False, formatter=None, na_rep='NaN'):
if formatter is not None:
return header + list(self.map(formatter))
- if self.is_all_dates:
- return header + _date_formatter(self)
+ return self._format_with_header(header, **kwargs)
+ def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
+ from pandas.core.format import format_array
+
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values, safe=1)
@@ -466,17 +466,18 @@ def format(self, name=False, formatter=None, na_rep='NaN'):
result = _trim_front(format_array(values, None, justify='left'))
return header + result
- def to_native_types(self, slicer=None, na_rep='', float_format=None):
+ def to_native_types(self, slicer=None, **kwargs):
+ """ slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
- if self.is_all_dates:
- return _date_formatter(values)
- else:
- mask = isnull(values)
- values = np.array(values,dtype=object)
- values[mask] = na_rep
+ return values._format_native_types(**kwargs)
+ def _format_native_types(self, na_rep='', **kwargs):
+ """ actually format my specific types """
+ mask = isnull(self)
+ values = np.array(self,dtype=object,copy=True)
+ values[mask] = na_rep
return values.tolist()
def equals(self, other):
@@ -1320,6 +1321,11 @@ def inferred_type(self):
def _constructor(self):
return Int64Index
+ @property
+ def asi8(self):
+ # do not cache or you'll create a memory leak
+ return self.values.view('i8')
+
@property
def is_all_dates(self):
"""
@@ -1489,11 +1495,8 @@ def __repr__(self):
def __len__(self):
return len(self.labels[0])
- def to_native_types(self, slicer=None, na_rep='', float_format=None):
- ix = self
- if slicer:
- ix = self[slicer]
- return ix.tolist()
+ def _format_native_types(self, **kwargs):
+ return self.tolist()
@property
def _constructor(self):
@@ -1651,13 +1654,13 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
# we have some NA
mask = lab==-1
if mask.any():
- formatted = np.array(formatted)
+ formatted = np.array(formatted,dtype=object)
formatted[mask] = na_rep
formatted = formatted.tolist()
else:
# weird all NA case
- formatted = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
+ formatted = [com.pprint_thing(na_rep if isnull(x) else x, escape_chars=('\t', '\r', '\n'))
for x in com.take_1d(lev.values, lab)]
stringified_levels.append(formatted)
@@ -1669,6 +1672,7 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
level.append(com.pprint_thing(name, escape_chars=('\t', '\r', '\n'))
if name is not None else '')
+
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
@@ -2598,23 +2602,6 @@ def _wrap_joined_index(self, joined, other):
# For utility purposes
-def _date_formatter(obj, na_rep=u'NaT'):
- data = list(obj)
-
- # tz formatter or time formatter
- zero_time = time(0, 0)
- for d in data:
- if d.time() != zero_time or d.tzinfo is not None:
- return [u'%s' % x for x in data ]
-
- values = np.array(data,dtype=object)
- mask = isnull(obj.values)
- values[mask] = na_rep
-
- imask = -mask
- values[imask] = np.array([ u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day) for dt in values[imask] ])
- return values.tolist()
-
def _sparsify(label_list, start=0):
pivoted = zip(*label_list)
k = len(label_list)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 3473e5fffb34d..0ae8934c898b0 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -666,6 +666,22 @@ def test_index_with_nan(self):
expected = u' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64'
self.assert_(result == expected)
+ # partial nan in mi
+ df2 = df.copy()
+ df2.ix[:,'id2'] = np.nan
+ y = df2.set_index(['id2','id3'])
+ result = y.to_string()
+ expected = u' id1 value\nid2 id3 \nNaN 78d 1a3 123\n 79d 9h4 64'
+ self.assert_(result == expected)
+
+ df = DataFrame({'id1': {0: np.nan, 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
+ 'id3': {0: np.nan, 1: '79d'}, 'value': {0: 123, 1: 64}})
+
+ y = df.set_index(['id1','id2','id3'])
+ result = y.to_string()
+ expected = u' value\nid1 id2 id3 \nNaN NaN NaN 123\n9h4 d67 79d 64'
+ self.assert_(result == expected)
+
def test_to_string(self):
from pandas import read_table
import re
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a7dd96fd2ce2b..ced4b23b7e4fa 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -21,7 +21,7 @@
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas.core.api import (DataFrame, Index, Series, notnull, isnull,
- MultiIndex, DatetimeIndex, Timestamp)
+ MultiIndex, DatetimeIndex, Timestamp, Period)
from pandas.io.parsers import read_csv
from pandas.util.testing import (assert_almost_equal,
@@ -4587,7 +4587,7 @@ def stuple_to_tuple(x):
cols=MultiIndex.from_tuples(map(stuple_to_tuple,recons.columns))
recons.columns = cols
- type_map = dict(i='i',f='f',s='O',u='O',dt='O')
+ type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype='O'
@@ -4599,6 +4599,11 @@ def stuple_to_tuple(x):
recons.index = np.array(map(Timestamp,recons.index),
dtype=r_dtype )
df.index = np.array(map(Timestamp,df.index),dtype=r_dtype )
+ elif r_dtype == 'p':
+ r_dtype='O'
+ recons.index = np.array(map(Timestamp,recons.index.to_datetime()),
+ dtype=r_dtype )
+ df.index = np.array(map(Timestamp,df.index.to_datetime()),dtype=r_dtype )
else:
r_dtype= type_map.get(r_dtype)
recons.index = np.array(recons.index,dtype=r_dtype )
@@ -4608,12 +4613,17 @@ def stuple_to_tuple(x):
c_dtype='O'
recons.columns = np.array(map(_to_uni,recons.columns),
dtype=c_dtype )
- df.Columns = np.array(map(_to_uni,df.columns),dtype=c_dtype )
+ df.columns = np.array(map(_to_uni,df.columns),dtype=c_dtype )
elif c_dtype == 'dt':
c_dtype='O'
recons.columns = np.array(map(Timestamp,recons.columns),
dtype=c_dtype )
- df.Columns = np.array(map(Timestamp,df.columns),dtype=c_dtype )
+ df.columns = np.array(map(Timestamp,df.columns),dtype=c_dtype )
+ elif c_dtype == 'p':
+ c_dtype='O'
+ recons.columns = np.array(map(Timestamp,recons.columns.to_datetime()),
+ dtype=c_dtype )
+ df.columns = np.array(map(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
else:
c_dtype= type_map.get(c_dtype)
recons.columns = np.array(recons.columns,dtype=c_dtype )
@@ -4631,8 +4641,8 @@ def stuple_to_tuple(x):
_do_test(mkdf(nrows, ncols,r_idx_type='dt',
c_idx_type='s'),path, 'dt','s')
- for r_idx_type in ['i', 'f','s','u']:
- for c_idx_type in ['i', 'f','s','u','dt']:
+ for r_idx_type in ['i','s','u','p']:
+ for c_idx_type in ['i', 's','u','dt','p']:
for ncols in [1,2,128]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 0e552ab0e610f..d230f3d5c3c29 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -568,6 +568,26 @@ def __contains__(self, key):
except (KeyError, TypeError):
return False
+ def _format_with_header(self, header, **kwargs):
+ return header + self._format_native_types(**kwargs)
+
+ def _format_native_types(self, na_rep=u'NaT', **kwargs):
+ data = list(self)
+
+ # tz formatter or time formatter
+ zero_time = time(0, 0)
+ for d in data:
+ if d.time() != zero_time or d.tzinfo is not None:
+ return [u'%s' % x for x in data ]
+
+ values = np.array(data,dtype=object)
+ mask = isnull(self.values)
+ values[mask] = na_rep
+
+ imask = -mask
+ values[imask] = np.array([ u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day) for dt in values[imask] ])
+ return values.tolist()
+
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
@@ -627,11 +647,6 @@ def astype(self, dtype):
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
- @property
- def asi8(self):
- # do not cache or you'll create a memory leak
- return self.values.view('i8')
-
def _get_time_micros(self):
utc = _utc()
values = self.asi8
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 947a2ffac6039..1e9aad7cf2d7b 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -12,6 +12,7 @@
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
+from pandas.core.common import isnull
from pandas.lib import Timestamp
import pandas.lib as lib
@@ -792,6 +793,15 @@ def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
+ def equals(self, other):
+ """
+ Determines if two Index objects contain the same elements.
+ """
+ if self is other:
+ return True
+
+ return np.array_equal(self.asi8, other.asi8)
+
def tolist(self):
"""
Return a list of Period objects
@@ -1029,16 +1039,18 @@ def __getitem__(self, key):
return PeriodIndex(result, name=self.name, freq=self.freq)
- def format(self, name=False, formatter=None):
- """
- Render a string representation of the Index
- """
- header = []
+ def _format_with_header(self, header, **kwargs):
+ return header + self._format_native_types(**kwargs)
- if name:
- header.append(str(self.name) if self.name is not None else '')
+ def _format_native_types(self, na_rep=u'NaT', **kwargs):
- return header + ['%s' % Period(x, freq=self.freq) for x in self]
+ values = np.array(list(self),dtype=object)
+ mask = isnull(self.values)
+ values[mask] = na_rep
+
+ imask = -mask
+ values[imask] = np.array([ u'%s' % dt for dt in values[imask] ])
+ return values.tolist()
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 59f01ba7ea074..bc2aa7628bf28 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -406,7 +406,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
- idx_type - "i"/"f"/"s"/"u"/"dt".
+ idx_type - "i"/"f"/"s"/"u"/"dt/"p".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
@@ -422,7 +422,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
assert (names is None or names is False
or names is True or len(names) is nlevels)
assert idx_type is None or \
- (idx_type in ('i', 'f', 's', 'u', 'dt') and nlevels == 1)
+ (idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and nlevels == 1)
if names is True:
# build default names
@@ -437,7 +437,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex,
- u=makeUnicodeIndex, dt=makeDateIndex).get(idx_type)
+ u=makeUnicodeIndex, dt=makeDateIndex, p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
@@ -446,7 +446,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
- '"i"/"f"/"s"/"u"/"dt".' % idx_type)
+ '"i"/"f"/"s"/"u"/"dt/"p".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
@@ -540,9 +540,9 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
- (r_idx_type in ('i', 'f', 's', 'u', 'dt') and r_idx_nlevels == 1)
+ (r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and r_idx_nlevels == 1)
assert c_idx_type is None or \
- (c_idx_type in ('i', 'f', 's', 'u', 'dt') and c_idx_nlevels == 1)
+ (c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
| - ENH: add support for Period formatting (as a native type), add `equals` comparison method
`__eq__` et al added by others
- TST: add period support to makeCustomDataFrame for testing purposes
| https://api.github.com/repos/pandas-dev/pandas/pulls/3193 | 2013-03-27T19:06:24Z | 2013-03-28T20:19:01Z | 2013-03-28T20:19:01Z | 2014-06-26T01:13:10Z |
BUG: Append the empty frame with columns, #3121 | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 28f3a19ab5298..54e4f574d7471 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1667,6 +1667,9 @@ def _concat_compat(to_concat, axis=0):
# filter empty arrays
to_concat = [x for x in to_concat if x.shape[axis] > 0]
+ # return the empty np array, if nothing to concatenate, #3121
+ if not to_concat: return np.array([], dtype=object)
+
is_datetime64 = [x.dtype == _NS_DTYPE for x in to_concat]
if all(is_datetime64):
# work around NumPy 1.6 bug
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 670b8d2dcfb8d..af6522605c8c0 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5108,6 +5108,36 @@ def test_append_list_of_series_dicts(self):
expected = df.append(DataFrame(dicts), ignore_index=True)
assert_frame_equal(result, expected)
+ def test_append_empty_dataframe(self):
+
+ # Empty df append empty df
+ df1 = DataFrame([])
+ df2 = DataFrame([])
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # Non-empty df append empty df
+ df1 = DataFrame(np.random.randn(5, 2))
+ df2 = DataFrame()
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # Empty df with columns append empty df
+ df1 = DataFrame(columns=['bar', 'foo'])
+ df2 = DataFrame()
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # Non-Empty df with columns append empty df
+ df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
+ df2 = DataFrame()
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
| From [issue3121](https://github.com/pydata/pandas/issues/3121)
### Bug
```
In [1]: import pandas as pd
In [2]: df1 = pd.DataFrame(columns=['test'])
In [3]: df2 = pd.DataFrame()
In [4]: df1.append(df2)
ValueError: need at least one array to concatenate
```
### Reason
numpy cannot concatenate nothing:
```
In [10]: import numpy as np
In [11]: np.concatenate([])
ValueError: need at least one array to concatenate
```
### Fix
Return an empty numpy array if we find that the arrays to be concatenated are empty
```
# return the empty np array, if nothing to concatenate, #3121
if not to_concat: return np.array([], dtype=object)
```
### Test
Test case `test_append_empty_frame` is added in `test_frame.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/3184 | 2013-03-27T07:32:36Z | 2013-04-03T08:08:51Z | 2013-04-03T08:08:51Z | 2014-06-12T23:15:15Z |
DOC: provide an axis alises example in the cookbook.rst | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 12140bcc66403..20f6937cf8cf5 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -297,3 +297,26 @@ The :ref:`Timedeltas <timeseries.timedeltas>` docs.
`Operating with timedeltas
<https://github.com/pydata/pandas/pull/2899>`__
+
+Aliasing Axis Names
+-------------------
+
+To globally provide aliases for axis names, one can define these 2 functions:
+
+.. ipython:: python
+
+ def set_axis_alias(cls, axis, alias):
+ if axis not in cls._AXIS_NUMBERS:
+ raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
+ cls._AXIS_ALIASES[alias] = axis
+
+ def clear_axis_alias(cls, axis, alias):
+ if axis not in cls._AXIS_NUMBERS:
+ raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
+ cls._AXIS_ALIASES.pop(alias,None)
+
+
+ set_axis_alias(DataFrame,'columns', 'myaxis2')
+ df2 = DataFrame(randn(3,2),columns=['c1','c2'],index=['i1','i2','i3'])
+ df2.sum(axis='myaxis2')
+ clear_axis_alias(DataFrame,'columns', 'myaxis2')
| dependent on #3110
| https://api.github.com/repos/pandas-dev/pandas/pulls/3177 | 2013-03-25T21:49:53Z | 2013-03-26T14:59:49Z | 2013-03-26T14:59:49Z | 2013-03-26T14:59:49Z |
TST: resample test custom_grouper haveing dtype comp error on 32-bit | diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index f1594b154f2cc..140f40fd611c8 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -44,8 +44,7 @@ def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
- data = np.array([1] * len(dti))
- s = Series(data, index=dti)
+ s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
@@ -75,8 +74,7 @@ def test_custom_grouper(self):
result = g.agg(np.sum)
assert_series_equal(result, expect)
- data = np.random.rand(len(dti), 10)
- df = DataFrame(data, index=dti)
+ df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
| from GH2763 changes
| https://api.github.com/repos/pandas-dev/pandas/pulls/3176 | 2013-03-25T20:12:15Z | 2013-03-25T20:12:20Z | 2013-03-25T20:12:20Z | 2014-06-18T10:05:48Z |
BUG: Fix the rename function for Series and DataFrame, #3165 | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 385695ec6cc50..9729cc76c4a60 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas.core.common import _possibly_downcast_to_dtype, isnull
-from pandas.core.index import Index, _ensure_index, _handle_legacy_indexes
+from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
@@ -1646,7 +1646,13 @@ def _is_indexed_like(self, other):
return True
def rename_axis(self, mapper, axis=1):
- new_axis = Index([mapper(x) for x in self.axes[axis]])
+
+ index = self.axes[axis]
+ if isinstance(index, MultiIndex):
+ new_axis = MultiIndex.from_tuples([tuple(mapper(y) for y in x) for x in index], names=index.names)
+ else:
+ new_axis = Index([mapper(x) for x in index], name=index.name)
+
if not new_axis.is_unique:
raise AssertionError('New axis must be unique to rename')
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 35448d26e4fd5..acfc875fa45a1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3108,7 +3108,7 @@ def rename(self, mapper, inplace=False):
"""
mapper_f = _get_rename_function(mapper)
result = self if inplace else self.copy()
- result.index = [mapper_f(x) for x in self.index]
+ result.index = Index([mapper_f(x) for x in self.index], name=self.index.name)
if inplace:
import warnings
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 881bc4c268af4..db7dac0ac7e98 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6742,6 +6742,20 @@ def test_rename(self):
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
self.assert_(np.array_equal(renamed.index, ['A', 'B', 'foo', 'bar']))
+ # index with name
+ index = Index(['foo', 'bar'], name='name')
+ renamer = DataFrame(data, index=index)
+ renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
+ self.assert_(np.array_equal(renamed.index, ['bar', 'foo']))
+ self.assertEquals(renamed.index.name, renamer.index.name)
+
+ # MultiIndex
+ index = MultiIndex.from_tuples([('foo1', 'bar1'), ('foo2', 'bar2')], names=['foo', 'bar'])
+ renamer = DataFrame(data, index=index)
+ renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'})
+ self.assert_(np.array_equal(renamed.index, MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])))
+ self.assertEquals(renamed.index.names, renamer.index.names)
+
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index cc69649f24cdf..353f03eaf70a8 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3576,6 +3576,11 @@ def test_rename(self):
renamed = s.rename({'b': 'foo', 'd': 'bar'})
self.assert_(np.array_equal(renamed.index, ['a', 'foo', 'c', 'bar']))
+ # index with name
+ renamer = Series(np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'))
+ renamed = renamer.rename({})
+ self.assertEqual(renamed.index.name, renamer.index.name)
+
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
| ### Bug 1: After `rename`, the `name` of the `index` is missing:
```
In [1]: import pandas as pd
In [2]: data = [1, 2]
In [3]: index = pd.Index(['a', 'b'], name='name')
In [4]: df = pd.DataFrame(data, index=index)
In [5]: df
Out[5]:
0
name
a 1
b 2
In [6]: df.rename({'a': 'c'})
Out[6]:
0
c 1
b 2
```
Both `Series` and `DataFrame` has the same problems. The reason is that we didn't set the `name` of the `index` while reconstructing the `DataFrame/Series` by `rename` .
### Bug 2: `rename` cannot work for the `MultiIndex` case, the `index` becomes tuples, the `names` of the index are gone
```
In [1]: import pandas as pd
In [2]: data = [1, 2]
In [3]: index = pd.MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['name1', 'name2'])
In [4]: df = pd.DataFrame(data, index=index)
In [5]: df
Out[5]:
0
name1 name2
a b 1
c d 2
In [6]: df.rename({'a': 'e'})
Out[6]:
0
(a, b) 1
(c, d) 2
```
The missing name issue is same as Bug1. The original `rename` function convert the dict to the mapper function and map each tuple but not each index, and doesn't reconstruct it as `MultiIndex`, so it becomes a `Index` with some tuples.
### Fix
I've fixed the above two bugs, the `rename` will also pass the `name` of the `index` too, the `rename` mapper will go through each index for the `MultiIndex` case now.
I also added some test cases in `test_series.py` and `test_frame.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/3175 | 2013-03-25T19:12:07Z | 2013-03-25T19:42:45Z | 2013-03-25T19:42:45Z | 2014-06-12T16:31:08Z |
DOC release notes add assert_frame_equal checks index and column names | diff --git a/RELEASE.rst b/RELEASE.rst
index a9c1378ff5eb1..d71fce70dd5d8 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -137,7 +137,7 @@ pandas 0.11.0
- timedelta64 are returned in appropriate cases (e.g. Series - Series,
when both are datetime64)
- - mixed datetimes and objects (GH2751_) in a constructor witll be casted
+ - mixed datetimes and objects (GH2751_) in a constructor will be cast
correctly
- astype on datetimes to object are now handled (as well as NaT
conversions to np.nan)
@@ -146,6 +146,7 @@ pandas 0.11.0
- arguments to DataFrame.clip were inconsistent to numpy and Series clipping
(GH2747_)
+ - util.testing.assert_frame_equal now checks the column and index names (GH2964_)
**Bug Fixes**
| Appended to release notes re #2964.
I put it in "API changes" (not sure if that is optimal but I wasn't sure where else it would fit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3174 | 2013-03-25T18:21:55Z | 2013-03-25T19:36:32Z | 2013-03-25T19:36:32Z | 2013-03-25T19:36:32Z |
ENH: assert_X_equal with check_less_precise should apply to index/cols too | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index bcd3fb6a35cb6..1d6b69ee9758b 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -183,7 +183,10 @@ def assert_series_equal(left, right, check_dtype=True,
assert_almost_equal(left.values, right.values, check_less_precise)
if check_dtype:
assert(left.dtype == right.dtype)
- assert(left.index.equals(right.index))
+ if check_less_precise:
+ assert_almost_equal(left.index.values, right.index.values, check_less_precise)
+ else:
+ assert(left.index.equals(right.index))
if check_index_type:
assert(type(left.index) == type(right.index))
assert(left.index.dtype == right.index.dtype)
@@ -193,7 +196,7 @@ def assert_series_equal(left, right, check_dtype=True,
getattr(right, 'freqstr', None))
-def assert_frame_equal(left, right, check_dtype=True,
+def assert_frame_equal(left, right, check_dtype=True,
check_index_type=False,
check_column_type=False,
check_frame_type=False,
@@ -204,14 +207,18 @@ def assert_frame_equal(left, right, check_dtype=True,
assert(isinstance(left, DataFrame))
assert(isinstance(right, DataFrame))
- assert(left.columns.equals(right.columns))
- assert(left.index.equals(right.index))
+ if check_less_precise:
+ assert_almost_equal(left.columns,right.columns)
+ assert_almost_equal(left.index,right.index)
+ else:
+ assert(left.columns.equals(right.columns))
+ assert(left.index.equals(right.index))
for i, col in enumerate(left.columns):
assert(col in right)
lcol = left.icol(i)
rcol = right.icol(i)
- assert_series_equal(lcol, rcol,
+ assert_series_equal(lcol, rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3173 | 2013-03-25T17:40:52Z | 2013-03-27T18:27:55Z | 2013-03-27T18:27:55Z | 2013-03-27T18:27:55Z | |
DOC: minor edits in io.rst / cookbook.rst | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 1d8701368558f..12140bcc66403 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -127,12 +127,18 @@ Splitting
`Splitting a frame
<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__
+.. _cookbook.pivot:
+
Pivot
~~~~~
+The :ref:`Pivot <reshaping.pivot>` docs.
`Partial sums and subtotals
<http://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875>`__
+`Frequency table like plyr in R
+<http://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r>`__
+
Timeseries
----------
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 107200a82903a..c18cfbd01bbca 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -59,7 +59,7 @@ We aim to make operations like this natural and easy to express using
pandas. We'll address each area of GroupBy functionality then provide some
non-trivial examples / use cases.
-See some :ref:`cookbook examples <cookbook.grouping>` for some advanced strategies
+See the :ref:`cookbook<cookbook.grouping>` for some advanced strategies
.. _groupby.split:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 272a2243125ec..3222176af2ee3 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -32,7 +32,7 @@ attention in this area. Expect more work to be invested higher-dimensional data
structures (including Panel) in the future, especially in label-based advanced
indexing.
-See some :ref:`cookbook examples <cookbook.selection>` for some advanced strategies
+See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
Choice
------
@@ -920,7 +920,7 @@ described above and in prior sections. Later, when discussing :ref:`group by
non-trivial applications to illustrate how it aids in structuring data for
analysis.
-See some :ref:`cookbook examples <cookbook.multi_index>` for some advanced strategies
+See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies
.. note::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index a244b9c545d88..1bcaf047561a5 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -35,10 +35,10 @@ CSV & Text files
The two workhorse functions for reading text files (a.k.a. flat files) are
:func:`~pandas.io.parsers.read_csv` and :func:`~pandas.io.parsers.read_table`.
They both use the same parsing code to intelligently convert tabular
-data into a DataFrame object. They can take a number of arguments:
+data into a DataFrame object. See the :ref:`cookbook<cookbook.csv>`
+for some advanced strategies
-See some :ref:`cookbook examples <cookbook.csv>` for some advanced strategies
-See some :ref:`cookbook examples <cookbook.csv>` for some advanced strategies
+They can take a number of arguments:
- ``filepath_or_buffer``: Either a string path to a file, or any object with a
``read`` method (such as an open file or ``StringIO``).
@@ -917,9 +917,10 @@ Excel files
The ``ExcelFile`` class can read an Excel 2003 file using the ``xlrd`` Python
module and use the same parsing code as the above to convert tabular data into
-a DataFrame. To use it, create the ``ExcelFile`` object:
+a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some
+advanced strategies
-See some :ref:`cookbook examples <cookbook.excel>` for some advanced strategies
+To use it, create the ``ExcelFile`` object:
.. code-block:: python
@@ -985,9 +986,8 @@ HDF5 (PyTables)
``HDFStore`` is a dict-like object which reads and writes pandas using
the high performance HDF5 format using the excellent `PyTables
-<http://www.pytables.org/>`__ library.
-
-See some :ref:`cookbook examples <cookbook.hdf>` for some advanced strategies
+<http://www.pytables.org/>`__ library. See the :ref:`cookbook<cookbook.hdf>`
+for some advanced strategies
.. ipython:: python
:suppress:
@@ -1696,9 +1696,8 @@ SQL Queries
The :mod:`pandas.io.sql` module provides a collection of query wrappers to both
facilitate data retrieval and to reduce dependency on DB-specific API. There
wrappers only support the Python database adapters which respect the `Python
-DB-API <http://www.python.org/dev/peps/pep-0249/>`_.
-
-See some :ref:`cookbook examples <cookbook.sql>` for some advanced strategies
+DB-API <http://www.python.org/dev/peps/pep-0249/>`_. See some
+:ref:`cookbook examples <cookbook.sql>` for some advanced strategies
Suppose you want to query some data with different types from a table such as:
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index 1495e9218ac41..b719f0c24e3f9 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -304,7 +304,7 @@ better) than other open source implementations (like ``base::merge.data.frame``
in R). The reason for this is careful algorithmic design and internal layout of
the data in DataFrame.
-See some :ref:`cookbook examples <cookbook.merge>` for some advanced strategies
+See the :ref:`cookbook<cookbook.merge>` for some advanced strategies
pandas provides a single function, ``merge``, as the entry point for all
standard database join operations between DataFrame objects:
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 7f799a8e8927f..9a7a9c2a87e52 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -239,7 +239,9 @@ Pivot tables and cross-tabulations
.. _reshaping.pivot:
The function ``pandas.pivot_table`` can be used to create spreadsheet-style pivot
-tables. It takes a number of arguments
+tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced strategies
+
+It takes a number of arguments
- ``data``: A DataFrame object
- ``values``: a column or a list of columns to aggregate
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 97e7661cd1152..da1f092a1881f 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -33,7 +33,7 @@ We use the standard convention for referencing the matplotlib API:
Basic plotting: ``plot``
------------------------
-See some :ref:`cookbook examples <cookbook.plotting>` for some advanced strategies
+See the :ref:`cookbook<cookbook.plotting>` for some advanced strategies
The ``plot`` method on Series and DataFrame is just a simple wrapper around
``plt.plot``:
| https://api.github.com/repos/pandas-dev/pandas/pulls/3172 | 2013-03-25T17:25:47Z | 2013-03-25T19:34:58Z | 2013-03-25T19:34:58Z | 2014-07-12T18:47:08Z | |
BUG/CLN: Exception in HDFStore are now ValueError or TypeError | diff --git a/RELEASE.rst b/RELEASE.rst
index 009bcb8c5d5d1..a9c1378ff5eb1 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -167,9 +167,11 @@ pandas 0.11.0
(so not using numexpr filtering, but isin filtering)
- Internally, change all variables to be private-like (now have leading
underscore)
- - fixes for query parsing to correctly interpret boolean and != (GH2849_, GH2973_)
- - fixes for pathological case on SparseSeries with 0-len array and compression (GH2931_)
- - fixes bug with writing rows if part of a block was all-nan (GH3012_)
+ - Fixes for query parsing to correctly interpret boolean and != (GH2849_, GH2973_)
+ - Fixes for pathological case on SparseSeries with 0-len array and compression (GH2931_)
+ - Fixes bug with writing rows if part of a block was all-nan (GH3012_)
+ - Exceptions are now ValueError or TypeError as needed
+ - A table will now raise if min_itemsize contains fields which are not queryables
- Bug showing up in applymap where some object type columns are converted (GH2909_)
had an incorrect default in convert_objects
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fd9127efa72df..84a4121387964 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -458,9 +458,9 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, star
nrows = tbls[0].nrows
for t in tbls:
if t.nrows != nrows:
- raise Exception("all tables must have exactly the same nrows!")
+ raise ValueError("all tables must have exactly the same nrows!")
if not t.is_table:
- raise Exception("object [%s] is not a table, and cannot be used in all select as multiple" % t.pathname)
+ raise TypeError("object [%s] is not a table, and cannot be used in all select as multiple" % t.pathname)
# select coordinates from the selector table
c = self.select_as_coordinates(selector, where, start=start, stop=stop)
@@ -526,7 +526,7 @@ def remove(self, key, where=None, start=None, stop=None):
except:
if where is not None:
- raise Exception("trying to remove a node with a non-None where clause!")
+ raise ValueError("trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
@@ -544,7 +544,7 @@ def remove(self, key, where=None, start=None, stop=None):
# delete from the table
else:
if not s.is_table:
- raise Exception('can only remove with where on objects written as tables')
+ raise ValueError('can only remove with where on objects written as tables')
return s.delete(where = where, start=start, stop=stop)
def append(self, key, value, columns=None, **kwargs):
@@ -597,10 +597,10 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, *
raise Exception("axes is currently not accepted as a paremter to append_to_multiple; you can create the tables indepdently instead")
if not isinstance(d, dict):
- raise Exception("append_to_multiple must have a dictionary specified as the way to split the value")
+ raise ValueError("append_to_multiple must have a dictionary specified as the way to split the value")
if selector not in d:
- raise Exception("append_to_multiple requires a selector that is in passed dict")
+ raise ValueError("append_to_multiple requires a selector that is in passed dict")
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
@@ -611,7 +611,7 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, *
for k, v in d.items():
if v is None:
if remain_key is not None:
- raise Exception("append_to_multiple can only have one value in d that is None")
+ raise ValueError("append_to_multiple can only have one value in d that is None")
remain_key = k
else:
remain_values.extend(v)
@@ -655,7 +655,7 @@ def create_table_index(self, key, **kwargs):
if s is None: return
if not s.is_table:
- raise Exception("cannot create table index on a non-table")
+ raise TypeError("cannot create table index on a non-table")
s.create_index(**kwargs)
def groups(self):
@@ -727,8 +727,8 @@ def _create_storer(self, group, value = None, table = False, append = False, **k
""" return a suitable Storer class to operate """
def error(t):
- raise NotImplementedError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
- (t,group,type(value),table,append,kwargs))
+ raise TypeError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
+ (t,group,type(value),table,append,kwargs))
pt = getattr(group._v_attrs,'pandas_type',None)
tt = getattr(group._v_attrs,'table_type',None)
@@ -742,7 +742,7 @@ def error(t):
pt = 'frame_table'
tt = 'generic_table'
else:
- raise Exception("cannot create a storer if the object is not existing nor a value are passed")
+ raise TypeError("cannot create a storer if the object is not existing nor a value are passed")
else:
try:
@@ -1044,8 +1044,10 @@ def validate_col(self, itemsize=None):
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
- raise Exception("[%s] column has a min_itemsize of [%s] but itemsize [%s] is required!"
- % (self.cname, itemsize, c.itemsize))
+ raise ValueError("Trying to store a string with len [%s] in [%s] column but\n"
+ "this column has a limit of [%s]!\n"
+ "Consider using min_itemsize to preset the sizes on these columns"
+ % (itemsize,self.cname, c.itemsize))
return c.itemsize
return None
@@ -1176,11 +1178,11 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, **kwargs):
if inferred_type == 'datetime64':
self.set_atom_datetime64(block)
elif inferred_type == 'date':
- raise NotImplementedError(
- "date is not implemented as a table column")
+ raise TypeError(
+ "[date] is not implemented as a table column")
elif inferred_type == 'unicode':
- raise NotImplementedError(
- "unicode is not implemented as a table column")
+ raise TypeError(
+ "[unicode] is not implemented as a table column")
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
@@ -1209,9 +1211,9 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
col = block.get(item)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
- raise NotImplementedError("cannot serialize the column [%s] because "
- "its data contents are [%s] object dtype" %
- (item,inferred_type))
+ raise TypeError("Cannot serialize the column [%s] because\n"
+ "its data contents are [%s] object dtype" %
+ (item,inferred_type))
# itemsize is the maximum length of a string (along any dimension)
@@ -1268,13 +1270,13 @@ def validate_attr(self, append):
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
- raise Exception("appended items do not match existing items"
+ raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
- raise Exception("appended items dtype do not match existing items dtype"
+ raise ValueError("appended items dtype do not match existing items dtype"
" in table!")
def convert(self, values, nan_rep):
@@ -1497,7 +1499,7 @@ def delete(self, where = None, **kwargs):
self._handle.removeNode(self.group, recursive=True)
return None
- raise NotImplementedError("cannot delete on an abstract storer")
+ raise TypeError("cannot delete on an abstract storer")
class GenericStorer(Storer):
""" a generified storer version """
@@ -2045,7 +2047,7 @@ def validate(self, other):
for c in ['index_axes','non_index_axes','values_axes']:
if getattr(self,c,None) != getattr(other,c,None):
- raise Exception("invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c,getattr(self,c,None),getattr(other,c,None)))
+ raise ValueError("invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c,getattr(self,c,None),getattr(other,c,None)))
@property
def nrows_expected(self):
@@ -2132,6 +2134,21 @@ def validate_version(self, where = None):
ws = incompatibility_doc % '.'.join([ str(x) for x in self.version ])
warnings.warn(ws, IncompatibilityWarning)
+ def validate_min_itemsize(self, min_itemsize):
+ """ validate the min_itemisze doesn't contain items that are not in the axes
+ this needs data_columns to be defined """
+ if min_itemsize is None: return
+ if not isinstance(min_itemsize, dict): return
+
+ q = self.queryables()
+ for k, v in min_itemsize.items():
+
+ # ok, apply generally
+ if k == 'values':
+ continue
+ if k not in q:
+ raise ValueError("min_itemsize has [%s] which is not an axis or data_column" % k)
+
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
@@ -2262,8 +2279,8 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
try:
axes = _AXES_MAP[type(obj)]
except:
- raise NotImplementedError("cannot properly create the storer for: [group->%s,value->%s]" %
- (self.group._v_name,type(obj)))
+ raise TypeError("cannot properly create the storer for: [group->%s,value->%s]" %
+ (self.group._v_name,type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
@@ -2280,7 +2297,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
# currently support on ndim-1 axes
if len(axes) != self.ndim - 1:
- raise Exception("currently only support ndim-1 indexers in an AppendableTable")
+ raise ValueError("currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
@@ -2370,7 +2387,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
try:
existing_col = existing_table.values_axes[i]
except:
- raise Exception("Incompatible appended table [%s] with existing table [%s]" %
+ raise ValueError("Incompatible appended table [%s] with existing table [%s]" %
(blocks,existing_table.values_axes))
else:
existing_col = None
@@ -2386,12 +2403,15 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
col.set_pos(j)
self.values_axes.append(col)
- except (NotImplementedError):
- raise
+ except (NotImplementedError, ValueError, TypeError), e:
+ raise e
except (Exception), detail:
raise Exception("cannot find the correct atom type -> [dtype->%s,items->%s] %s" % (b.dtype.name, b.items, str(detail)))
j += 1
+ # validate our min_itemsize
+ self.validate_min_itemsize(min_itemsize)
+
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
@@ -2433,7 +2453,7 @@ def process_filter(field, filt):
takers = op(values,filt)
return obj.ix._getitem_axis(takers,axis=axis_number)
- raise Exception("cannot find the field [%s] for filtering!" % field)
+ raise ValueError("cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
@@ -3111,12 +3131,12 @@ def __init__(self, field, op=None, value=None, queryables=None):
self.value = op
else:
- raise Exception(
+ raise ValueError(
"Term does not understand the supplied field [%s]" % field)
# we have valid fields
if self.field is None or self.op is None or self.value is None:
- raise Exception("Could not create this term [%s]" % str(self))
+ raise ValueError("Could not create this term [%s]" % str(self))
# = vs ==
if self.op == '=':
@@ -3125,7 +3145,7 @@ def __init__(self, field, op=None, value=None, queryables=None):
# we have valid conditions
if self.op in ['>', '>=', '<', '<=']:
if hasattr(self.value, '__iter__') and len(self.value) > 1:
- raise Exception("an inequality condition cannot have multiple values [%s]" % str(self))
+ raise ValueError("an inequality condition cannot have multiple values [%s]" % str(self))
if not hasattr(self.value, '__iter__'):
self.value = [self.value]
@@ -3157,7 +3177,7 @@ def eval(self):
""" set the numexpr expression for this term """
if not self.is_valid:
- raise Exception("query term is not valid [%s]" % str(self))
+ raise ValueError("query term is not valid [%s]" % str(self))
# convert values if we are in the table
if self.is_in_table:
@@ -3199,7 +3219,7 @@ def eval(self):
else:
- raise Exception("passing a filterable condition to a non-table indexer [%s]" % str(self))
+ raise TypeError("passing a filterable condition to a non-table indexer [%s]" % str(self))
def convert_value(self, v):
""" convert the expression that is in the term to something that is accepted by pytables """
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 158cb351678f3..bd90323daf4bf 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -511,7 +511,7 @@ def test_append_frame_column_oriented(self):
tm.assert_frame_equal(expected, result)
# this isn't supported
- self.assertRaises(Exception, store.select, 'df1', (
+ self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index', '>', df.index[4])))
# selection on the non-indexable
@@ -551,7 +551,7 @@ def check_indexers(key, indexers):
# pass incorrect number of axes
store.remove('p4d')
- self.assertRaises(Exception, store.append, 'p4d', p4d.ix[
+ self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
@@ -615,11 +615,11 @@ def check_col(key,name,size):
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
- self.assertRaises(Exception, store.append, 's3')
+ self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
- self.assertRaises(Exception, store.append, 's4', wp2)
+ self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
@@ -644,7 +644,7 @@ def check_col(key,name,size):
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
- self.assertRaises(Exception, store.append, 'df_new', df_new)
+ self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
store.remove('df')
@@ -668,6 +668,18 @@ def check_col(key,name,size):
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
+ # invalid min_itemsize keys
+
+ df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
+
+ store.remove('df')
+ self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
+
+ # invalid sizes
+ store.remove('df')
+ store.append('df', df[:3], min_itemsize=3)
+ self.assertRaises(ValueError, store.append, 'df', df[3:])
+
def test_append_with_data_columns(self):
with ensure_clean(self.path) as store:
@@ -842,7 +854,7 @@ def col(t,column):
# try to index a non-table
store.remove('f2')
store.put('f2', df)
- self.assertRaises(Exception, store.create_table_index, 'f2')
+ self.assertRaises(TypeError, store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
@@ -970,7 +982,7 @@ def test_append_diff_item_order(self):
with ensure_clean(self.path) as store:
store.put('panel', wp1, table=True)
- self.assertRaises(Exception, store.put, 'panel', wp2,
+ self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
@@ -993,17 +1005,17 @@ def test_append_misc(self):
# unsuported data types for non-tables
p4d = tm.makePanel4D()
- self.assertRaises(Exception, store.put,'p4d',p4d)
+ self.assertRaises(TypeError, store.put,'p4d',p4d)
# unsupported data type for table
s = tm.makeStringSeries()
- self.assertRaises(Exception, store.append,'s',s)
+ self.assertRaises(TypeError, store.append,'s',s)
# unsuported data types
- self.assertRaises(Exception, store.put,'abc',None)
- self.assertRaises(Exception, store.put,'abc','123')
- self.assertRaises(Exception, store.put,'abc',123)
- self.assertRaises(Exception, store.put,'abc',np.arange(5))
+ self.assertRaises(TypeError, store.put,'abc',None)
+ self.assertRaises(TypeError, store.put,'abc','123')
+ self.assertRaises(TypeError, store.put,'abc',123)
+ self.assertRaises(TypeError, store.put,'abc',np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
@@ -1024,12 +1036,12 @@ def test_append_raise(self):
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assert_(df.dtypes['invalid'] == np.object_)
- self.assertRaises(NotImplementedError, store.append,'df',df)
+ self.assertRaises(TypeError, store.append,'df',df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
- self.assertRaises(NotImplementedError, store.append,'df',df)
+ self.assertRaises(TypeError, store.append,'df',df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
@@ -1037,20 +1049,20 @@ def test_append_raise(self):
s[0:5] = np.nan
df['invalid'] = s
self.assert_(df.dtypes['invalid'] == np.object_)
- self.assertRaises(NotImplementedError, store.append,'df', df)
+ self.assertRaises(TypeError, store.append,'df', df)
# directy ndarray
- self.assertRaises(NotImplementedError, store.append,'df',np.arange(10))
+ self.assertRaises(TypeError, store.append,'df',np.arange(10))
# series directly
- self.assertRaises(NotImplementedError, store.append,'df',Series(np.arange(10)))
+ self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
# appending an incompatbile table
df = tm.makeDataFrame()
store.append('df',df)
df['foo'] = 'foo'
- self.assertRaises(Exception, store.append,'df',df)
+ self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
@@ -1059,7 +1071,7 @@ def test_table_index_incompatible_dtypes(self):
with ensure_clean(self.path) as store:
store.put('frame', df1, table=True)
- self.assertRaises(Exception, store.put, 'frame', df2,
+ self.assertRaises(TypeError, store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
@@ -1074,7 +1086,7 @@ def test_table_values_dtypes_roundtrip(self):
assert df2.dtypes == store['df_i8'].dtypes
# incompatible dtype
- self.assertRaises(Exception, store.append, 'df_i8', df1)
+ self.assertRaises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to actually create them thought)
df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
@@ -1157,7 +1169,7 @@ def test_unimplemented_dtypes_table_columns(self):
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
- NotImplementedError, store.append, 'df1_%s' % n, df)
+ TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
@@ -1168,7 +1180,7 @@ def test_unimplemented_dtypes_table_columns(self):
with ensure_clean(self.path) as store:
# this fails because we have a date in the object block......
- self.assertRaises(Exception, store.append, 'df_unimplemented', df)
+ self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_remove(self):
@@ -1232,12 +1244,12 @@ def test_remove_where(self):
# non - empty where
store.remove('wp')
store.put('wp', wp, table=True)
- self.assertRaises(Exception, store.remove,
+ self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
# selectin non-table with a where
# store.put('wp2', wp, table=False)
- # self.assertRaises(Exception, store.remove,
+ # self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
@@ -1753,7 +1765,7 @@ def test_select(self):
tm.assert_panel_equal(expected, result)
# selectin non-table with a where
- # self.assertRaises(Exception, store.select,
+ # self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
@@ -1983,11 +1995,11 @@ def test_frame_select(self):
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
- Exception, store.select, 'df_time', [Term("index>0")])
+ ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
- # self.assertRaises(Exception, store.select,
+ # self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_string_select(self):
@@ -2130,12 +2142,12 @@ def test_append_to_multiple(self):
with ensure_clean(self.path) as store:
# exceptions
- self.assertRaises(Exception, store.append_to_multiple,
+ self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
- self.assertRaises(Exception, store.append_to_multiple,
+ self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
- Exception, store.append_to_multiple, 'df1', df, 'df1')
+ ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
@@ -2191,7 +2203,7 @@ def test_select_as_multiple(self):
# test excpection for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
- self.assertRaises(Exception, store.select_as_multiple,
+ self.assertRaises(ValueError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_start_stop(self):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index e12b524dda736..05171523764c8 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -758,13 +758,16 @@ def max_len_string_array(ndarray[object, ndim=1] arr):
cdef:
int i, m, l
length = arr.shape[0]
+ object v
m = 0
for i from 0 <= i < length:
- l = len(arr[i])
+ v = arr[i]
+ if PyString_Check(v):
+ l = len(v)
- if l > m:
- m = l
+ if l > m:
+ m = l
return m
| A table will now raise if min_itemsize contains fields which are not queryables
| https://api.github.com/repos/pandas-dev/pandas/pulls/3167 | 2013-03-25T12:44:47Z | 2013-03-25T13:11:55Z | 2013-03-25T13:11:55Z | 2014-07-24T09:41:21Z |
BUG: GH3163 fixed to_csv with a boundry condition issue at the chunksize break | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 0d65577546f8b..0c6e490f3eb50 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -470,12 +470,11 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None):
values = self
if slicer is not None:
values = values[slicer]
- mask = isnull(values)
- values = np.array(values,dtype=object)
-
if self.is_all_dates:
- return _date_formatter(self)
+ return _date_formatter(values)
else:
+ mask = isnull(values)
+ values = np.array(values,dtype=object)
values[mask] = na_rep
return values.tolist()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 313e4a96798d1..881bc4c268af4 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4697,6 +4697,22 @@ def test_to_csv_float32_nanrep(self):
lines = f.readlines()
self.assert_(lines[1].split(',')[2] == '999')
+ @slow
+ def test_to_csv_boundry_conditions(self):
+ from pandas.util.testing import makeTimeDataFrame
+
+ with ensure_clean() as path:
+
+ df = makeTimeDataFrame(25000)
+ df.to_csv(path)
+ rs = pan.read_csv(path, index_col=0, parse_dates=True)
+ assert_frame_equal(rs, df)
+
+ df = makeTimeDataFrame(25001)
+ df.to_csv(path)
+ rs = pan.read_csv(path, index_col=0, parse_dates=True)
+ assert_frame_equal(rs, df)
+
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
| closes #3163
| https://api.github.com/repos/pandas-dev/pandas/pulls/3166 | 2013-03-25T11:39:15Z | 2013-03-25T12:45:04Z | 2013-03-25T12:45:04Z | 2013-03-25T12:45:05Z |
ENH: Use xlrd >=0.9.0 for both xls/xlsx, sidesteps GH1629 | diff --git a/README.rst b/README.rst
index 5145f801fc6eb..c9b70f07b0862 100644
--- a/README.rst
+++ b/README.rst
@@ -87,7 +87,8 @@ Optional dependencies
* `statsmodels <http://statsmodels.sourceforge.net/>`__
* Needed for parts of :mod:`pandas.stats`
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
- * openpyxl version 1.6.1 or higher
+ * openpyxl version 1.6.1 or higher, for writing .xlsx files
+ * xlrd >= 0.9.0
* Needed for Excel I/O
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 60798bacbc144..fda96248c2a75 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1803,64 +1803,32 @@ def _make_reader(self, f):
#----------------------------------------------------------------------
# ExcelFile class
-_openpyxl_msg = ("\nFor parsing .xlsx files 'openpyxl' is required.\n"
- "You can install it via 'easy_install openpyxl' or "
- "'pip install openpyxl'.\nAlternatively, you could save"
- " the .xlsx file as a .xls file.\n")
-
-
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
- Uses xlrd for parsing .xls files or openpyxl for .xlsx files.
- See ExcelFile.parse for more documentation
+ Uses xlrd. See ExcelFile.parse for more documentation
Parameters
----------
path : string or file-like object
Path to xls or xlsx file
- kind : {'xls', 'xlsx', None}, default None
"""
- def __init__(self, path_or_buf, kind=None):
+ def __init__(self, path_or_buf, kind=None, **kwds):
self.kind = kind
- self.use_xlsx = kind == 'xls'
+
+ import xlrd # throw an ImportError if we need to
+ ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
+ if ver < (0, 9):
+ raise ImportError("pandas requires xlrd >= 0.9.0 for excel support")
self.path_or_buf = path_or_buf
self.tmpfile = None
if isinstance(path_or_buf, basestring):
- if kind == 'xls' or (kind is None and
- path_or_buf.endswith('.xls')):
- self.use_xlsx = False
- import xlrd
- self.book = xlrd.open_workbook(path_or_buf)
- else:
- self.use_xlsx = True
- try:
- from openpyxl.reader.excel import load_workbook
- self.book = load_workbook(path_or_buf, use_iterators=True)
- except ImportError: # pragma: no cover
- raise ImportError(_openpyxl_msg)
+ self.book = xlrd.open_workbook(path_or_buf)
else:
data = path_or_buf.read()
-
- if self.kind == 'xls':
- import xlrd
- self.book = xlrd.open_workbook(file_contents=data)
- elif self.kind == 'xlsx':
- from openpyxl.reader.excel import load_workbook
- buf = py3compat.BytesIO(data)
- self.book = load_workbook(buf, use_iterators=True)
- else:
- try:
- import xlrd
- self.book = xlrd.open_workbook(file_contents=data)
- self.use_xlsx = False
- except Exception:
- self.use_xlsx = True
- from openpyxl.reader.excel import load_workbook
- buf = py3compat.BytesIO(data)
- self.book = load_workbook(buf, use_iterators=True)
+ self.book = xlrd.open_workbook(file_contents=data)
def __repr__(self):
return object.__repr__(self)
@@ -1908,9 +1876,7 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
if skipfooter is not None:
skip_footer = skipfooter
- choose = {True: self._parse_xlsx,
- False: self._parse_xls}
- return choose[self.use_xlsx](sheetname, header=header,
+ return self._parse_excel(sheetname, header=header,
skiprows=skiprows, index_col=index_col,
has_index_names=has_index_names,
parse_cols=parse_cols,
@@ -1953,47 +1919,12 @@ def _excel2num(x):
else:
return i in parse_cols
- def _parse_xlsx(self, sheetname, header=0, skiprows=None,
- skip_footer=0, index_col=None, has_index_names=False,
- parse_cols=None, parse_dates=False, date_parser=None,
- na_values=None, thousands=None, chunksize=None):
- sheet = self.book.get_sheet_by_name(name=sheetname)
- data = []
-
- # it brings a new method: iter_rows()
- should_parse = {}
-
- for row in sheet.iter_rows():
- row_data = []
- for j, cell in enumerate(row):
-
- if parse_cols is not None and j not in should_parse:
- should_parse[j] = self._should_parse(j, parse_cols)
-
- if parse_cols is None or should_parse[j]:
- row_data.append(cell.internal_value)
- data.append(row_data)
-
- if header is not None:
- data[header] = _trim_excel_header(data[header])
-
- parser = TextParser(data, header=header, index_col=index_col,
- has_index_names=has_index_names,
- na_values=na_values,
- thousands=thousands,
- parse_dates=parse_dates,
- date_parser=date_parser,
- skiprows=skiprows,
- skip_footer=skip_footer,
- chunksize=chunksize)
-
- return parser.read()
-
- def _parse_xls(self, sheetname, header=0, skiprows=None,
+ def _parse_excel(self, sheetname, header=0, skiprows=None,
skip_footer=0, index_col=None, has_index_names=None,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, chunksize=None):
- from xlrd import xldate_as_tuple, XL_CELL_DATE, XL_CELL_ERROR
+ from xlrd import (xldate_as_tuple, XL_CELL_DATE,
+ XL_CELL_ERROR, XL_CELL_BOOLEAN)
datemode = self.book.datemode
sheet = self.book.sheet_by_name(sheetname)
@@ -2015,9 +1946,12 @@ def _parse_xls(self, sheetname, header=0, skiprows=None,
value = datetime.time(*dt[3:])
else:
value = datetime.datetime(*dt)
- if typ == XL_CELL_ERROR:
+ elif typ == XL_CELL_ERROR:
value = np.nan
+ elif typ == XL_CELL_BOOLEAN:
+ value = bool(value)
row.append(value)
+
data.append(row)
if header is not None:
@@ -2037,9 +1971,6 @@ def _parse_xls(self, sheetname, header=0, skiprows=None,
@property
def sheet_names(self):
- if self.use_xlsx:
- return self.book.get_sheet_names()
- else:
return self.book.sheet_names()
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index ccd9cbc56b2a5..ee2d265690221 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -245,18 +245,6 @@ def test_specify_kind_xls(self):
# self.assertRaises(Exception, ExcelFile, open(xlsx_file, 'rb'),
# kind='xls')
- def test_specify_kind_xlsx(self):
- _skip_if_no_openpyxl()
- xlsx_file = os.path.join(self.dirpath, 'test.xlsx')
- xls_file = os.path.join(self.dirpath, 'test.xls')
-
- self.assertRaises(Exception, ExcelFile, xls_file, kind='xlsx')
-
- ExcelFile(open(xlsx_file, 'rb'), kind='xlsx')
-
- self.assertRaises(Exception, ExcelFile, open(xls_file, 'rb'),
- kind='xlsx')
-
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
@@ -545,19 +533,6 @@ def test_excel_roundtrip_datetime(self):
recons = reader.parse('test1')
tm.assert_frame_equal(self.tsframe, recons)
- def test_excel_roundtrip_bool(self):
- _skip_if_no_openpyxl()
-
- # Test roundtrip np.bool8, does not seem to work for xls
- path = '__tmp_excel_roundtrip_bool__.xlsx'
- frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- with ensure_clean(path) as path:
-
- frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- tm.assert_frame_equal(frame, recons)
-
def test_to_excel_periodindex(self):
_skip_if_no_excelsuite()
| Ssers with xlrd<0.9 will get an error message.
unlike openpyxl, xlrd parse() leaves type inferencing to
TextReader, which looks at the first values only, but this
was already the behaviour for .xls files, so small price to pay.
Removed much code duplication, and xlrd 0.9.0 added py3
support (courtesy of @takluyver, Thanks!), though that's not
tested yet.
Also, closes #1629
| https://api.github.com/repos/pandas-dev/pandas/pulls/3164 | 2013-03-25T02:36:52Z | 2013-04-23T02:10:35Z | 2013-04-23T02:10:35Z | 2014-06-12T19:22:09Z |
ENH: Declare a BoolBlock as a NumericBlock | diff --git a/RELEASE.rst b/RELEASE.rst
index 3e935879c197e..c92f9fcd698ee 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -124,6 +124,8 @@ pandas 0.11.0
knows how many columns to expect in the result) (GH2981_)
- On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame
on rhs (GH3216_)
+ - Treat boolean values as integers (values 1 and 0) for numeric
+ operations. (GH2641_)
**API Changes**
@@ -350,6 +352,7 @@ pandas 0.11.0
.. _GH2747: https://github.com/pydata/pandas/issues/2747
.. _GH2816: https://github.com/pydata/pandas/issues/2816
.. _GH3216: https://github.com/pydata/pandas/issues/3216
+.. _GH2641: https://github.com/pydata/pandas/issues/2641
pandas 0.10.1
=============
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index 0193714a5d30c..e299ba43ad9ee 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -304,6 +304,9 @@ Enhancements
- added option `display.with_wmp_style` providing a sleeker visual style
for plots. Based on https://gist.github.com/huyng/816622 (GH3075_).
+ - Treat boolean values as integers (values 1 and 0) for numeric
+ operations. (GH2641_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -328,3 +331,4 @@ on GitHub for a complete list.
.. _GH3059: https://github.com/pydata/pandas/issues/3059
.. _GH3070: https://github.com/pydata/pandas/issues/3070
.. _GH3075: https://github.com/pydata/pandas/issues/3075
+.. _GH2641: https://github.com/pydata/pandas/issues/2641
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 7441134aab351..a47d747216f49 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -628,7 +628,7 @@ def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
-class BoolBlock(Block):
+class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
@@ -641,9 +641,6 @@ def _try_cast(self, element):
except: # pragma: no cover
return element
- def _try_cast_result(self, result):
- return _possibly_downcast_to_dtype(result, self.dtype)
-
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index e7b5e266ad09f..f4f04d5a53579 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7899,15 +7899,14 @@ def test_dataframe_clip(self):
def test_get_X_columns(self):
# numeric and object columns
- # Booleans get casted to float in DataFrame, so skip for now
df = DataFrame({'a': [1, 2, 3],
- # 'b' : [True, False, True],
+ 'b' : [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
self.assert_(np.array_equal(df._get_numeric_data().columns,
- ['a', 'e']))
+ ['a', 'b', 'e']))
def test_get_numeric_data(self):
intname = np.dtype(np.int_).name
@@ -7939,6 +7938,30 @@ def test_get_numeric_data(self):
expected = df.ix[:, []]
assert_frame_equal(result, expected)
+ def test_bool_describe_in_mixed_frame(self):
+ df = DataFrame({
+ 'string_data': ['a', 'b', 'c', 'd', 'e'],
+ 'bool_data': [True, True, False, False, False],
+ 'int_data': [10, 20, 30, 40, 50],
+ })
+
+ # Boolean data and integer data is included in .describe() output, string data isn't
+ self.assert_(np.array_equal(df.describe().columns, ['bool_data', 'int_data']))
+
+ bool_describe = df.describe()['bool_data']
+
+ # Both the min and the max values should stay booleans
+ self.assert_(bool_describe['min'].dtype == np.bool_)
+ self.assert_(bool_describe['max'].dtype == np.bool_)
+
+ self.assert_(bool_describe['min'] == False)
+ self.assert_(bool_describe['max'] == True)
+
+ # For numeric operations, like mean or median, the values True/False are cast to
+ # the integer values 1 and 0
+ assert_almost_equal(bool_describe['mean'], 0.4)
+ assert_almost_equal(bool_describe['50%'], 0)
+
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 93e9b07558319..eec5f5632d36b 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -497,7 +497,7 @@ def test_get_numeric_data(self):
'bool': bool_ser, 'obj': obj_ser,
'dt': dt_ser})
xp = DataFrame({'int': int_ser, 'float': float_ser,
- 'complex': complex_ser})
+ 'complex': complex_ser, 'bool': bool_ser})
rs = DataFrame(df._data.get_numeric_data())
assert_frame_equal(xp, rs)
| BUG: #2641 fixes "df.decribe() with boolean column"
Numpy refers to a boolean type as a "numerical type", and both Python
and numpy cast True bools into the value 1 and False bools into the
value 0, so all numpy numerical operations always work.
---
This basically is to solve this issue, which I always found a bit puzzling:
import pandas
df = pandas.DataFrame({
'int1': [1, 2, 3],
'bool1': [False, False, True],
'bool2': [True, True, False],
})
print df.corr()
```
int1
int1 1
```
print df[['bool1', 'bool2']].corr()
```
bool1 bool2
bool1 1 -1
bool2 -1 1
```
After the change:
print df.corr()
```
bool1 bool2 int1
bool1 1.000000 -1.000000 0.866025
bool2 -1.000000 1.000000 -0.866025
int1 0.866025 -0.866025 1.000000
```
This also applies to quite a few other numeric operations on dataframes, which when the dataframe is mixed type defaults to using "is_numeric" to decide which ones to perform the operation on.
I'm not sure how deep the rabbit hole goes for this change and how much stuff it might affect, but all of the tests pass (after of course editing the one that specifically tested this functionality). If there are other potential issues I'd be happy to look into them and make other related fixes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3162 | 2013-03-25T00:43:23Z | 2013-04-02T12:46:09Z | 2013-04-02T12:46:09Z | 2014-06-14T23:24:34Z |
DOC: Add more detailed description of commit prefixes | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9f4e30817d7ed..6bc5700c49866 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -34,8 +34,15 @@ your contribution or address the issue you're having.
This can create all sorts of problems. Use "git rebase" instead. This ensures
no merge conflicts occur when you're code is merged by the core team.
- An informal commit message format is in effect for the project, please try
- and adhere to it. Use a "ENH: ", "TST:", "BUG:", "DOC:", etc' prefix in
- your commit title. Check the output of "git log" for examples.
+ and adhere to it. View "git log" for examples. Here are some common prefixes
+ along with general guidelines for when to use them:
+ - ENH: Enhancement, new functionality
+ - BUG: Bug fix
+ - DOC: Additions/updates to documentation
+ - TST: Additions/updates to tests
+ - BLD: Updates to the build process/scripts
+ - PERF: Performance improvement
+ - CLN: Code cleanup
- RELEASE.rst and doc/source/vx.y.z.txt contain an on-going changelog for each
release as it is worked on. Add entries to these files as needed in
a separate commit in your PR, documenting the fix, enhancement or (unavoidable)
| I was unfamiliar with these particular prefixes when I just contributed, so I thought it might be helpful for contributors to have some general guidelines for when to use them. I honestly just guessed at some of them, so if they are wrong please feel free to correct them.
However, I looked on the mailing list and didn't see any guidelines there either, so I think it would be nice to enumerate them here.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3160 | 2013-03-24T20:56:09Z | 2013-03-24T21:04:35Z | 2013-03-24T21:04:35Z | 2013-03-24T21:04:38Z |
ENH: Pass **kwargs through to matplotlib .scatter() | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index b74b6b24262f9..137eab65620c6 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1703,7 +1703,7 @@ def format_date_labels(ax, rot):
pass
-def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False):
+def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs):
"""
Returns
@@ -1715,7 +1715,7 @@ def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False):
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
- ax.scatter(xvals, yvals)
+ ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
| Allow for more configurability to making scatter plots. Most of the other plotting functions already do this, just giving .scatter_plot() the same power. Works for both single scatters and grouped scatters.
I didn't make a test because anything that would be tested would be matplotlib specific, but I'd be happy to do so if you'd like (test_graphics.py: TestDataFramePlots.test_scatter is a good spot).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3159 | 2013-03-24T20:36:16Z | 2013-03-24T20:49:36Z | 2013-03-24T20:49:36Z | 2013-03-24T20:49:38Z |
ENH: added numexpr support for where operations | diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py
index 4199c6f7f890c..de93394872e12 100644
--- a/pandas/core/expressions.py
+++ b/pandas/core/expressions.py
@@ -15,9 +15,11 @@
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
+_where = None
# the set of dtypes that we will allow pass to numexpr
-_ALLOWED_DTYPES = set(['int64','int32','float64','float32','bool'])
+_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']),
+ where = set(['int64','float64','bool']))
# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
@@ -26,17 +28,16 @@ def set_use_numexpr(v = True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
- #print "setting use_numexpr : was->%s, now->%s" % (_USE_NUMEXPR,v)
_USE_NUMEXPR = v
# choose what we are going to do
- global _evaluate
+ global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
+ _where = _where_standard
else:
_evaluate = _evaluate_numexpr
-
- #print "evaluate -> %s" % _evaluate
+ _where = _where_numexpr
def set_numexpr_threads(n = None):
# if we are using numexpr, set the threads to n
@@ -54,7 +55,7 @@ def _evaluate_standard(op, op_str, a, b, raise_on_error=True):
""" standard evaluation """
return op(a,b)
-def _can_use_numexpr(op, op_str, a, b):
+def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
@@ -73,7 +74,7 @@ def _can_use_numexpr(op, op_str, a, b):
dtypes |= set([o.dtype.name])
# allowed are a superset
- if not len(dtypes) or _ALLOWED_DTYPES >= dtypes:
+ if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
@@ -81,7 +82,7 @@ def _can_use_numexpr(op, op_str, a, b):
def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
result = None
- if _can_use_numexpr(op, op_str, a, b):
+ if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
a_value, b_value = a, b
if hasattr(a_value,'values'):
@@ -104,6 +105,40 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
return result
+def _where_standard(cond, a, b, raise_on_error=True):
+ return np.where(cond, a, b)
+
+def _where_numexpr(cond, a, b, raise_on_error = False):
+ result = None
+
+ if _can_use_numexpr(None, 'where', a, b, 'where'):
+
+ try:
+ cond_value, a_value, b_value = cond, a, b
+ if hasattr(cond_value,'values'):
+ cond_value = cond_value.values
+ if hasattr(a_value,'values'):
+ a_value = a_value.values
+ if hasattr(b_value,'values'):
+ b_value = b_value.values
+ result = ne.evaluate('where(cond_value,a_value,b_value)',
+ local_dict={ 'cond_value' : cond_value,
+ 'a_value' : a_value,
+ 'b_value' : b_value },
+ casting='safe')
+ except (ValueError), detail:
+ if 'unknown type object' in str(detail):
+ pass
+ except (Exception), detail:
+ if raise_on_error:
+ raise TypeError(str(detail))
+
+ if result is None:
+ result = _where_standard(cond,a,b,raise_on_error)
+
+ return result
+
+
# turn myself on
set_use_numexpr(True)
@@ -126,4 +161,20 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error)
return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
-
+def where(cond, a, b, raise_on_error=False, use_numexpr=True):
+ """ evaluate the where condition cond on a and b
+
+ Parameters
+ ----------
+
+ cond : a boolean array
+ a : return if cond is True
+ b : return if cond is False
+ raise_on_error : pass the error to the higher level if indicated (default is False),
+ otherwise evaluate the op with and return the results
+ use_numexpr : whether to try to use numexpr (default True)
+ """
+
+ if use_numexpr:
+ return _where(cond, a, b, raise_on_error=raise_on_error)
+ return _where_standard(cond, a, b, raise_on_error=raise_on_error)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index afb698221c48b..c3dc38d5d7187 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3720,7 +3720,8 @@ def combine_first(self, other):
-------
combined : DataFrame
"""
- combiner = lambda x, y: np.where(isnull(x), y, x)
+ def combiner(x, y):
+ return expressions.where(isnull(x), y, x, raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
@@ -3771,7 +3772,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
else:
mask = notnull(this)
- self[col] = np.where(mask, this, that)
+ self[col] = expressions.where(mask, this, that, raise_on_error=True)
#----------------------------------------------------------------------
# Misc methods
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 385695ec6cc50..f7c560481cc5f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -10,6 +10,7 @@
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
+import pandas.core.expressions as expressions
from pandas.tslib import Timestamp
from pandas.util import py3compat
@@ -506,7 +507,7 @@ def func(c,v,o):
return v
try:
- return np.where(c,v,o)
+ return expressions.where(c, v, o, raise_on_error=True)
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index a0321d2dbe55f..a496785b0aed3 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -46,19 +46,19 @@ def setUp(self):
def test_invalid(self):
# no op
- result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame)
+ result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame, 'evaluate')
self.assert_(result == False)
# mixed
- result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame)
+ result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame, 'evaluate')
self.assert_(result == False)
# min elements
- result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2)
+ result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2, 'evaluate')
self.assert_(result == False)
# ok, we only check on first part of expression
- result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2)
+ result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2, 'evaluate')
self.assert_(result == True)
def test_binary_ops(self):
@@ -70,14 +70,14 @@ def testit():
for op, op_str in [('add','+'),('sub','-'),('mul','*'),('div','/'),('pow','**')]:
op = getattr(operator,op)
- result = expr._can_use_numexpr(op, op_str, f, f)
+ result = expr._can_use_numexpr(op, op_str, f, f, 'evaluate')
self.assert_(result == (not f._is_mixed_type))
result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
assert_array_equal(result,expected.values)
- result = expr._can_use_numexpr(op, op_str, f2, f2)
+ result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
self.assert_(result == False)
@@ -105,14 +105,14 @@ def testit():
op = getattr(operator,op)
- result = expr._can_use_numexpr(op, op_str, f11, f12)
+ result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate')
self.assert_(result == (not f11._is_mixed_type))
result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
assert_array_equal(result,expected.values)
- result = expr._can_use_numexpr(op, op_str, f21, f22)
+ result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
self.assert_(result == False)
expr.set_use_numexpr(False)
@@ -123,6 +123,28 @@ def testit():
expr.set_numexpr_threads()
testit()
+ def test_where(self):
+
+ def testit():
+ for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]:
+
+
+ for cond in [ True, False ]:
+
+ c = np.empty(f.shape,dtype=np.bool_)
+ c.fill(cond)
+ result = expr.where(c, f.values, f.values+1)
+ expected = np.where(c, f.values, f.values+1)
+ assert_array_equal(result,expected)
+
+ expr.set_use_numexpr(False)
+ testit()
+ expr.set_use_numexpr(True)
+ expr.set_numexpr_threads(1)
+ testit()
+ expr.set_numexpr_threads()
+ testit()
+
if __name__ == '__main__':
# unittest.main()
import nose
| Added support for numexpr to do where operations, so this will have an
effect on most boolean indexing operations themselves
(inital commit would speedup ops like `df>0`)
this PR is in effect speeding up `np.where`
`df[(df>0)&(df2>0)]` (100k x 100 columns)
`no_ne` is no numexpr at all
`st` is single threaded
This operation is restricted to int64/float64 dtypes
(others would be upcasted, which we could deal with, but
not now)
note: _the above operation could (and should) be much faster
if done as a single operation, but for now this is basically
4 calls to numexpr (3 boolean, then the where), but that's
for another day_
```
----------------------------------------------------------------
Test name | target[ms] | base[ms] | ratio
----------------------------------------------------------------
frame_multi_and 128.5169 385.2890 0.3336
frame_multi_and_st 215.7741 417.0649 0.5174
frame_multi_and_no_ne 397.3501 401.0870 0.9907
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3154 | 2013-03-24T02:47:06Z | 2013-03-25T21:24:30Z | 2013-03-25T21:24:30Z | 2014-06-17T09:44:18Z |
DOC: more cookbook examples | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3fc0b5751e8d7..76093d83b32e7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -17,7 +17,6 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
-
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.extend([
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index f5b0c5e1feb5d..1d8701368558f 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -127,6 +127,12 @@ Splitting
`Splitting a frame
<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__
+Pivot
+~~~~~
+
+`Partial sums and subtotals
+<http://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875>`__
+
Timeseries
----------
@@ -203,6 +209,9 @@ CSV
The :ref:`CSV <io.read_csv_table>` docs
+`read_csv in action
+<http://wesmckinney.com/blog/?p=635>`__
+
`Reading a csv chunk-by-chunk
<http://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309>`__
@@ -212,6 +221,9 @@ The :ref:`CSV <io.read_csv_table>` docs
`Inferring dtypes from a file
<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
+`Dealing with bad lines
+<https://github.com/pydata/pandas/issues/2886>`__
+
.. _cookbook.sql:
SQL
@@ -222,6 +234,16 @@ The :ref:`SQL <io.sql>` docs
`Reading from databases with SQL
<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__
+.. _cookbook.excel:
+
+Excel
+~~~~~
+
+The :ref:`Excel <io.excel>` docs
+
+`Reading from a filelike handle
+<http://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe>`__
+
.. _cookbook.hdf:
HDFStore
diff --git a/doc/source/io.rst b/doc/source/io.rst
index e936abd89b8d8..a244b9c545d88 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -919,6 +919,8 @@ The ``ExcelFile`` class can read an Excel 2003 file using the ``xlrd`` Python
module and use the same parsing code as the above to convert tabular data into
a DataFrame. To use it, create the ``ExcelFile`` object:
+See some :ref:`cookbook examples <cookbook.excel>` for some advanced strategies
+
.. code-block:: python
xls = ExcelFile('path_to_file.xls')
| https://api.github.com/repos/pandas-dev/pandas/pulls/3153 | 2013-03-24T02:05:50Z | 2013-03-24T02:05:57Z | 2013-03-24T02:05:57Z | 2013-03-24T02:05:57Z | |
BUG: GH2763 fixed downcasting of groupby results on SeriesGroupBy | diff --git a/RELEASE.rst b/RELEASE.rst
index eb2d30ca3e448..009bcb8c5d5d1 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -198,7 +198,7 @@ pandas 0.11.0
an irrecoverable state (GH3010_)
- Bug in DataFrame update, combine_first where non-specified values could cause
dtype changes (GH3016_, GH3041_)
- - Bug in groupby with first/last where dtypes could change (GH3041_)
+ - Bug in groupby with first/last where dtypes could change (GH3041_, GH2763_)
- Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
other values), (GH2850_)
- Unstack of a frame with no nans would always cause dtype upcasting (GH2929_)
@@ -251,6 +251,7 @@ pandas 0.11.0
.. _GH2746: https://github.com/pydata/pandas/issues/2746
.. _GH2747: https://github.com/pydata/pandas/issues/2747
.. _GH2751: https://github.com/pydata/pandas/issues/2751
+.. _GH2763: https://github.com/pydata/pandas/issues/2763
.. _GH2776: https://github.com/pydata/pandas/issues/2776
.. _GH2778: https://github.com/pydata/pandas/issues/2778
.. _GH2787: https://github.com/pydata/pandas/issues/2787
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0eb64834fe1aa..053deaa550b06 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -13,6 +13,7 @@
from pandas.util.compat import OrderedDict
import pandas.core.algorithms as algos
import pandas.core.common as com
+from pandas.core.common import _possibly_downcast_to_dtype
import pandas.lib as lib
import pandas.algos as _algos
@@ -440,14 +441,7 @@ def _try_cast(self, result, obj):
# need to respect a non-number here (e.g. Decimal)
if len(result) and issubclass(type(result[0]),(np.number,float,int)):
- if issubclass(dtype.type, (np.integer, np.bool_)):
-
- # castable back to an int/bool as we don't have nans
- if com.notnull(result).all():
- result = result.astype(dtype)
- else:
-
- result = result.astype(dtype)
+ result = _possibly_downcast_to_dtype(result, dtype)
elif issubclass(dtype.type, np.datetime64):
if is_datetime64_dtype(obj.dtype):
@@ -468,7 +462,7 @@ def _cython_agg_general(self, how, numeric_only=True):
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
- output[name] = result
+ output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index d609080b833bd..9e623de5483ab 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -91,48 +91,51 @@ def setUp(self):
'F': np.random.randn(11)})
def test_basic(self):
- data = Series(np.arange(9) // 3, index=np.arange(9))
- index = np.arange(9)
- np.random.shuffle(index)
- data = data.reindex(index)
+ def checkit(dtype):
+ data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
- grouped = data.groupby(lambda x: x // 3)
+ index = np.arange(9)
+ np.random.shuffle(index)
+ data = data.reindex(index)
- for k, v in grouped:
- self.assertEqual(len(v), 3)
+ grouped = data.groupby(lambda x: x // 3)
- agged = grouped.aggregate(np.mean)
- self.assertEqual(agged[1], 1)
+ for k, v in grouped:
+ self.assertEqual(len(v), 3)
- assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
- assert_series_equal(agged, grouped.mean())
+ agged = grouped.aggregate(np.mean)
+ self.assertEqual(agged[1], 1)
- # Cython only returning floating point for now...
- assert_series_equal(grouped.agg(np.sum).astype(float),
- grouped.sum())
+ assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
+ assert_series_equal(agged, grouped.mean())
+ assert_series_equal(grouped.agg(np.sum),grouped.sum())
- transformed = grouped.transform(lambda x: x * x.sum())
- self.assertEqual(transformed[7], 12)
+ transformed = grouped.transform(lambda x: x * x.sum())
+ self.assertEqual(transformed[7], 12)
- value_grouped = data.groupby(data)
- assert_series_equal(value_grouped.aggregate(np.mean), agged)
+ value_grouped = data.groupby(data)
+ assert_series_equal(value_grouped.aggregate(np.mean), agged)
- # complex agg
- agged = grouped.aggregate([np.mean, np.std])
- agged = grouped.aggregate({'one': np.mean,
- 'two': np.std})
+ # complex agg
+ agged = grouped.aggregate([np.mean, np.std])
+ agged = grouped.aggregate({'one': np.mean,
+ 'two': np.std})
+
+ group_constants = {
+ 0: 10,
+ 1: 20,
+ 2: 30
+ }
+ agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
+ self.assertEqual(agged[1], 21)
- group_constants = {
- 0: 10,
- 1: 20,
- 2: 30
- }
- agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
- self.assertEqual(agged[1], 21)
+ # corner cases
+ self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
- # corner cases
- self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
+
+ for dtype in ['int64','int32','float64','float32']:
+ checkit(dtype)
def test_first_last_nth(self):
# tests for first / last / nth
@@ -185,6 +188,14 @@ def test_first_last_nth_dtypes(self):
expected.index = ['bar', 'foo']
assert_frame_equal(nth, expected, check_names=False)
+ # GH 2763, first/last shifting dtypes
+ idx = range(10)
+ idx.append(9)
+ s = Series(data=range(11), index=idx, name='IntCol')
+ self.assert_(s.dtype == 'int64')
+ f = s.groupby(level=0).first()
+ self.assert_(f.dtype == 'int64')
+
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index c7d4f50649c34..f1594b154f2cc 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -71,9 +71,9 @@ def test_custom_grouper(self):
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
- # cython returns float for now
+ # GH2763 - return in put dtype if we can
result = g.agg(np.sum)
- assert_series_equal(result, expect.astype(float))
+ assert_series_equal(result, expect)
data = np.random.rand(len(dti), 10)
df = DataFrame(data, index=dti)
| closes #2763
e.g. was returning float64 on int64 input when possible to preserve
in first/last
more general cases were fixed in prior PRs (e.g. #3041)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3152 | 2013-03-23T22:09:03Z | 2013-03-24T03:07:37Z | 2013-03-24T03:07:37Z | 2014-06-19T17:29:39Z |
BUG: GH3094, timedelta64 failing on numpy 1.7.0 (on 2.7) | diff --git a/RELEASE.rst b/RELEASE.rst
index 14619cbed110a..eb2d30ca3e448 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -179,7 +179,7 @@ pandas 0.11.0
- Series ops with a Timestamp on the rhs was throwing an exception (GH2898_)
added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike
Series on both lhs and rhs
- - Fixed subtle timedelta64 inference issue on py3
+ - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (GH3094_)
- Fixed some formatting issues on timedelta when negative
- Support null checking on timedelta64, representing (and formatting) with NaT
- Support setitem with np.nan value, converts to NaT
@@ -293,6 +293,7 @@ pandas 0.11.0
.. _GH3115: https://github.com/pydata/pandas/issues/3115
.. _GH3070: https://github.com/pydata/pandas/issues/3070
.. _GH3075: https://github.com/pydata/pandas/issues/3075
+.. _GH3094: https://github.com/pydata/pandas/issues/3094
.. _GH3130: https://github.com/pydata/pandas/issues/3130
pandas 0.10.1
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6efa4026641c0..35448d26e4fd5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -104,6 +104,11 @@ def convert_to_array(values):
pass
else:
values = com._possibly_cast_to_timedelta(values)
+ elif inferred_type in set(['integer']):
+ if values.dtype == 'timedelta64[ns]':
+ pass
+ elif values.dtype.kind == 'm':
+ values = values.astype('timedelta64[ns]')
else:
values = pa.array(values)
return values
| closes #3094
as np.array(timedelta64[ns] series) converts to int64 dtype for some weird reason
| https://api.github.com/repos/pandas-dev/pandas/pulls/3148 | 2013-03-23T17:36:49Z | 2013-03-23T17:37:43Z | 2013-03-23T17:37:43Z | 2014-08-16T17:44:41Z |
DOC: Added error_bad_lines to the summary options for csv | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 64b46b1a3480f..e936abd89b8d8 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -108,6 +108,7 @@ See some :ref:`cookbook examples <cookbook.csv>` for some advanced strategies
unicode data, e.g. ``'utf-8``` or ``'latin-1'``.
- ``verbose``: show number of NA values inserted in non-numeric columns
- ``squeeze``: if True then output with only one column is turned into Series
+ - ``error_bad_lines``: if False then any lines causing an error will be skipped :ref:`bad lines <io.bad_lines>`
.. ipython:: python
:suppress:
| Added a link to the reference example later in the document
| https://api.github.com/repos/pandas-dev/pandas/pulls/3147 | 2013-03-23T17:18:27Z | 2013-03-23T18:21:07Z | 2013-03-23T18:21:07Z | 2013-03-23T18:21:12Z |
PERF: GH2121 groupby transform | diff --git a/RELEASE.rst b/RELEASE.rst
index 45477610cabb2..4cd47ae384359 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -100,9 +100,6 @@ pandas 0.11.0
the collections.Mapping ABC.
- Allow selection semantics via a string with a datelike index to work in both
Series and DataFrames (GH3070_)
- - Improved performance across several core functions by taking memory
- ordering of arrays into account. Courtesy of @stephenwlin (GH3130_)
-
.. ipython:: python
@@ -116,6 +113,10 @@ pandas 0.11.0
for plots. Based on https://gist.github.com/huyng/816622 (GH3075_).
+ - Improved performance across several core functions by taking memory
+ ordering of arrays into account. Courtesy of @stephenwlin (GH3130_)
+ - Improved performance of groupby transform method (GH2121_)
+
**API Changes**
- Do not automatically upcast numeric specified dtypes to ``int64`` or
@@ -234,6 +235,7 @@ pandas 0.11.0
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
.. _GH2758: https://github.com/pydata/pandas/issues/2758
+.. _GH2121: https://github.com/pydata/pandas/issues/2121
.. _GH2809: https://github.com/pydata/pandas/issues/2809
.. _GH2810: https://github.com/pydata/pandas/issues/2810
.. _GH2837: https://github.com/pydata/pandas/issues/2837
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 053deaa550b06..cb0a03d306c53 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -13,7 +13,7 @@
from pandas.util.compat import OrderedDict
import pandas.core.algorithms as algos
import pandas.core.common as com
-from pandas.core.common import _possibly_downcast_to_dtype
+from pandas.core.common import _possibly_downcast_to_dtype, notnull
import pandas.lib as lib
import pandas.algos as _algos
@@ -75,7 +75,7 @@ def f(self):
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
- x = x[com.notnull(x)]
+ x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
@@ -89,7 +89,7 @@ def _first(x):
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
- x = x[com.notnull(x)]
+ x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
@@ -421,7 +421,7 @@ def ohlc(self):
def nth(self, n):
def picker(arr):
- arr = arr[com.notnull(arr)]
+ arr = arr[notnull(arr)]
if len(arr) >= n + 1:
return arr.iget(n)
else:
@@ -1897,19 +1897,46 @@ def transform(self, func, *args, **kwargs):
gen = self.grouper.get_iterator(obj, axis=self.axis)
if isinstance(func, basestring):
- wrapper = lambda x: getattr(x, func)(*args, **kwargs)
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
- wrapper = lambda x: func(x, *args, **kwargs)
+ fast_path = lambda group: func(group, *args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
+ path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
- try:
- res = group.apply(wrapper, axis=self.axis)
- except TypeError:
- return self._transform_item_by_item(obj, wrapper)
- except Exception: # pragma: no cover
- res = wrapper(group)
+ # decide on a fast path
+ if path is None:
+
+ path = slow_path
+ try:
+ res = slow_path(group)
+
+ # if we make it here, test if we can use the fast path
+ try:
+ res_fast = fast_path(group)
+
+ # compare that we get the same results
+ if res.shape == res_fast.shape:
+ res_r = res.values.ravel()
+ res_fast_r = res_fast.values.ravel()
+ mask = notnull(res_r)
+ if (res_r[mask] == res_fast_r[mask]).all():
+ path = fast_path
+
+ except:
+ pass
+ except TypeError:
+ return self._transform_item_by_item(obj, fast_path)
+ except Exception: # pragma: no cover
+ res = fast_path(group)
+ path = fast_path
+
+ else:
+
+ res = path(group)
# broadcasting
if isinstance(res, Series):
@@ -1925,7 +1952,8 @@ def transform(self, func, *args, **kwargs):
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
- return concatenated.reindex_like(obj)
+ concatenated.sort_index(inplace=True)
+ return concatenated
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py
index caa09c219a866..f9f221ae752b5 100644
--- a/vb_suite/groupby.py
+++ b/vb_suite/groupby.py
@@ -273,3 +273,41 @@ def f(g):
"""
groupby_sum_booleans = Benchmark("df.groupby('ii').sum()", setup)
+
+#----------------------------------------------------------------------
+# Transform testing
+
+setup = common_setup + """
+n_dates = 1000
+n_securities = 500
+n_columns = 3
+share_na = 0.1
+
+dates = date_range('1997-12-31', periods=n_dates, freq='B')
+dates = Index(map(lambda x: x.year * 10000 + x.month * 100 + x.day, dates))
+
+secid_min = int('10000000', 16)
+secid_max = int('F0000000', 16)
+step = (secid_max - secid_min) // (n_securities - 1)
+security_ids = map(lambda x: hex(x)[2:10].upper(), range(secid_min, secid_max + 1, step))
+
+data_index = MultiIndex(levels=[dates.values, security_ids],
+ labels=[[i for i in xrange(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates],
+ names=['date', 'security_id'])
+n_data = len(data_index)
+
+columns = Index(['factor{}'.format(i) for i in xrange(1, n_columns + 1)])
+
+data = DataFrame(np.random.randn(n_data, n_columns), index=data_index, columns=columns)
+
+step = int(n_data * share_na)
+for column_index in xrange(n_columns):
+ index = column_index
+ while index < n_data:
+ data.set_value(data_index[index], columns[column_index], np.nan)
+ index += step
+
+f_fillna = lambda x: x.fillna(method='pad')
+"""
+
+groupby_transform = Benchmark("data.groupby(level='security_id').transform(f_fillna)", setup)
| closes #2121
Two items were causing slowness
1) using apply for each group (which in this case is equivalent to directly calling
the function on the group). the function that is testing is `fillna` which we have defined as a function of a data frame so its ok here to use the direct function call
I create a slow_path/fast_path with the first group determining the path, not sure this is super robust, but it is a significant source of slowness
2) at the end of the groupby the concetated object has a reindex_like, this is way slow, replacing by sort_index is much faster (this is a multi-index), again not of the robustness, but all tests pass
This is a comparision of the bench/bench_transform.py (supplied in #2121)
The apply_by_group DOES include the sort_index (which is necessary for correctness)
```
In [2]: %timeit apply_by_group(grouped, f_fillna)
1 loops, best of 3: 2.11 s per loop
In [3]: %timeit grouped.transform(f_fillna)
1 loops, best of 3: 2.14 s per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3145 | 2013-03-23T15:58:53Z | 2013-03-25T21:24:56Z | 2013-03-25T21:24:56Z | 2014-06-13T02:17:51Z |
TST: putmasking changing dtype when not necessary | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 586f36cbd0b0f..28f3a19ab5298 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -754,17 +754,36 @@ def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None):
if mask.any():
def changeit():
+
# our type is wrong here, need to upcast
- if (-mask).any():
- r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
- np.putmask(r, mask, other)
+ r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
+ np.putmask(r, mask, other)
+
+ # we need to actually change the dtype here
+ if change is not None:
+ change.dtype = r.dtype
+ change[:] = r
- # we need to actually change the dtype here
- if change is not None:
- change.dtype = r.dtype
- change[:] = r
+ return r, True
+
+ # we want to decide whether putmask will work
+ # if we have nans in the False portion of our mask then we need to upcast (possibily)
+ # otherwise we DON't want to upcast (e.g. if we are have values, say integers in
+ # the success portion then its ok to not upcast)
+ new_dtype, fill_value = _maybe_promote(result.dtype,other)
+ if new_dtype != result.dtype:
+
+ # we have a scalar or len 0 ndarray
+ # and its nan and we are changing some values
+ if np.isscalar(other) or (isinstance(other,np.ndarray) and other.ndim < 1):
+ if isnull(other):
+ return changeit()
+
+ # we have an ndarray and the masking has nans in it
+ else:
- return r, True
+ if isnull(other[mask]).any():
+ return changeit()
try:
np.putmask(result, mask, other)
@@ -811,9 +830,9 @@ def _possibly_downcast_to_dtype(result, dtype):
return result
try:
- if dtype == np.float_:
+ if issubclass(dtype.type,np.floating):
return result.astype(dtype)
- elif dtype == np.bool_ or dtype == np.int_:
+ elif dtype == np.bool_ or issubclass(dtype.type,np.integer):
if issubclass(result.dtype.type, np.number) and notnull(result).all():
new_result = result.astype(dtype)
if (new_result == result).all():
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 467fe46e81efd..0d65577546f8b 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -107,7 +107,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None):
return PeriodIndex(data, copy=copy, name=name)
if issubclass(data.dtype.type, np.integer):
- return Int64Index(data, copy=copy, name=name)
+ return Int64Index(data, copy=copy, dtype=dtype, name=name)
subarr = com._ensure_object(data)
elif np.isscalar(data):
@@ -1296,7 +1296,12 @@ def __new__(cls, data, dtype=None, copy=False, name=None):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to int')
elif issubclass(data.dtype.type, np.integer):
- subarr = np.array(data, dtype=np.int64, copy=copy)
+ # don't force the upcast as we may be dealing
+ # with a platform int
+ if dtype is None or not issubclass(np.dtype(dtype).type, np.integer):
+ dtype = np.int64
+
+ subarr = np.array(data, dtype=dtype, copy=copy)
else:
subarr = np.array(data, dtype=np.int64, copy=copy)
if len(data) > 0:
@@ -1316,10 +1321,6 @@ def inferred_type(self):
def _constructor(self):
return Int64Index
- @cache_readonly
- def dtype(self):
- return np.dtype('int64')
-
@property
def is_all_dates(self):
"""
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 7869d2627d581..89f6859a39bb0 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -253,6 +253,26 @@ def test_ensure_int32():
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
+def test_ensure_platform_int():
+
+ # verify that when we create certain types of indices
+ # they remain the correct type under platform conversions
+ from pandas.core.index import Int64Index
+
+ # int64
+ x = Int64Index([1, 2, 3], dtype='int64')
+ assert(x.dtype == np.int64)
+
+ pi = com._ensure_platform_int(x)
+ assert(pi.dtype == np.int_)
+
+ # int32
+ x = Int64Index([1, 2, 3], dtype='int32')
+ assert(x.dtype == np.int32)
+
+ pi = com._ensure_platform_int(x)
+ assert(pi.dtype == np.int_)
+
# TODO: fix this broken test
# def test_console_encode():
| https://api.github.com/repos/pandas-dev/pandas/pulls/3144 | 2013-03-23T13:21:08Z | 2013-03-23T14:52:14Z | 2013-03-23T14:52:14Z | 2013-03-23T14:52:14Z | |
TST: test for GH2623, object in frame upconverting a datetime64[ns] in another column | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1c3c1da8a80d7..313e4a96798d1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3387,6 +3387,22 @@ def test_from_records_misc_brokenness(self):
exp = DataFrame(data, index=['a', 'b', 'c'])
assert_frame_equal(result, exp)
+
+ # GH 2623
+ rows = []
+ rows.append([datetime(2010, 1, 1), 1])
+ rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
+ df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
+ results = df2_obj.get_dtype_counts()
+ expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })
+
+ rows = []
+ rows.append([datetime(2010, 1, 1), 1])
+ rows.append([datetime(2010, 1, 2), 1])
+ df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
+ results = df2_obj.get_dtype_counts()
+ expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
+
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
| this is tests only at #2623 is covered by other PRS
| https://api.github.com/repos/pandas-dev/pandas/pulls/3142 | 2013-03-22T22:35:04Z | 2013-03-22T22:43:27Z | 2013-03-22T22:43:27Z | 2013-03-23T01:54:17Z |
DOC: cross reference cookbook and docs | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index e38bb52ffce15..bc077caeb10fa 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -600,6 +600,8 @@ the quarter end:
Plotting
--------
+:ref:`Plotting <visualization>` docs.
+
.. ipython:: python
:suppress:
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 3bc80a36f5561..34411e79767a3 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -25,8 +25,13 @@ We encourage users to add to this documentation.
This is a great *First Pull Request* (to add interesting links and/or put short code inline
for existing links)
+.. _cookbook.selection:
+
Selection
---------
+
+The :ref:`indexing <indexing>` docs.
+
`Boolean Rows Indexing
<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
@@ -42,9 +47,13 @@ Selection
`Selecting via the complement
<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
+.. _cookbook.multi_index:
+
MultiIndexing
-------------
+The :ref:`multindexing <indexing.hierarchical>` docs.
+
`Creating a multi-index from a labeled frame
<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__
@@ -75,9 +84,13 @@ Levels
`Flatten Hierarchical columns
<http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__
+.. _cookbook.grouping:
+
Grouping
--------
+The :ref:`grouping <groupby>` docs.
+
`Basic grouping with apply
<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__
@@ -123,9 +136,13 @@ Timeseries
`Vectorized Lookup
<http://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe>`__
+.. _cookbook.resample:
+
Resampling
~~~~~~~~~~
+The :ref:`Resample <timeseries.resampling>` docs.
+
`TimeGrouping of values grouped across time
<http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages>`__
@@ -141,9 +158,13 @@ Resampling
`Resample minute data
<http://stackoverflow.com/questions/14861023/resampling-minute-data>`__
+.. _cookbook.merge:
+
Merge
-----
+The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` docs.
+
`emulate R rbind
<http://stackoverflow.com/questions/14988480/pandas-version-of-rbind>`__
@@ -153,9 +174,13 @@ Merge
`How to set the index and join
<http://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__
+.. _cookbook.plotting:
+
Plotting
--------
+The :ref:`Plotting <visualization>` docs.
+
`Make Matplotlib look like R
<http://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default>`__
@@ -165,9 +190,13 @@ Plotting
Data In/Out
-----------
+.. _cookbook.csv:
+
CSV
~~~
+The :ref:`CSV <io.read_csv_table>` docs
+
`Reading a csv chunk-by-chunk
<http://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309>`__
@@ -177,15 +206,23 @@ CSV
`Inferring dtypes from a file
<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
+.. _cookbook.sql:
+
SQL
~~~
+The :ref:`SQL <io.sql>` docs
+
`Reading from databases with SQL
<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__
+.. _cookbook.hdf:
+
HDFStore
~~~~~~~~
+The :ref:`HDFStores <io.hdf5>` docs
+
`Simple Queries with a Timestamp Index
<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
@@ -222,5 +259,7 @@ Storing Attributes to a group node
Miscellaneous
-------------
+The :ref:`Timedeltas <timeseries.timedeltas>` docs
+
`Operating with timedeltas
<https://github.com/pydata/pandas/pull/2899>`__
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 141d3f9f6b4da..107200a82903a 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -59,6 +59,8 @@ We aim to make operations like this natural and easy to express using
pandas. We'll address each area of GroupBy functionality then provide some
non-trivial examples / use cases.
+See some :ref:`cookbook examples <cookbook.grouping>` for some advanced strategies
+
.. _groupby.split:
Splitting an object into groups
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 392768a21586b..544d2e44ba435 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -32,6 +32,8 @@ attention in this area. Expect more work to be invested higher-dimensional data
structures (including Panel) in the future, especially in label-based advanced
indexing.
+See some :ref:`cookbook examples <cookbook.selection>` for some advanced strategies
+
Choice
------
@@ -879,6 +881,8 @@ described above and in prior sections. Later, when discussing :ref:`group by
non-trivial applications to illustrate how it aids in structuring data for
analysis.
+See some :ref:`cookbook examples <cookbook.multi_index>` for some advanced strategies
+
.. note::
Given that hierarchical indexing is so new to the library, it is definitely
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c30b64d9ae07a..64b46b1a3480f 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -37,6 +37,9 @@ The two workhorse functions for reading text files (a.k.a. flat files) are
They both use the same parsing code to intelligently convert tabular
data into a DataFrame object. They can take a number of arguments:
+See some :ref:`cookbook examples <cookbook.csv>` for some advanced strategies
+See some :ref:`cookbook examples <cookbook.csv>` for some advanced strategies
+
- ``filepath_or_buffer``: Either a string path to a file, or any object with a
``read`` method (such as an open file or ``StringIO``).
- ``sep`` or ``delimiter``: A delimiter / separator to split fields
@@ -981,6 +984,8 @@ HDF5 (PyTables)
the high performance HDF5 format using the excellent `PyTables
<http://www.pytables.org/>`__ library.
+See some :ref:`cookbook examples <cookbook.hdf>` for some advanced strategies
+
.. ipython:: python
:suppress:
:okexcept:
@@ -1690,6 +1695,8 @@ facilitate data retrieval and to reduce dependency on DB-specific API. There
wrappers only support the Python database adapters which respect the `Python
DB-API <http://www.python.org/dev/peps/pep-0249/>`_.
+See some :ref:`cookbook examples <cookbook.sql>` for some advanced strategies
+
Suppose you want to query some data with different types from a table such as:
+-----+------------+-------+-------+-------+
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index b80947695bebd..1495e9218ac41 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -304,6 +304,8 @@ better) than other open source implementations (like ``base::merge.data.frame``
in R). The reason for this is careful algorithmic design and internal layout of
the data in DataFrame.
+See some :ref:`cookbook examples <cookbook.merge>` for some advanced strategies
+
pandas provides a single function, ``merge``, as the entry point for all
standard database join operations between DataFrame objects:
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1c1a0680e1f28..266216d99206d 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -579,6 +579,8 @@ performing resampling operations during frequency conversion (e.g., converting
secondly data into 5-minutely data). This is extremely common in, but not
limited to, financial applications.
+See some :ref:`cookbook examples <cookbook.resample>` for some advanced strategies
+
.. ipython:: python
rng = date_range('1/1/2012', periods=100, freq='S')
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index ae6fd7b742b6d..97e7661cd1152 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -33,6 +33,8 @@ We use the standard convention for referencing the matplotlib API:
Basic plotting: ``plot``
------------------------
+See some :ref:`cookbook examples <cookbook.plotting>` for some advanced strategies
+
The ``plot`` method on Series and DataFrame is just a simple wrapper around
``plt.plot``:
| https://api.github.com/repos/pandas-dev/pandas/pulls/3140 | 2013-03-22T19:15:11Z | 2013-03-22T19:15:22Z | 2013-03-22T19:15:22Z | 2013-03-22T19:15:22Z | |
BUG: GH2745 Fix issue with indexing a series with a boolean key a 1-len list on rhs | diff --git a/RELEASE.rst b/RELEASE.rst
index 3927d1e7b0122..2b1708855db49 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -220,6 +220,7 @@ pandas 0.11.0
to an *ordered* timeseries (GH2437_).
- Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (GH2903_)
- Timestamp now supports the class method fromordinal similar to datetimes (GH3042_)
+ - Fix issue with indexing a series with a boolean key and specifiying a 1-len list on the rhs (GH2745_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -240,6 +241,7 @@ pandas 0.11.0
.. _GH3011: https://github.com/pydata/pandas/issues/3011
.. _GH2681: https://github.com/pydata/pandas/issues/2681
.. _GH2719: https://github.com/pydata/pandas/issues/2719
+.. _GH2745: https://github.com/pydata/pandas/issues/2745
.. _GH2746: https://github.com/pydata/pandas/issues/2746
.. _GH2747: https://github.com/pydata/pandas/issues/2747
.. _GH2751: https://github.com/pydata/pandas/issues/2751
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 332e38012fc25..586f36cbd0b0f 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -766,10 +766,6 @@ def changeit():
return r, True
- new_dtype, fill_value = _maybe_promote(result.dtype,other)
- if new_dtype != result.dtype:
- return changeit()
-
try:
np.putmask(result, mask, other)
except:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 53793d503403e..6efa4026641c0 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -729,11 +729,19 @@ def where(self, cond, other=nan, inplace=False):
if isinstance(other, Series):
other = other.reindex(ser.index)
+ elif isinstance(other, (tuple,list)):
+ other = np.array(other)
if len(other) != len(ser):
- raise ValueError('Length of replacements must equal series length')
+
+ # GH 2745
+ # treat like a scalar
+ if len(other) == 1:
+ other = np.array(other[0]*len(ser))
+ else:
+ raise ValueError('Length of replacements must equal series length')
change = ser if inplace else None
- result, changed = com._maybe_upcast_putmask(ser,~cond,other,change=change)
+ com._maybe_upcast_putmask(ser,~cond,other,change=change)
return None if inplace else ser
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index fbbb48966f754..cc69649f24cdf 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1087,6 +1087,21 @@ def test_where(self):
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
self.assertRaises(ValueError, s.where, cond, s[:3].values)
+ # GH 2745
+ s = Series([1,2])
+ s[[True, False]] = [0,1]
+ expected = Series([0,2])
+ assert_series_equal(s,expected)
+
+ s = Series([1,2])
+ s[[True, False]] = [0]
+ expected = Series([0,2])
+ assert_series_equal(s,expected)
+
+ # failures
+ self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), [0,2,3])
+ self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), [])
+
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
| closes #2745
| https://api.github.com/repos/pandas-dev/pandas/pulls/3139 | 2013-03-22T18:09:16Z | 2013-03-22T18:53:50Z | 2013-03-22T18:53:50Z | 2014-06-27T14:00:48Z |
BUG: GH3042 Timestamp now supports classmethod fromordinal similar to datetimes | diff --git a/RELEASE.rst b/RELEASE.rst
index 8fe7a33ea0c87..3927d1e7b0122 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -113,7 +113,6 @@ pandas 0.11.0
df = DataFrame(dict(A = ts))
df['2001']
-
**API Changes**
- Do not automatically upcast numeric specified dtypes to ``int64`` or
@@ -220,7 +219,10 @@ pandas 0.11.0
- Allow selection in an *unordered* timeseries to work similary
to an *ordered* timeseries (GH2437_).
- Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (GH2903_)
+ - Timestamp now supports the class method fromordinal similar to datetimes (GH3042_)
+.. _GH622: https://github.com/pydata/pandas/issues/622
+.. _GH797: https://github.com/pydata/pandas/issues/797
.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2809: https://github.com/pydata/pandas/issues/2809
.. _GH2810: https://github.com/pydata/pandas/issues/2810
@@ -236,8 +238,6 @@ pandas 0.11.0
.. _GH2807: https://github.com/pydata/pandas/issues/2807
.. _GH2918: https://github.com/pydata/pandas/issues/2918
.. _GH3011: https://github.com/pydata/pandas/issues/3011
-.. _GH622: https://github.com/pydata/pandas/issues/622
-.. _GH797: https://github.com/pydata/pandas/issues/797
.. _GH2681: https://github.com/pydata/pandas/issues/2681
.. _GH2719: https://github.com/pydata/pandas/issues/2719
.. _GH2746: https://github.com/pydata/pandas/issues/2746
@@ -267,19 +267,20 @@ pandas 0.11.0
.. _GH2967: https://github.com/pydata/pandas/issues/2967
.. _GH2982: https://github.com/pydata/pandas/issues/2982
.. _GH2989: https://github.com/pydata/pandas/issues/2989
+.. _GH2993: https://github.com/pydata/pandas/issues/2993
.. _GH3002: https://github.com/pydata/pandas/issues/3002
.. _GH3010: https://github.com/pydata/pandas/issues/3010
.. _GH3012: https://github.com/pydata/pandas/issues/3012
.. _GH3029: https://github.com/pydata/pandas/issues/3029
.. _GH3037: https://github.com/pydata/pandas/issues/3037
.. _GH3041: https://github.com/pydata/pandas/issues/3041
+.. _GH3042: https://github.com/pydata/pandas/issues/3042
.. _GH3053: https://github.com/pydata/pandas/issues/3053
+.. _GH3070: https://github.com/pydata/pandas/issues/3070
.. _GH3076: https://github.com/pydata/pandas/issues/3076
.. _GH3063: https://github.com/pydata/pandas/issues/3063
.. _GH3059: https://github.com/pydata/pandas/issues/3059
-.. _GH2993: https://github.com/pydata/pandas/issues/2993
.. _GH3115: https://github.com/pydata/pandas/issues/3115
-.. _GH3070: https://github.com/pydata/pandas/issues/3070
.. _GH3130: https://github.com/pydata/pandas/issues/3130
pandas 0.10.1
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 43c81934aa182..b9f653bd51899 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1262,6 +1262,19 @@ def test_timestamp_repr(self):
result = repr(stamp)
self.assert_(iso8601 in result)
+ def test_timestamp_from_ordinal(self):
+
+ # GH 3042
+ dt = datetime(2011, 4, 16, 0, 0)
+ ts = Timestamp.fromordinal(dt.toordinal())
+ self.assert_(ts.to_pydatetime() == dt)
+
+ # with a tzinfo
+ stamp = Timestamp('2011-4-16', tz='US/Eastern')
+ dt_tz = stamp.to_pydatetime()
+ ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
+ self.assert_(ts.to_pydatetime() == dt_tz)
+
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index bb4eff2294ded..324627ce91f46 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -120,6 +120,12 @@ def _is_fixed_offset(tz):
# This serves as the box for datetime64
class Timestamp(_Timestamp):
+ @classmethod
+ def fromordinal(cls, ordinal, offset=None, tz=None):
+ """ passed an ordinal, translate and convert to a ts
+ note: by definition there cannot be any tz info on the ordinal itself """
+ return cls(datetime.fromordinal(ordinal),offset=offset,tz=tz)
+
def __new__(cls, object ts_input, object offset=None, tz=None):
cdef _TSObject ts
cdef _Timestamp ts_base
| closes #3042
| https://api.github.com/repos/pandas-dev/pandas/pulls/3138 | 2013-03-22T16:15:06Z | 2013-03-22T17:37:03Z | 2013-03-22T17:37:03Z | 2014-06-13T01:10:13Z |
ENH: GH3070 allow string selection on a DataFrame with a datelike index, to have partial_string semantics (like Series) | diff --git a/RELEASE.rst b/RELEASE.rst
index 65411b9b69cad..224a9846bf864 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -98,6 +98,17 @@ pandas 0.11.0
histograms. (GH2710_).
- DataFrame.from_records now accepts not only dicts but any instance of
the collections.Mapping ABC.
+ - Allow selection semantics via a string with a datelike index to work in both
+ Series and DataFrames (GH3070_)
+
+ .. ipython:: python
+
+ idx = date_range("2001-10-1", periods=5, freq='M')
+ ts = Series(np.random.rand(len(idx)),index=idx)
+ ts['2001']
+
+ df = DataFrame(dict(A = ts))
+ df['2001']
**API Changes**
@@ -263,6 +274,7 @@ pandas 0.11.0
.. _GH3059: https://github.com/pydata/pandas/issues/3059
.. _GH2993: https://github.com/pydata/pandas/issues/2993
.. _GH3115: https://github.com/pydata/pandas/issues/3115
+.. _GH3070: https://github.com/pydata/pandas/issues/3070
pandas 0.10.1
=============
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 0a55d78dd24c3..3bc80a36f5561 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -105,6 +105,9 @@ Expanding Data
`Alignment and to-date
<http://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions>`__
+`Rolling Computation window based on values instead of counts
+<http://stackoverflow.com/questions/14300768/pandas-rolling-computation-with-window-based-on-values-instead-of-counts>`__
+
Splitting
~~~~~~~~~
@@ -171,6 +174,9 @@ CSV
`Reading the first few lines of a frame
<http://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe>`__
+`Inferring dtypes from a file
+<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
+
SQL
~~~
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index f13fb50f1aa3c..87b861a45dbae 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -245,6 +245,17 @@ Enhancements
- You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (GH2437_)
+ - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (GH3070_)
+
+ .. ipython:: python
+
+ idx = date_range("2001-10-1", periods=5, freq='M')
+ ts = Series(np.random.rand(len(idx)),index=idx)
+ ts['2001']
+
+ df = DataFrame(dict(A = ts))
+ df['2001']
+
- ``Squeeze`` to possibly remove length 1 dimensions from an object.
.. ipython:: python
@@ -313,3 +324,4 @@ on GitHub for a complete list.
.. _GH3011: https://github.com/pydata/pandas/issues/3011
.. _GH3076: https://github.com/pydata/pandas/issues/3076
.. _GH3059: https://github.com/pydata/pandas/issues/3059
+.. _GH3070: https://github.com/pydata/pandas/issues/3070
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6ef2ad642612c..b47b77fdaeb6c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -28,7 +28,7 @@
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (_NDFrameIndexer, _maybe_droplevels,
- _is_index_slice, _check_bool_indexer,
+ _convert_to_index_sliceable, _check_bool_indexer,
_maybe_convert_indices)
from pandas.core.internals import BlockManager, make_block, form_blocks
from pandas.core.series import Series, _radd_compat
@@ -1864,10 +1864,13 @@ def iget_value(self, i, j):
return self.iat[i,j]
def __getitem__(self, key):
- if isinstance(key, slice):
- # slice rows
- return self._getitem_slice(key)
- elif isinstance(key, (np.ndarray, list)):
+
+ # see if we can slice the rows
+ indexer = _convert_to_index_sliceable(self, key)
+ if indexer is not None:
+ return self._getitem_slice(indexer)
+
+ if isinstance(key, (np.ndarray, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
@@ -1879,14 +1882,7 @@ def __getitem__(self, key):
return self._get_item_cache(key)
def _getitem_slice(self, key):
- idx_type = self.index.inferred_type
- if idx_type == 'floating':
- indexer = self.ix._convert_to_indexer(key, axis=0)
- elif idx_type == 'integer' or _is_index_slice(key):
- indexer = key
- else:
- indexer = self.ix._convert_to_indexer(key, axis=0)
- return self._slice(indexer, axis=0)
+ return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
@@ -1982,10 +1978,12 @@ def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def __setitem__(self, key, value):
- if isinstance(key, slice):
- # slice rows
- self._setitem_slice(key, value)
- elif isinstance(key, (np.ndarray, list)):
+ # see if we can slice the rows
+ indexer = _convert_to_index_sliceable(self, key)
+ if indexer is not None:
+ return self._setitem_slice(indexer, value)
+
+ if isinstance(key, (np.ndarray, list)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
@@ -1994,14 +1992,7 @@ def __setitem__(self, key, value):
self._set_item(key, value)
def _setitem_slice(self, key, value):
- idx_type = self.index.inferred_type
- if idx_type == 'floating':
- indexer = self.ix._convert_to_indexer(key, axis=0)
- elif idx_type == 'integer' or _is_index_slice(key):
- indexer = key
- else:
- indexer = self.ix._convert_to_indexer(key, axis=0)
- self.ix._setitem_with_indexer(indexer, value)
+ self.ix._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 16259fd39c0a9..3d4ac12a4efd7 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -827,6 +827,30 @@ def _convert_key(self, key):
_eps = np.finfo('f4').eps
+def _convert_to_index_sliceable(obj, key):
+ """ if we are index sliceable, then return my slicer, otherwise return None """
+ idx = obj.index
+ if isinstance(key, slice):
+ idx_type = idx.inferred_type
+ if idx_type == 'floating':
+ indexer = obj.ix._convert_to_indexer(key, axis=0)
+ elif idx_type == 'integer' or _is_index_slice(key):
+ indexer = key
+ else:
+ indexer = obj.ix._convert_to_indexer(key, axis=0)
+ return indexer
+
+ elif isinstance(key, basestring):
+
+ # we need a timelike key here
+ if idx.is_all_dates:
+ try:
+ return idx._get_string_slice(key)
+ except:
+ return None
+
+ return None
+
def _is_index_slice(obj):
def _is_valid_index(x):
return (com.is_integer(x) or com.is_float(x)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 6155590100452..43c81934aa182 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -196,12 +196,34 @@ def test_indexing_unordered(self):
for t in result.index:
self.assertTrue(t.year == 2005)
+ def test_indexing(self):
+
+ idx = date_range("2001-1-1", periods=20, freq='M')
+ ts = Series(np.random.rand(len(idx)),index=idx)
+
+ # getting
+
+ # GH 3070, make sure semantics work on Series/Frame
+ expected = ts['2001']
+
+ df = DataFrame(dict(A = ts))
+ result = df['2001']['A']
+ assert_series_equal(expected,result)
+
+ # setting
+ ts['2001'] = 1
+ expected = ts['2001']
+
+ df.loc['2001','A'] = 1
+
+ result = df['2001']['A']
+ assert_series_equal(expected,result)
+
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
-
class TestTimeSeries(unittest.TestCase):
_multiprocess_can_split_ = True
| closes #3070
| https://api.github.com/repos/pandas-dev/pandas/pulls/3137 | 2013-03-22T14:44:36Z | 2013-03-22T16:20:31Z | 2013-03-22T16:20:31Z | 2014-07-10T09:20:30Z |
ENH: GH2437 added selection to an unordered timeseries | diff --git a/RELEASE.rst b/RELEASE.rst
index a1bb3a252ec6e..65411b9b69cad 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -203,6 +203,8 @@ pandas 0.11.0
- series.plot(kind='bar') now respects pylab color schem (GH3115_)
- Fixed bug in reshape if not passed correct input, now raises TypeError (GH2719_)
- Fix NameError issue on RESO_US (GH2787_)
+ - Allow selection in an *unordered* timeseries to work similary
+ to an *ordered* timeseries (GH2437_).
.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2809: https://github.com/pydata/pandas/issues/2809
@@ -229,6 +231,7 @@ pandas 0.11.0
.. _GH2776: https://github.com/pydata/pandas/issues/2776
.. _GH2778: https://github.com/pydata/pandas/issues/2778
.. _GH2787: https://github.com/pydata/pandas/issues/2787
+.. _GH2437: https://github.com/pydata/pandas/issues/2437
.. _GH2793: https://github.com/pydata/pandas/issues/2793
.. _GH2795: https://github.com/pydata/pandas/issues/2795
.. _GH2819: https://github.com/pydata/pandas/issues/2819
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index eba37c02c6237..f13fb50f1aa3c 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -243,6 +243,8 @@ Enhancements
- In ``HDFStore``, new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are
provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_)
+ - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (GH2437_)
+
- ``Squeeze`` to possibly remove length 1 dimensions from an object.
.. ipython:: python
@@ -293,6 +295,7 @@ See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
+.. _GH2437: https://github.com/pydata/pandas/issues/2437
.. _GH2809: https://github.com/pydata/pandas/issues/2809
.. _GH2810: https://github.com/pydata/pandas/issues/2810
.. _GH2837: https://github.com/pydata/pandas/issues/2837
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index c91a1ebd5568f..25c94900d159c 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1042,9 +1042,6 @@ def intersection(self, other):
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed):
- if not self.is_monotonic:
- raise TimeSeriesError('Partial indexing only valid for ordered '
- 'time series.')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1), tz=self.tz)
@@ -1079,11 +1076,19 @@ def _partial_date_slice(self, reso, parsed):
tz=self.tz).value - 1)
else:
raise KeyError
+
stamps = self.asi8
- left = stamps.searchsorted(t1.value, side='left')
- right = stamps.searchsorted(t2.value, side='right')
- return slice(left, right)
+
+ if self.is_monotonic:
+
+ # a monotonic (sorted) series can be sliced
+ left = stamps.searchsorted(t1.value, side='left')
+ right = stamps.searchsorted(t2.value, side='right')
+ return slice(left, right)
+
+ # try to find a the dates
+ return ((stamps>=t1.value) & (stamps<=t2.value)).nonzero()[0]
def _possibly_promote(self, other):
if other.inferred_type == 'date':
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 2ec4fd7ffd67b..6155590100452 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -18,6 +18,7 @@
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.frequencies as fmod
+from pandas.tseries.index import TimeSeriesError
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
@@ -168,6 +169,32 @@ def test_indexing_over_size_cutoff(self):
finally:
_index._SIZE_CUTOFF = old_cutoff
+ def test_indexing_unordered(self):
+
+ # GH 2437
+ from pandas import concat
+ rng = date_range(start='2011-01-01', end='2011-01-15')
+ ts = Series(randn(len(rng)), index=rng)
+ ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
+
+ for t in ts.index:
+ s = str(t)
+ expected = ts[t]
+ result = ts2[t]
+ self.assertTrue(expected == result)
+
+ result = ts2['2011'].sort_index()
+ expected = ts['2011']
+ assert_series_equal(result,expected)
+
+ # diff freq
+ rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
+ ts = Series(np.arange(len(rng)), index=rng)
+ ts = ts.take(np.random.permutation(20))
+
+ result = ts['2005']
+ for t in result.index:
+ self.assertTrue(t.year == 2005)
def assert_range_equal(left, right):
assert(left.equals(right))
@@ -2017,13 +2044,6 @@ def test_partial_slice_minutely(self):
self.assert_(s['2005-1-1 23:59:00'] == s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
- def test_partial_not_monotonic(self):
- rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
- ts = Series(np.arange(len(rng)), index=rng)
- ts = ts.take(np.random.permutation(20))
-
- self.assertRaises(Exception, ts.__getitem__, '2005')
-
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
| closes #2437
| https://api.github.com/repos/pandas-dev/pandas/pulls/3136 | 2013-03-22T14:42:10Z | 2013-03-22T14:57:52Z | 2013-03-22T14:57:52Z | 2014-06-16T01:24:35Z |
BUG: fix NameError issue in GH2787 | diff --git a/RELEASE.rst b/RELEASE.rst
index c1fa30e23bc5a..a1bb3a252ec6e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -202,6 +202,7 @@ pandas 0.11.0
- Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (GH2993_)
- series.plot(kind='bar') now respects pylab color schem (GH3115_)
- Fixed bug in reshape if not passed correct input, now raises TypeError (GH2719_)
+ - Fix NameError issue on RESO_US (GH2787_)
.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2809: https://github.com/pydata/pandas/issues/2809
@@ -227,6 +228,7 @@ pandas 0.11.0
.. _GH2751: https://github.com/pydata/pandas/issues/2751
.. _GH2776: https://github.com/pydata/pandas/issues/2776
.. _GH2778: https://github.com/pydata/pandas/issues/2778
+.. _GH2787: https://github.com/pydata/pandas/issues/2787
.. _GH2793: https://github.com/pydata/pandas/issues/2793
.. _GH2795: https://github.com/pydata/pandas/issues/2795
.. _GH2819: https://github.com/pydata/pandas/issues/2819
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 3bf29af8581a9..a43c80bf22158 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -34,11 +34,11 @@ class Resolution(object):
@classmethod
def get_str(cls, reso):
- return {RESO_US: 'microsecond',
- RESO_SEC: 'second',
- RESO_MIN: 'minute',
- RESO_HR: 'hour',
- RESO_DAY: 'day'}.get(reso, 'day')
+ return {cls.RESO_US: 'microsecond',
+ cls.RESO_SEC: 'second',
+ cls.RESO_MIN: 'minute',
+ cls.RESO_HR: 'hour',
+ cls.RESO_DAY: 'day'}.get(reso, 'day')
def get_reso_string(reso):
| closes #2787
| https://api.github.com/repos/pandas-dev/pandas/pulls/3135 | 2013-03-22T14:33:51Z | 2013-03-22T14:39:18Z | 2013-03-22T14:39:18Z | 2014-07-07T01:00:55Z |
DOC: GH3105 better error message on ndarray construction | diff --git a/RELEASE.rst b/RELEASE.rst
index d71fce70dd5d8..45477610cabb2 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -147,6 +147,8 @@ pandas 0.11.0
- arguments to DataFrame.clip were inconsistent to numpy and Series clipping
(GH2747_)
- util.testing.assert_frame_equal now checks the column and index names (GH2964_)
+ - Constructors will now return a more informative ValueError on failures
+ when invalid shapes are passed
**Bug Fixes**
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index afb698221c48b..b1241d7fc12a4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -30,7 +30,9 @@
from pandas.core.indexing import (_NDFrameIndexer, _maybe_droplevels,
_convert_to_index_sliceable, _check_bool_indexer,
_maybe_convert_indices)
-from pandas.core.internals import BlockManager, make_block, form_blocks
+from pandas.core.internals import (BlockManager,
+ create_block_manager_from_arrays,
+ create_block_manager_from_blocks)
from pandas.core.series import Series, _radd_compat
import pandas.core.expressions as expressions
from pandas.compat.scipy import scoreatpercentile as _quantile
@@ -553,9 +555,8 @@ def _init_ndarray(self, values, index, columns, dtype=None,
else:
columns = _ensure_index(columns)
- block = make_block(values.T, columns, columns)
- return BlockManager([block], [columns, index])
-
+ return create_block_manager_from_blocks([ values.T ], [ columns, index ])
+
def _wrap_array(self, arr, axes, copy=False):
index, columns = axes
return self._constructor(arr, index=index, columns=columns, copy=copy)
@@ -1283,7 +1284,7 @@ def to_panel(self):
minor_axis.name = self.index.names[1]
new_axes = [selfsorted.columns, major_axis, minor_axis]
- new_mgr = BlockManager(new_blocks, new_axes)
+ new_mgr = create_block_manager_from_blocks(new_blocks, new_axes)
return Panel(new_mgr)
@@ -5300,13 +5301,7 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
- # segregates dtypes and forms blocks matching to columns
- blocks = form_blocks(arrays, arr_names, axes)
-
- # consolidate for now
- mgr = BlockManager(blocks, axes)
- return mgr.consolidate()
-
+ return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
@@ -5384,7 +5379,7 @@ def convert(v):
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
- raise Exception('Must pass 2-d input')
+ raise ValueError('Must pass 2-d input')
return values
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 385695ec6cc50..6bbb1d9ce979b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -33,11 +33,11 @@ def __init__(self, values, items, ref_items, ndim=2):
values = np.array(values, dtype=object)
if values.ndim != ndim:
- raise AssertionError('Wrong number of dimensions')
+ raise ValueError('Wrong number of dimensions')
if len(items) != len(values):
- raise AssertionError('Wrong number of items passed (%d vs %d)'
- % (len(items), len(values)))
+ raise ValueError('Wrong number of items passed %d, indices imply %d'
+ % (len(items), len(values)))
self._ref_locs = None
self.values = values
@@ -911,13 +911,14 @@ def shape(self):
def _verify_integrity(self):
mgr_shape = self.shape
+ tot_items = sum(len(x.items) for x in self.blocks)
for block in self.blocks:
if block.ref_items is not self.items:
raise AssertionError("Block ref_items must be BlockManager "
"items")
if block.values.shape[1:] != mgr_shape[1:]:
- raise AssertionError('Block shape incompatible with manager')
- tot_items = sum(len(x.items) for x in self.blocks)
+ construction_error(tot_items,block.values.shape[1:],self.axes)
+
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items')
@@ -1704,7 +1705,39 @@ def item_dtypes(self):
return result
+def construction_error(tot_items, block_shape, axes):
+ """ raise a helpful message about our construction """
+ raise ValueError("Shape of passed values is %s, indices imply %s" % (
+ tuple([tot_items] + list(block_shape)),tuple(len(ax) for ax in axes)))
+
+
+def create_block_manager_from_blocks(blocks, axes):
+ try:
+
+ # if we are passed values, make the blocks
+ if len(blocks) == 1 and not isinstance(blocks[0], Block):
+ blocks = [ make_block(blocks[0], axes[0], axes[0]) ]
+
+ mgr = BlockManager(blocks, axes)
+ mgr._consolidate_inplace()
+ return mgr
+
+ except (ValueError):
+ blocks = [ getattr(b,'values',b) for b in blocks ]
+ tot_items = sum(b.shape[0] for b in blocks)
+ construction_error(tot_items,blocks[0].shape[1:],axes)
+
+def create_block_manager_from_arrays(arrays, names, axes):
+ try:
+ blocks = form_blocks(arrays, names, axes)
+ mgr = BlockManager(blocks, axes)
+ mgr._consolidate_inplace()
+ return mgr
+ except (ValueError):
+ construction_error(len(arrays),arrays[0].shape[1:],axes)
+
def form_blocks(arrays, names, axes):
+
# pre-filter out items if we passed it
items = axes[0]
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 9f91d8add1eac..d33ce4c90244b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -13,7 +13,9 @@
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import _maybe_droplevels, _is_list_like
-from pandas.core.internals import BlockManager, make_block, form_blocks
+from pandas.core.internals import (BlockManager,
+ create_block_manager_from_arrays,
+ create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
@@ -310,10 +312,7 @@ def _init_dict(self, data, axes, dtype=None):
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
- # segregates dtypes and forms blocks matching to columns
- blocks = form_blocks(arrays, arr_names, axes)
- mgr = BlockManager(blocks, axes).consolidate()
- return mgr
+ return create_block_manager_from_arrays(arrays, arr_names, axes)
@property
def shape(self):
@@ -398,9 +397,7 @@ def _init_matrix(self, data, axes, dtype=None, copy=False):
ax = _ensure_index(ax)
fixed_axes.append(ax)
- items = fixed_axes[0]
- block = make_block(values, items, items)
- return BlockManager([block], fixed_axes)
+ return create_block_manager_from_blocks([ values ], fixed_axes)
#----------------------------------------------------------------------
# Array interface
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 84a4121387964..f9cc850cc6d27 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -22,7 +22,7 @@
from pandas.core.algorithms import match, unique, factorize
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe, _try_sort
-from pandas.core.internals import BlockManager, make_block, form_blocks
+from pandas.core.internals import BlockManager, make_block
from pandas.core.reshape import block2d_to_blocknd, factor_indexer
from pandas.core.index import Int64Index, _ensure_index
import pandas.core.common as com
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b363a276723da..93477073d2a8e 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1976,7 +1976,7 @@ def test_constructor_cast_failure(self):
df['foo'] = np.ones((4,2)).tolist()
# this is not ok
- self.assertRaises(AssertionError, df.__setitem__, tuple(['test']), np.ones((4,2)))
+ self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))
# this is ok
df['foo2'] = np.ones((4,2)).tolist()
@@ -2135,6 +2135,51 @@ def test_constructor_dict(self):
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assert_(frame.index.equals(Index([])))
+ def test_constructor_error_msgs(self):
+
+ # mix dict and array, wrong size
+ try:
+ DataFrame({'A': {'a': 'a', 'b': 'b'},
+ 'B': ['a', 'b', 'c']})
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_("Mixing dicts with non-Series may lead to ambiguous ordering." in str(detail))
+
+ # wrong size ndarray, GH 3105
+ from pandas import date_range
+ try:
+ DataFrame(np.arange(12).reshape((4, 3)), columns=['foo', 'bar', 'baz'],
+ index=date_range('2000-01-01', periods=3))
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 4), indices imply (3, 3)"))
+
+ # higher dim raise exception
+ try:
+ DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_("Must pass 2-d input" in str(detail))
+
+ # wrong size axis labels
+ try:
+ DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 2), indices imply (3, 1)"))
+
+ try:
+ DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 2), indices imply (2, 2)"))
+
+ try:
+ DataFrame({'a': False, 'b': True})
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_("If use all scalar values, must pass index" in str(detail))
+
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in xrange(10)),
@@ -3545,7 +3590,7 @@ def test_from_records_bad_index_column(self):
assert(df1.index.equals(Index(df.C)))
# should fail
- self.assertRaises(Exception, DataFrame.from_records, df, index=[2])
+ self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 84f5f3afab6db..921097e3408fd 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -939,9 +939,25 @@ def test_from_dict_mixed_orient(self):
self.assert_(panel['foo'].values.dtype == np.object_)
self.assert_(panel['A'].values.dtype == np.float64)
- def test_values(self):
- self.assertRaises(Exception, Panel, np.random.randn(5, 5, 5),
- range(5), range(5), range(4))
+ def test_constructor_error_msgs(self):
+
+ try:
+ Panel(np.random.randn(3,4,5), range(4), range(5), range(5))
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (4, 5, 5)"))
+
+ try:
+ Panel(np.random.randn(3,4,5), range(5), range(4), range(5))
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (5, 4, 5)"))
+
+ try:
+ Panel(np.random.randn(3,4,5), range(5), range(5), range(4))
+ except (Exception), detail:
+ self.assert_(type(detail) == ValueError)
+ self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (5, 5, 4)"))
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
| closes #3105
| https://api.github.com/repos/pandas-dev/pandas/pulls/3131 | 2013-03-21T20:48:48Z | 2013-03-25T19:48:34Z | 2013-03-25T19:48:34Z | 2014-07-31T14:36:10Z |
PERF: Limit memmove to >= 256 bytes, relax contiguity requirements | As per #3089, for now, I'm putting in a "magic" lower limit for memmove of 256 bytes (32 bytes was the case in the affected tests), which seems to be reasonable from local testing (::dubious::), unless someone on [StackOverflow](http://stackoverflow.com/questions/15554565/forcing-gcc-to-perform-loop-unswitching-of-memcpy-runtime-size-checks) gives me a better idea of what to do.
Also, realized that only the stride in the dimension of the copy matters (i.e. the entire array doesn't have to be contiguous, only the copied subarrays do), so I relaxed that requirement (non-contiguous cases don't seem to be tested in our performance regressions, since they're pretty shallow unfortunately, but they do happen often in practice...this should be addressed by #3114).
Here are the vbench results on the low (improved) end (<90%):
```
series_drop_duplicates_string 0.7474 1.0182 0.7340
frame_multi_and_st 763.4459 1008.7359 0.7568
reindex_frame_level_align 1.5111 1.9937 0.7579
reindex_frame_level_reindex 1.5304 1.9711 0.7764
panel_from_dict_equiv_indexes 34.4702 42.6086 0.8090
groupby_frame_cython_many_columns 6.4044 7.4999 0.8539
read_csv_comment2 40.2233 45.7705 0.8788
groupby_series_simple_cython 7.5779 8.4962 0.8919
```
and the high (regressed) end (>105%, as there were no cases of >110%):
```
frame_ctor_nested_dict 108.7902 103.3220 1.0529
stats_rolling_mean 2.8128 2.6283 1.0702
```
I suspect the last results are just noise.
This is 32-bit Linux GCC 4.6.3, mileage may vary (still haven't set up at 64-bit environment), if anyone else could test this commit too that would be great.
---
**EDIT**
repeat run results:
```
reindex_frame_level_reindex 1.4861 1.9335 0.7686
reindex_frame_level_align 1.5578 1.9846 0.7849
panel_from_dict_equiv_indexes 35.0858 42.0521 0.8343
groupby_frame_cython_many_columns 6.5504 7.8101 0.8387
read_csv_comment2 40.2616 46.1578 0.8723
groupby_last_float32 7.7265 8.6995 0.8882
```
and
```
frame_reindex_columns 0.4452 0.4237 1.0507
append_frame_single_homogenous 0.4923 0.4669 1.0543
reshape_pivot_time_series 292.3670 277.2169 1.0547
join_dataframe_integer_key 2.8652 2.6741 1.0714
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3130 | 2013-03-21T20:37:49Z | 2013-03-22T16:40:46Z | 2013-03-22T16:40:46Z | 2014-06-13T15:15:36Z | |
DOC: GH2072 add example and warning on fallback indexing for float indexes. | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 392768a21586b..0a4ca45af6b57 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -675,7 +675,8 @@ Advanced Indexing with ``.ix``
explicit about indexing choices. ``.ix`` allows a great flexibility to specify
indexing locations by *label* and/or *integer position*. Pandas will attempt
to use any passed *integer* as *label* locations first (like what ``.loc``
- would do, then to fall back on *positional* indexing, like what ``.iloc`` would do).
+ would do, then to fall back on *positional* indexing, like what ``.iloc``
+ would do). See :ref:`Fallback Indexing <indexing.fallback>` for an example.
The syntax of using ``.ix`` is identical to ``.loc``, in :ref:`Selection by Label <indexing.label>`,
and ``.iloc`` in :ref:`Selection by Position <indexing.integer>`.
@@ -800,6 +801,44 @@ values, though setting arbitrary vectors is not yet supported:
print df2
print df2.dtypes
+
+Fallback indexing
+~~~~~~~~~~~~~~~~~~~~
+
+.. _indexing.fallback:
+
+Float indexes should be used only with caution. If you have a float indexed
+``DataFrame`` and try to select using an integer, the row that Pandas returns
+might not be what you expect. Pandas first attempts to use the *integer*
+as a *label* location, but fails to find a match (because the types
+are not equal). Pandas then falls back to back to positional indexing.
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.random.randn(4,4),
+ columns=list('ABCD'), index=[1.0, 2.0, 3.0, 4.0])
+ df
+ df.ix[1]
+
+To select the row you do expect, instead use a float label or
+use ``iloc``.
+
+.. ipython:: python
+
+ df.ix[1.0]
+ df.iloc[0]
+
+Instead of using a float index, it is often better to
+convert to an integer index:
+
+.. ipython:: python
+
+ df_new = df.reset_index()
+ df_new[df_new.index == 1.0]
+ # now you can also do "float selection"
+ df_new[(df_new.index >= 1.0) & (df_new.index < 2)]
+
+
.. _indexing.class:
Index objects
| https://github.com/pydata/pandas/issues/2072#issuecomment-9454035
| https://api.github.com/repos/pandas-dev/pandas/pulls/3128 | 2013-03-21T18:11:55Z | 2013-03-22T20:05:20Z | 2013-03-22T20:05:20Z | 2014-07-19T21:39:35Z |
BUG: GH2719, fixed reshape on a Series with invalid input | diff --git a/RELEASE.rst b/RELEASE.rst
index a33fad69fb3c1..c1fa30e23bc5a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -201,6 +201,7 @@ pandas 0.11.0
- Fixed missing tick bars on scatter_matrix plot (GH3063_)
- Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (GH2993_)
- series.plot(kind='bar') now respects pylab color schem (GH3115_)
+ - Fixed bug in reshape if not passed correct input, now raises TypeError (GH2719_)
.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2809: https://github.com/pydata/pandas/issues/2809
@@ -220,6 +221,7 @@ pandas 0.11.0
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
.. _GH2681: https://github.com/pydata/pandas/issues/2681
+.. _GH2719: https://github.com/pydata/pandas/issues/2719
.. _GH2746: https://github.com/pydata/pandas/issues/2746
.. _GH2747: https://github.com/pydata/pandas/issues/2747
.. _GH2751: https://github.com/pydata/pandas/issues/2751
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0c006d4c60904..53793d503403e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -923,6 +923,9 @@ def reshape(self, newshape, order='C'):
"""
See numpy.ndarray.reshape
"""
+ if order not in ['C','F']:
+ raise TypeError("must specify a tuple / singular length to reshape")
+
if isinstance(newshape, tuple) and len(newshape) > 1:
return self.values.reshape(newshape, order=order)
else:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 20c57ebbd0db6..fbbb48966f754 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -932,6 +932,10 @@ def test_reshape_non_2d(self):
x = Series(np.random.random(201), name='x')
self.assertRaises(TypeError, x.reshape, (len(x),))
+ # GH 2719
+ a = Series([1,2,3,4])
+ self.assertRaises(TypeError,a.reshape, 2, 2)
+
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
result = x.reshape((-1, 1))
| fixes #2719
| https://api.github.com/repos/pandas-dev/pandas/pulls/3127 | 2013-03-21T18:08:15Z | 2013-03-21T19:46:46Z | 2013-03-21T19:46:46Z | 2014-06-20T13:45:44Z |
BUG: HDFStore bug when appending to a table, .typ not recreated on subsequent appends | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 929d9182f35a9..fd9127efa72df 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1162,6 +1162,10 @@ def set_kind(self):
elif self.dtype.startswith('bool'):
self.kind = 'bool'
+ # set my typ if we need
+ if self.typ is None:
+ self.typ = getattr(self.description,self.cname,None)
+
def set_atom(self, block, existing_col, min_itemsize, nan_rep, **kwargs):
""" create and setup my atom from the block b """
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index a40284ff1b1ae..158cb351678f3 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -659,6 +659,15 @@ def check_col(key,name,size):
result = store.select('df')
tm.assert_frame_equal(result, df)
+ with ensure_clean(self.path) as store:
+
+ # infer the .typ on subsequent appends
+ df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))
+ store.remove('df')
+ store.append('df', df[:5], min_itemsize=200)
+ store.append('df', df[5:], min_itemsize=200)
+ tm.assert_frame_equal(store['df'], df)
+
def test_append_with_data_columns(self):
with ensure_clean(self.path) as store:
| not correctctly recreating string columns so that could test the min_itemsize if its passed on subsequent appends, see:
http://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type?noredirect=1#comment22032050_15488809
| https://api.github.com/repos/pandas-dev/pandas/pulls/3126 | 2013-03-21T17:50:03Z | 2013-03-21T19:37:21Z | 2013-03-21T19:37:21Z | 2013-03-21T19:37:21Z |
BUG: GH2903 implemented xs for axis=1 with a level specified | diff --git a/RELEASE.rst b/RELEASE.rst
index 51ed383648929..8fe7a33ea0c87 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -219,8 +219,9 @@ pandas 0.11.0
- Fix NameError issue on RESO_US (GH2787_)
- Allow selection in an *unordered* timeseries to work similary
to an *ordered* timeseries (GH2437_).
+ - Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (GH2903_)
- .. _GH2758: https://github.com/pydata/pandas/issues/2758
+.. _GH2758: https://github.com/pydata/pandas/issues/2758
.. _GH2809: https://github.com/pydata/pandas/issues/2809
.. _GH2810: https://github.com/pydata/pandas/issues/2810
.. _GH2837: https://github.com/pydata/pandas/issues/2837
@@ -257,6 +258,7 @@ pandas 0.11.0
.. _GH2850: https://github.com/pydata/pandas/issues/2850
.. _GH2898: https://github.com/pydata/pandas/issues/2898
.. _GH2892: https://github.com/pydata/pandas/issues/2892
+.. _GH2903: https://github.com/pydata/pandas/issues/2903
.. _GH2909: https://github.com/pydata/pandas/issues/2909
.. _GH2922: https://github.com/pydata/pandas/issues/2922
.. _GH2929: https://github.com/pydata/pandas/issues/2929
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b47b77fdaeb6c..afb698221c48b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2211,7 +2211,11 @@ def xs(self, key, axis=0, level=None, copy=True):
if labels.levels[lev_num].inferred_type == 'integer':
indexer = self.index[loc]
- result = self.ix[indexer]
+ # select on the correct axis
+ if axis == 1:
+ result = self.ix[:, indexer]
+ else:
+ result = self.ix[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 207eb96a9804e..75aa208e0c6b2 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -714,6 +714,16 @@ def test_ix_general(self):
df.sortlevel(inplace=True)
df.ix[(4.0,2012)]
+ def test_xs_multiindex(self):
+
+ # GH2903
+ columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'hello'), ('b', 'world')], names=['lvl0', 'lvl1'])
+ df = DataFrame(np.random.randn(4, 4), columns=columns)
+ df.sortlevel(axis=1,inplace=True)
+ result = df.xs('a', level='lvl0', axis=1)
+ expected = df.iloc[:,0:2].loc[:,'a']
+ assert_frame_equal(result,expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #2903
| https://api.github.com/repos/pandas-dev/pandas/pulls/3125 | 2013-03-21T16:27:21Z | 2013-03-22T17:34:59Z | 2013-03-22T17:34:59Z | 2014-06-30T09:18:29Z |
BUG: GH2817 raise the correct KeyError that the multi-index is not sorted | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 8b42f2146a7cf..467fe46e81efd 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -274,6 +274,9 @@ def values(self):
def is_monotonic(self):
return self._engine.is_monotonic
+ def is_lexsorted_for_tuple(self, tup):
+ return True
+
@cache_readonly
def is_unique(self):
return self._engine.is_unique
@@ -1692,6 +1695,12 @@ def is_lexsorted(self):
"""
return self.lexsort_depth == self.nlevels
+ def is_lexsorted_for_tuple(self, tup):
+ """
+ Return True if we are correctly lexsorted given the passed tuple
+ """
+ return len(tup) <= self.lexsort_depth
+
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 16259fd39c0a9..05ee4728e05aa 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -311,6 +311,10 @@ def _getitem_lowerdim(self, tup):
except Exception, e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError
+
+ # raise the error if we are not sorted
+ if not ax0.is_lexsorted_for_tuple(tup):
+ raise e1
try:
loc = ax0.get_loc(tup[0])
except KeyError:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f1ac1a288d45a..207eb96a9804e 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -697,6 +697,23 @@ def test_loc_multiindex(self):
xp = mi_int.ix[4]
assert_frame_equal(rs,xp)
+ def test_ix_general(self):
+
+ # ix general issues
+
+ # GH 2817
+ data={'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
+ 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
+ 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
+ df = DataFrame(data).set_index(keys=['col','year'])
+
+ # this should raise correct error
+ self.assertRaises(KeyError, df.ix.__getitem__, tuple([4.0,2012]))
+
+ # this is ok
+ df.sortlevel(inplace=True)
+ df.ix[(4.0,2012)]
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #2817
| https://api.github.com/repos/pandas-dev/pandas/pulls/3124 | 2013-03-21T15:45:44Z | 2013-03-22T16:33:55Z | 2013-03-22T16:33:55Z | 2014-08-12T14:13:58Z |
TST: less likelihood of random failing tests | diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index e2d1d75e69329..a40284ff1b1ae 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -663,6 +663,7 @@ def test_append_with_data_columns(self):
with ensure_clean(self.path) as store:
df = tm.makeTimeDataFrame()
+ df.loc[:,'B'].iloc[0] = 1.
store.remove('df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
@@ -726,6 +727,8 @@ def check_col(key,name,size):
with ensure_clean(self.path) as store:
# multiple data columns
df_new = df.copy()
+ df_new.loc[:,'A'].iloc[0] = 1.
+ df_new.loc[:,'B'].iloc[0] = -1.
df_new['string'] = 'foo'
df_new['string'][1:4] = np.nan
df_new['string'][5:6] = 'bar'
@@ -743,9 +746,9 @@ def check_col(key,name,size):
# yield an empty frame
result = store.select('df', [Term('string', '=', 'foo'), Term(
- 'string2=bar'), Term('A>0'), Term('B<0')])
+ 'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
- df_new.string2 == 'bar') & (df_new.A > 0) & (df_new.B < 0)]
+ df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected)
with ensure_clean(self.path) as store:
| https://api.github.com/repos/pandas-dev/pandas/pulls/3120 | 2013-03-21T12:44:11Z | 2013-03-21T13:23:28Z | 2013-03-21T13:23:28Z | 2013-03-21T13:23:28Z | |
BUG: incorrect ensure_clean (was ensure_path) | diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index ec07077c7b5ea..968883ca64afd 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -964,7 +964,7 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
except Exception:
pass
- with ensure_path() as path:
+ with ensure_clean() as path:
plt.savefig(path)
if __name__ == '__main__':
| https://api.github.com/repos/pandas-dev/pandas/pulls/3116 | 2013-03-21T11:47:28Z | 2013-03-21T11:47:32Z | 2013-03-21T11:47:32Z | 2014-06-26T18:06:59Z | |
BUG: GH3109 fixed issues where passing an axis of 'columns' would fail | diff --git a/RELEASE.rst b/RELEASE.rst
index 7a72b9d00cec6..da1d3edd5f820 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -35,7 +35,6 @@ pandas 0.11.0
DataFrames and propogate in operations
- Add function to pandas.io.data for retrieving stock index components from
Yahoo! finance (GH2795_)
- - Add ``squeeze`` function to reduce dimensionality of 1-len objects
- Support slicing with time objects (GH2681_)
- Added ``.iloc`` attribute, to support strict integer based indexing, analagous to ``.ix`` (GH2922_)
- Added ``.loc`` attribute, to support strict label based indexing, analagous to ``.ix`` (GH3053_)
@@ -73,10 +72,7 @@ pandas 0.11.0
- New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are
provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_)
- - In ``HDFStore``, provide dotted attribute access to ``get`` from stores
- (e.g. ``store.df == store['df']``)
-
- - ``Squeeze`` to possibly remove length 1 dimensions from an object.
+ - Add ``squeeze`` method to possibly remove length 1 dimensions from an object.
.. ipython:: python
@@ -209,7 +205,6 @@ pandas 0.11.0
other values), (GH2850_)
- Unstack of a frame with no nans would always cause dtype upcasting (GH2929_)
- Fix scalar datetime.datetime parsing bug in read_csv (GH3071_)
- - Timedeltas are now fully operational (closes GH2898_)
- Fixed slow printing of large Dataframes, due to inefficient dtype
reporting (GH2807_)
- Fixed a segfault when using a function as grouper in groupby (GH3035_)
@@ -232,6 +227,7 @@ pandas 0.11.0
- Fix issue with indexing a series with a boolean key and specifiying a 1-len list on the rhs (GH2745_)
- Fixed bug in groupby apply when kernel generate list of arrays having unequal len (GH1738_)
- fixed handling of rolling_corr with center=True which could produce corr>1 (GH3155_)
+ - Fixed issues where indices can be passed as 'index/column' in addition to 0/1 for the axis parameter
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index daf31bf75f7a2..b689c7e473364 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2194,6 +2194,7 @@ def xs(self, key, axis=0, level=None, copy=True):
-------
xs : Series or DataFrame
"""
+ axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level)
@@ -2340,6 +2341,8 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
(left, right) : (DataFrame, type of other)
Aligned objects
"""
+ if axis is not None:
+ axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
@@ -2522,6 +2525,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
reindexed : same type as calling instance
"""
self._consolidate_inplace()
+ axis = self._get_axis_number(axis)
if axis == 0:
return self._reindex_index(labels, method, copy, level,
fill_value=fill_value,
@@ -2834,6 +2838,7 @@ def take(self, indices, axis=0, convert=True):
# check/convert indicies here
if convert:
+ axis = self._get_axis_number(axis)
indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
if self._is_mixed_type:
@@ -2922,6 +2927,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None):
subset=subset, axis=ax)
return result
+ axis = self._get_axis_number(axis)
if axis == 0:
agg_axis = 1
elif axis == 1:
@@ -3089,6 +3095,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False):
"""
from pandas.core.groupby import _lexsort_indexer
+ axis = self._get_axis_number(axis)
if axis not in [0, 1]:
raise ValueError('Axis must be 0 or 1, got %s' % str(axis))
@@ -3159,6 +3166,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
-------
sorted : DataFrame
"""
+ axis = self._get_axis_number(axis)
the_axis = self._get_axis(axis)
if not isinstance(the_axis, MultiIndex):
raise Exception('can only sort by level with a hierarchical index')
@@ -3202,6 +3210,7 @@ def swaplevel(self, i, j, axis=0):
"""
result = self.copy()
+ axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
@@ -3223,6 +3232,7 @@ def reorder_levels(self, order, axis=0):
-------
type of caller (new object)
"""
+ axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise Exception('Can only reorder levels on a hierarchical axis.')
@@ -3274,6 +3284,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
"""
self._consolidate_inplace()
+ axis = self._get_axis_number(axis)
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
@@ -3362,6 +3373,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
"""
self._consolidate_inplace()
+ axis = self._get_axis_number(axis)
if inplace:
import warnings
warnings.warn("replace with inplace=True will return None"
@@ -4057,6 +4069,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False,
if len(self.columns) == 0 and len(self.index) == 0:
return self
+ axis = self._get_axis_number(axis)
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
@@ -4478,6 +4491,7 @@ def corrwith(self, other, axis=0, drop=False):
-------
correls : Series
"""
+ axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
@@ -4580,6 +4594,7 @@ def count(self, axis=0, level=None, numeric_only=False):
-------
count : Series (or DataFrame if level specified)
"""
+ axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
@@ -4756,6 +4771,7 @@ def mad(self, axis=0, skipna=True, level=None):
frame = self._get_numeric_data()
+ axis = self._get_axis_number(axis)
if axis == 0:
demeaned = frame - frame.mean(axis=0)
else:
@@ -4811,12 +4827,14 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwds):
grouped = self.groupby(level=level, axis=axis)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwds)
+ axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwds)
return grouped.aggregate(applyf)
def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
+ axis = self._get_axis_number(axis)
f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
if numeric_only is None:
@@ -4875,6 +4893,7 @@ def idxmin(self, axis=0, skipna=True):
-------
idxmin : Series
"""
+ axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
@@ -4897,6 +4916,7 @@ def idxmax(self, axis=0, skipna=True):
-------
idxmax : Series
"""
+ axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
@@ -5030,6 +5050,7 @@ def rank(self, axis=0, numeric_only=None, method='average',
-------
ranks : DataFrame
"""
+ axis = self._get_axis_number(axis)
if numeric_only is None:
try:
ranks = algos.rank(self.values, axis=axis, method=method,
@@ -5070,6 +5091,7 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
if copy:
new_data = new_data.copy()
+ axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
@@ -5100,6 +5122,7 @@ def to_period(self, freq=None, axis=0, copy=True):
if copy:
new_data = new_data.copy()
+ axis = self._get_axis_number(axis)
if axis == 0:
if freq is None:
freq = self.index.freqstr or self.index.inferred_freq
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bc07a509e71af..580148e11cc7c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -34,28 +34,29 @@ def load(cls, path):
#----------------------------------------------------------------------
# Axis name business
- @classmethod
- def _get_axis_number(cls, axis):
- axis = cls._AXIS_ALIASES.get(axis, axis)
-
+ def _get_axis_number(self, axis):
+ axis = self._AXIS_ALIASES.get(axis, axis)
if com.is_integer(axis):
- if axis in cls._AXIS_NAMES:
+ if axis in self._AXIS_NAMES:
return axis
- else:
- raise Exception('No %d axis' % axis)
else:
- return cls._AXIS_NUMBERS[axis]
+ try:
+ return self._AXIS_NUMBERS[axis]
+ except:
+ pass
+ raise ValueError('No axis named %s' % axis)
- @classmethod
- def _get_axis_name(cls, axis):
- axis = cls._AXIS_ALIASES.get(axis, axis)
+ def _get_axis_name(self, axis):
+ axis = self._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, basestring):
- if axis in cls._AXIS_NUMBERS:
+ if axis in self._AXIS_NUMBERS:
return axis
- else:
- raise Exception('No axis named %s' % axis)
else:
- return cls._AXIS_NAMES[axis]
+ try:
+ return self._AXIS_NAMES[axis]
+ except:
+ pass
+ raise ValueError('No axis named %s' % axis)
def _get_axis(self, axis):
name = self._get_axis_name(axis)
@@ -147,6 +148,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
GroupBy object
"""
from pandas.core.groupby import groupby
+ axis = self._get_axis_number(axis)
return groupby(self, by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys)
@@ -247,6 +249,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
range from 0 through 4. Defaults to 0
"""
from pandas.tseries.resample import TimeGrouper
+ axis = self._get_axis_number(axis)
sampler = TimeGrouper(rule, label=label, closed=closed, how=how,
axis=axis, kind=kind, loffset=loffset,
fill_method=fill_method, convention=convention,
@@ -925,6 +928,7 @@ def rename_axis(self, mapper, axis=0, copy=True):
mapper_f = _get_rename_function(mapper)
+ axis = self._get_axis_number(axis)
if axis == 0:
new_data = self._data.rename_items(mapper_f, copydata=copy)
else:
@@ -951,6 +955,7 @@ def take(self, indices, axis=0, convert=True):
# check/convert indicies here
if convert:
+ axis = self._get_axis_number(axis)
indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
if axis == 0:
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index d33ce4c90244b..d1f87e4e7c932 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1051,6 +1051,7 @@ def xs(self, key, axis=1, copy=True):
-------
y : ndim(self)-1
"""
+ axis = self._get_axis_number(axis)
if axis == 0:
data = self[key]
if copy:
@@ -1320,10 +1321,11 @@ def shift(self, lags, axis='major'):
vslicer = slice(-lags, None)
islicer = slice(None, lags)
- if axis == 'major':
+ axis = self._get_axis_name(axis)
+ if axis == 'major_axis':
values = values[:, vslicer, :]
major_axis = major_axis[islicer]
- elif axis == 'minor':
+ elif axis == 'minor_axis':
values = values[:, :, vslicer]
minor_axis = minor_axis[islicer]
else:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 670b8d2dcfb8d..fc475122a4ec8 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1766,19 +1766,20 @@ def setUp(self):
index=['a', 'b', 'c'])
def test_get_axis(self):
- self.assert_(DataFrame._get_axis_name(0) == 'index')
- self.assert_(DataFrame._get_axis_name(1) == 'columns')
- self.assert_(DataFrame._get_axis_name('index') == 'index')
- self.assert_(DataFrame._get_axis_name('columns') == 'columns')
- self.assertRaises(Exception, DataFrame._get_axis_name, 'foo')
- self.assertRaises(Exception, DataFrame._get_axis_name, None)
-
- self.assert_(DataFrame._get_axis_number(0) == 0)
- self.assert_(DataFrame._get_axis_number(1) == 1)
- self.assert_(DataFrame._get_axis_number('index') == 0)
- self.assert_(DataFrame._get_axis_number('columns') == 1)
- self.assertRaises(Exception, DataFrame._get_axis_number, 2)
- self.assertRaises(Exception, DataFrame._get_axis_number, None)
+ f = self.frame
+ self.assert_(f._get_axis_name(0) == 'index')
+ self.assert_(f._get_axis_name(1) == 'columns')
+ self.assert_(f._get_axis_name('index') == 'index')
+ self.assert_(f._get_axis_name('columns') == 'columns')
+ self.assertRaises(Exception, f._get_axis_name, 'foo')
+ self.assertRaises(Exception, f._get_axis_name, None)
+
+ self.assert_(f._get_axis_number(0) == 0)
+ self.assert_(f._get_axis_number(1) == 1)
+ self.assert_(f._get_axis_number('index') == 0)
+ self.assert_(f._get_axis_number('columns') == 1)
+ self.assertRaises(Exception, f._get_axis_number, 2)
+ self.assertRaises(Exception, f._get_axis_number, None)
self.assert_(self.frame._get_axis(0) is self.frame.index)
self.assert_(self.frame._get_axis(1) is self.frame.columns)
@@ -8426,6 +8427,19 @@ def test_get_axis_etc(self):
self.assert_(f._get_axis(1) is f.columns)
self.assertRaises(Exception, f._get_axis_number, 2)
+ def test_axis_aliases(self):
+
+ f = self.frame
+
+ # reg name
+ expected = f.sum(axis=0)
+ result = f.sum(axis='index')
+ assert_series_equal(result, expected)
+
+ expected = f.sum(axis=1)
+ result = f.sum(axis='columns')
+ assert_series_equal(result, expected)
+
def test_combine_first_mixed(self):
a = Series(['a', 'b'], index=range(2))
b = Series(range(2), index=range(2))
| BUG: some operations were expecting an axis number, this fix allows passing the regular axis name as well, e.g.
`df.sum(axis='columns')` will now work
_note: narrowed the scope of this PR to just a bug fix_
```
In [3]: df = pd.DataFrame(np.random.rand(5,2),columns=['A','B'])
In [4]: df.sum(axis=1)
Out[4]:
0 1.150325
1 0.789142
2 0.581486
3 0.864212
4 0.731511
In [5]: df.sum(axis='columns')
---------------------------------------------------------------------------
Exception: Must have 0<= axis <= 1
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3110 | 2013-03-20T23:30:00Z | 2013-03-26T14:55:26Z | 2013-03-26T14:55:26Z | 2014-06-22T12:52:31Z |
BUG: GH 3106, not combining timedelta64[ns] blocks correctly | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index fd998e5060b5f..385695ec6cc50 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -689,6 +689,7 @@ def should_store(self, value):
np.datetime64, np.bool_))
_NS_DTYPE = np.dtype('M8[ns]')
+_TD_DTYPE = np.dtype('m8[ns]')
class DatetimeBlock(Block):
@@ -1907,16 +1908,22 @@ def _consolidate(blocks, items):
new_blocks = []
for dtype, group_blocks in grouper:
- new_block = _merge_blocks(list(group_blocks), items)
+ new_block = _merge_blocks(list(group_blocks), items, dtype)
new_blocks.append(new_block)
return new_blocks
-def _merge_blocks(blocks, items):
+def _merge_blocks(blocks, items, dtype=None):
if len(blocks) == 1:
return blocks[0]
- new_values = _vstack([b.values for b in blocks])
+
+ if dtype is None:
+ if len(set([ b.dtype for b in blocks ])) != 1:
+ raise AssertionError("_merge_blocks are invalid!")
+ dtype = blocks[0].dtype
+
+ new_values = _vstack([ b.values for b in blocks ], dtype)
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items)
return new_block.reindex_items_from(items)
@@ -1930,10 +1937,12 @@ def _block_shape(values, ndim=1, shape=None):
values = values.reshape(tuple((1,) + shape))
return values
-def _vstack(to_stack):
- if all(x.dtype == _NS_DTYPE for x in to_stack):
- # work around NumPy 1.6 bug
+def _vstack(to_stack, dtype):
+
+ # work around NumPy 1.6 bug
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
- return new_values.view(_NS_DTYPE)
+ return new_values.view(dtype)
+
else:
return np.vstack(to_stack)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0866e154f296b..1c3c1da8a80d7 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2951,6 +2951,16 @@ def test_operators_timedelta64(self):
# this is not
result = mixed.min(axis=1)
+ # GH 3106
+ df = DataFrame({ 'time' : date_range('20130102',periods=5), 'time2' : date_range('20130105',periods=5) })
+ df['off1'] = df['time2']-df['time']
+ self.assert_(df['off1'].dtype == 'timedelta64[ns]')
+
+ df['off2'] = df['time']-df['time2']
+ df._consolidate_inplace()
+ self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
+ self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
+
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
| consolidation of multiple timedelta64[ns] blocks was changing their dtype to `us` for a strange numpy reason (needed a view)
closes #3106
| https://api.github.com/repos/pandas-dev/pandas/pulls/3107 | 2013-03-20T18:26:17Z | 2013-03-20T19:08:24Z | 2013-03-20T19:08:24Z | 2013-03-20T19:08:24Z |
TST: added ensure_clean file cleanup context manager for file based test | diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 062ba0c5e3463..00005c7570a28 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -19,7 +19,9 @@
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
- assert_series_equal, network)
+ assert_series_equal,
+ network,
+ ensure_clean)
import pandas.util.testing as tm
import pandas as pd
@@ -275,31 +277,30 @@ def test_excel_roundtrip_xlsx(self):
def _check_extension(self, ext):
path = '__tmp_to_excel_from_excel__.' + ext
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # test roundtrip
- self.frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
- tm.assert_frame_equal(self.frame, recons)
-
- self.frame.to_excel(path, 'test1', index=False)
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=None)
- recons.index = self.frame.index
- tm.assert_frame_equal(self.frame, recons)
+ with ensure_clean(path) as path:
+ self.frame['A'][:5] = nan
- self.frame.to_excel(path, 'test1', na_rep='NA')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0, na_values=['NA'])
- tm.assert_frame_equal(self.frame, recons)
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
- os.remove(path)
+ # test roundtrip
+ self.frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', index=False)
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=None)
+ recons.index = self.frame.index
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', na_rep='NA')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0, na_values=['NA'])
+ tm.assert_frame_equal(self.frame, recons)
def test_excel_roundtrip_xls_mixed(self):
_skip_if_no_xlrd()
@@ -315,12 +316,11 @@ def test_excel_roundtrip_xlsx_mixed(self):
def _check_extension_mixed(self, ext):
path = '__tmp_to_excel_from_excel_mixed__.' + ext
- self.mixed_frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
- tm.assert_frame_equal(self.mixed_frame, recons)
-
- os.remove(path)
+ with ensure_clean(path) as path:
+ self.mixed_frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0)
+ tm.assert_frame_equal(self.mixed_frame, recons)
def test_excel_roundtrip_xls_tsframe(self):
_skip_if_no_xlrd()
@@ -337,12 +337,11 @@ def _check_extension_tsframe(self, ext):
df = tm.makeTimeDataFrame()[:5]
- df.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- tm.assert_frame_equal(df, recons)
-
- os.remove(path)
+ with ensure_clean(path) as path:
+ df.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1')
+ tm.assert_frame_equal(df, recons)
def test_excel_roundtrip_xls_int64(self):
_skip_if_no_excelsuite()
@@ -355,21 +354,20 @@ def test_excel_roundtrip_xlsx_int64(self):
def _check_extension_int64(self, ext):
path = '__tmp_to_excel_from_excel_int64__.' + ext
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # Test np.int64, values read come back as float
- frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np.int64)
- frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1').astype(np.int64)
- tm.assert_frame_equal(frame, recons, check_dtype=False)
+ with ensure_clean(path) as path:
+ self.frame['A'][:5] = nan
- os.remove(path)
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # Test np.int64, values read come back as float
+ frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np.int64)
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1').astype(np.int64)
+ tm.assert_frame_equal(frame, recons, check_dtype=False)
def test_excel_roundtrip_xls_bool(self):
_skip_if_no_excelsuite()
@@ -382,21 +380,20 @@ def test_excel_roundtrip_xlsx_bool(self):
def _check_extension_bool(self, ext):
path = '__tmp_to_excel_from_excel_bool__.' + ext
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # Test reading/writing np.bool8, roundtrip only works for xlsx
- frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1').astype(np.bool8)
- tm.assert_frame_equal(frame, recons)
+ with ensure_clean(path) as path:
+ self.frame['A'][:5] = nan
- os.remove(path)
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # Test reading/writing np.bool8, roundtrip only works for xlsx
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1').astype(np.bool8)
+ tm.assert_frame_equal(frame, recons)
def test_excel_roundtrip_xls_sheets(self):
_skip_if_no_excelsuite()
@@ -409,28 +406,28 @@ def test_excel_roundtrip_xlsx_sheets(self):
def _check_extension_sheets(self, ext):
path = '__tmp_to_excel_from_excel_sheets__.' + ext
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # Test writing to separate sheets
- writer = ExcelWriter(path)
- self.frame.to_excel(writer, 'test1')
- self.tsframe.to_excel(writer, 'test2')
- writer.save()
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
- tm.assert_frame_equal(self.frame, recons)
- recons = reader.parse('test2', index_col=0)
- tm.assert_frame_equal(self.tsframe, recons)
- np.testing.assert_equal(2, len(reader.sheet_names))
- np.testing.assert_equal('test1', reader.sheet_names[0])
- np.testing.assert_equal('test2', reader.sheet_names[1])
-
- os.remove(path)
+ with ensure_clean(path) as path:
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # Test writing to separate sheets
+ writer = ExcelWriter(path)
+ self.frame.to_excel(writer, 'test1')
+ self.tsframe.to_excel(writer, 'test2')
+ writer.save()
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+ recons = reader.parse('test2', index_col=0)
+ tm.assert_frame_equal(self.tsframe, recons)
+ np.testing.assert_equal(2, len(reader.sheet_names))
+ np.testing.assert_equal('test1', reader.sheet_names[0])
+ np.testing.assert_equal('test2', reader.sheet_names[1])
+
def test_excel_roundtrip_xls_colaliases(self):
_skip_if_no_excelsuite()
@@ -443,24 +440,23 @@ def test_excel_roundtrip_xlsx_colaliases(self):
def _check_extension_colaliases(self, ext):
path = '__tmp_to_excel_from_excel_aliases__.' + ext
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # column aliases
- col_aliases = Index(['AA', 'X', 'Y', 'Z'])
- self.frame2.to_excel(path, 'test1', header=col_aliases)
- reader = ExcelFile(path)
- rs = reader.parse('test1', index_col=0)
- xp = self.frame2.copy()
- xp.columns = col_aliases
- tm.assert_frame_equal(xp, rs)
-
- os.remove(path)
+ with ensure_clean(path) as path:
+ self.frame['A'][:5] = nan
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # column aliases
+ col_aliases = Index(['AA', 'X', 'Y', 'Z'])
+ self.frame2.to_excel(path, 'test1', header=col_aliases)
+ reader = ExcelFile(path)
+ rs = reader.parse('test1', index_col=0)
+ xp = self.frame2.copy()
+ xp.columns = col_aliases
+ tm.assert_frame_equal(xp, rs)
+
def test_excel_roundtrip_xls_indexlabels(self):
_skip_if_no_excelsuite()
self._check_extension_indexlabels('xls')
@@ -471,7 +467,9 @@ def test_excel_roundtrip_xlsx_indexlabels(self):
def _check_extension_indexlabels(self, ext):
path = '__tmp_to_excel_from_excel_indexlabels__.' + ext
- try:
+
+ with ensure_clean(path) as path:
+
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
@@ -501,12 +499,12 @@ def _check_extension_indexlabels(self, ext):
recons = reader.parse('test1', index_col=0).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
- finally:
- os.remove(path)
# test index_labels in same row as column names
path = '%s.xls' % tm.rands(10)
- try:
+
+ with ensure_clean(path) as path:
+
self.frame.to_excel(path, 'test1',
cols=['A', 'B', 'C', 'D'], index=False)
# take 'A' and 'B' as indexes (they are in same row as cols 'C',
@@ -517,8 +515,6 @@ def _check_extension_indexlabels(self, ext):
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons)
- finally:
- os.remove(path)
def test_excel_roundtrip_indexname(self):
_skip_if_no_xlrd()
@@ -529,31 +525,29 @@ def test_excel_roundtrip_indexname(self):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
- df.to_excel(path)
-
- xf = ExcelFile(path)
- result = xf.parse(xf.sheet_names[0], index_col=0)
+ with ensure_clean(path) as path:
+ df.to_excel(path)
- tm.assert_frame_equal(result, df)
- self.assertEqual(result.index.name, 'foo')
-
- try:
- os.remove(path)
- except os.error:
- pass
+ xf = ExcelFile(path)
+ result = xf.parse(xf.sheet_names[0], index_col=0)
+
+ tm.assert_frame_equal(result, df)
+ self.assertEqual(result.index.name, 'foo')
def test_excel_roundtrip_datetime(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
+
# datetime.date, not sure what to test here exactly
path = '__tmp_excel_roundtrip_datetime__.xls'
tsf = self.tsframe.copy()
- tsf.index = [x.date() for x in self.tsframe.index]
- tsf.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- tm.assert_frame_equal(self.tsframe, recons)
- os.remove(path)
+ with ensure_clean(path) as path:
+
+ tsf.index = [x.date() for x in self.tsframe.index]
+ tsf.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1')
+ tm.assert_frame_equal(self.tsframe, recons)
def test_excel_roundtrip_bool(self):
_skip_if_no_openpyxl()
@@ -561,24 +555,27 @@ def test_excel_roundtrip_bool(self):
# Test roundtrip np.bool8, does not seem to work for xls
path = '__tmp_excel_roundtrip_bool__.xlsx'
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- tm.assert_frame_equal(frame, recons)
- os.remove(path)
+ with ensure_clean(path) as path:
+
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1')
+ tm.assert_frame_equal(frame, recons)
def test_to_excel_periodindex(self):
_skip_if_no_excelsuite()
+
for ext in ['xls', 'xlsx']:
path = '__tmp_to_excel_periodindex__.' + ext
frame = self.tsframe
xp = frame.resample('M', kind='period')
- xp.to_excel(path, 'sht1')
- reader = ExcelFile(path)
- rs = reader.parse('sht1', index_col=0, parse_dates=True)
- tm.assert_frame_equal(xp, rs.to_period('M'))
- os.remove(path)
+ with ensure_clean(path) as path:
+ xp.to_excel(path, 'sht1')
+
+ reader = ExcelFile(path)
+ rs = reader.parse('sht1', index_col=0, parse_dates=True)
+ tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self):
_skip_if_no_xlrd()
@@ -599,18 +596,18 @@ def _check_excel_multiindex(self, ext):
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
- frame.to_excel(path, 'test1', header=False)
- frame.to_excel(path, 'test1', cols=['A', 'B'])
- # round trip
- frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- df = reader.parse('test1', index_col=[0, 1], parse_dates=False)
- tm.assert_frame_equal(frame, df)
- self.assertEqual(frame.index.names, df.index.names)
- self.frame.index = old_index # needed if setUP becomes a classmethod
+ with ensure_clean(path) as path:
+ frame.to_excel(path, 'test1', header=False)
+ frame.to_excel(path, 'test1', cols=['A', 'B'])
- os.remove(path)
+ # round trip
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ df = reader.parse('test1', index_col=[0, 1], parse_dates=False)
+ tm.assert_frame_equal(frame, df)
+ self.assertEqual(frame.index.names, df.index.names)
+ self.frame.index = old_index # needed if setUp becomes a classmethod
def test_to_excel_multiindex_dates(self):
_skip_if_no_xlrd()
@@ -630,22 +627,21 @@ def _check_excel_multiindex_dates(self, ext):
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
- tsframe.to_excel(path, 'test1', index_label=['time', 'foo'])
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=[0, 1])
-
- tm.assert_frame_equal(tsframe, recons, check_names=False)
- self.assertEquals(recons.index.names, ['time', 'foo'])
-
- # infer index
- tsframe.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- tm.assert_frame_equal(tsframe, recons)
+ with ensure_clean(path) as path:
+ tsframe.to_excel(path, 'test1', index_label=['time', 'foo'])
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=[0, 1])
+
+ tm.assert_frame_equal(tsframe, recons, check_names=False)
+ self.assertEquals(recons.index.names, ['time', 'foo'])
- self.tsframe.index = old_index # needed if setUP becomes classmethod
+ # infer index
+ tsframe.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1')
+ tm.assert_frame_equal(tsframe, recons)
- os.remove(path)
+ self.tsframe.index = old_index # needed if setUp becomes classmethod
def test_to_excel_float_format(self):
_skip_if_no_excelsuite()
@@ -654,15 +650,16 @@ def test_to_excel_float_format(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
- df.to_excel(filename, 'test1', float_format='%.2f')
- reader = ExcelFile(filename)
- rs = reader.parse('test1', index_col=None)
- xp = DataFrame([[0.12, 0.23, 0.57],
- [12.32, 123123.20, 321321.20]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
- tm.assert_frame_equal(rs, xp)
- os.remove(filename)
+ with ensure_clean(filename) as filename:
+ df.to_excel(filename, 'test1', float_format='%.2f')
+
+ reader = ExcelFile(filename)
+ rs = reader.parse('test1', index_col=None)
+ xp = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ tm.assert_frame_equal(rs, xp)
def test_to_excel_unicode_filename(self):
_skip_if_no_excelsuite()
@@ -680,15 +677,16 @@ def test_to_excel_unicode_filename(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
- df.to_excel(filename, 'test1', float_format='%.2f')
- reader = ExcelFile(filename)
- rs = reader.parse('test1', index_col=None)
- xp = DataFrame([[0.12, 0.23, 0.57],
- [12.32, 123123.20, 321321.20]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
- tm.assert_frame_equal(rs, xp)
- os.remove(filename)
+ with ensure_clean(filename) as filename:
+ df.to_excel(filename, 'test1', float_format='%.2f')
+
+ reader = ExcelFile(filename)
+ rs = reader.parse('test1', index_col=None)
+ xp = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ tm.assert_frame_equal(rs, xp)
def test_to_excel_styleconverter(self):
from pandas.io.parsers import CellStyleConverter
@@ -840,12 +838,11 @@ def test_excel_010_hemstring(self):
def roundtrip(df, header=True, parser_hdr=0):
path = '__tmp__test_xl_010_%s__.xls' % np.random.randint(1, 10000)
df.to_excel(path, header=header)
- xf = pd.ExcelFile(path)
- try:
+
+ with ensure_clean(path) as path:
+ xf = pd.ExcelFile(path)
res = xf.parse(xf.sheet_names[0], header=parser_hdr)
return res
- finally:
- os.remove(path)
nrows = 5
ncols = 3
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index facc5325a5628..9326ae00fd402 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -19,7 +19,9 @@
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
- assert_series_equal, network)
+ assert_series_equal,
+ network,
+ ensure_clean)
import pandas.util.testing as tm
import pandas as pd
@@ -1372,29 +1374,25 @@ def test_utf16_bom_skiprows(self):
path = '__%s__.csv' % tm.rands(10)
- for sep, dat in [('\t', data), (',', data2)]:
- for enc in ['utf-16', 'utf-16le', 'utf-16be']:
- bytes = dat.encode(enc)
- with open(path, 'wb') as f:
- f.write(bytes)
-
- s = BytesIO(dat.encode('utf-8'))
- if py3compat.PY3:
- # somewhat False since the code never sees bytes
- from io import TextIOWrapper
- s = TextIOWrapper(s, encoding='utf-8')
-
- result = self.read_csv(path, encoding=enc, skiprows=2,
- sep=sep)
- expected = self.read_csv(s, encoding='utf-8', skiprows=2,
- sep=sep)
-
- tm.assert_frame_equal(result, expected)
-
- try:
- os.remove(path)
- except os.error:
- pass
+ with ensure_clean(path) as path:
+ for sep, dat in [('\t', data), (',', data2)]:
+ for enc in ['utf-16', 'utf-16le', 'utf-16be']:
+ bytes = dat.encode(enc)
+ with open(path, 'wb') as f:
+ f.write(bytes)
+
+ s = BytesIO(dat.encode('utf-8'))
+ if py3compat.PY3:
+ # somewhat False since the code never sees bytes
+ from io import TextIOWrapper
+ s = TextIOWrapper(s, encoding='utf-8')
+
+ result = self.read_csv(path, encoding=enc, skiprows=2,
+ sep=sep)
+ expected = self.read_csv(s, encoding='utf-8', skiprows=2,
+ sep=sep)
+
+ tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = os.path.join(self.dirpath, 'utf16_ex.txt')
@@ -1722,32 +1720,27 @@ def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest
- with open('__foo__.txt', 'wb') as f:
- f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
+ with ensure_clean() as path:
+ with open(path, 'wb') as f:
+ f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
- with open('__foo__.txt', 'rb') as f:
- for line in f:
- if 'CCC' in line:
- break
+ with open(path, 'rb') as f:
+ for line in f:
+ if 'CCC' in line:
+ break
- try:
- read_table(f, squeeze=True, header=None, engine='c')
- except Exception:
- pass
- else:
- raise ValueError('this should not happen')
-
- result = read_table(f, squeeze=True, header=None,
- engine='python')
+ try:
+ read_table(f, squeeze=True, header=None, engine='c')
+ except Exception:
+ pass
+ else:
+ raise ValueError('this should not happen')
- expected = Series(['DDD', 'EEE', 'FFF', 'GGG'])
- tm.assert_series_equal(result, expected)
-
- try:
- os.remove('__foo__.txt')
- except os.error:
- pass
+ result = read_table(f, squeeze=True, header=None,
+ engine='python')
+ expected = Series(['DDD', 'EEE', 'FFF', 'GGG'])
+ tm.assert_series_equal(result, expected)
class TestCParserHighMemory(ParserTests, unittest.TestCase):
@@ -1924,41 +1917,30 @@ def test_decompression(self):
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
- try:
- tmp = gzip.GzipFile('__tmp__', mode='wb')
+ with ensure_clean() as path:
+ tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
- result = self.read_csv('__tmp__', compression='gzip')
+ result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
- result = self.read_csv(open('__tmp__', 'rb'), compression='gzip')
+ result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
- finally:
- # try:
- # os.remove('__tmp__')
- # except:
- # pass
- pass
- try:
- tmp = bz2.BZ2File('__tmp__', mode='wb')
+ with ensure_clean() as path:
+ tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
- result = self.read_csv('__tmp__', compression='bz2')
+ result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
- # result = self.read_csv(open('__tmp__', 'rb'), compression='bz2')
+ # result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
- '__tmp__', compression='bz3')
- finally:
- try:
- os.remove('__tmp__')
- except:
- pass
+ path, compression='bz3')
def test_decompression_regex_sep(self):
try:
@@ -1971,35 +1953,24 @@ def test_decompression_regex_sep(self):
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
- try:
- tmp = gzip.GzipFile('__tmp__', mode='wb')
+ with ensure_clean() as path:
+ tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
- result = self.read_csv('__tmp__', sep='::', compression='gzip')
+ result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
- finally:
- # try:
- # os.remove('__tmp__')
- # except:
- # pass
- pass
- try:
- tmp = bz2.BZ2File('__tmp__', mode='wb')
+ with ensure_clean() as path:
+ tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
- result = self.read_csv('__tmp__', sep='::', compression='bz2')
+ result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
- '__tmp__', compression='bz3')
- finally:
- try:
- os.remove('__tmp__')
- except:
- pass
+ path, compression='bz3')
def test_memory_map(self):
# it works!
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 626592958147e..0866e154f296b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -26,7 +26,8 @@
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
- assert_frame_equal)
+ assert_frame_equal,
+ ensure_clean)
from pandas.util import py3compat
from pandas.util.compat import OrderedDict
@@ -4405,66 +4406,67 @@ def test_float_none_comparison(self):
self.assertRaises(TypeError, df.__eq__, None)
def test_to_csv_from_csv(self):
- path = '__tmp_to_csv_from_csv__'
- self.frame['A'][:5] = nan
+ pname = '__tmp_to_csv_from_csv__'
+ with ensure_clean(pname) as path:
- self.frame.to_csv(path)
- self.frame.to_csv(path, cols=['A', 'B'])
- self.frame.to_csv(path, header=False)
- self.frame.to_csv(path, index=False)
+ self.frame['A'][:5] = nan
- # test roundtrip
+ self.frame.to_csv(path)
+ self.frame.to_csv(path, cols=['A', 'B'])
+ self.frame.to_csv(path, header=False)
+ self.frame.to_csv(path, index=False)
- self.tsframe.to_csv(path)
- recons = DataFrame.from_csv(path)
+ # test roundtrip
+ self.tsframe.to_csv(path)
+ recons = DataFrame.from_csv(path)
- assert_frame_equal(self.tsframe, recons)
+ assert_frame_equal(self.tsframe, recons)
- self.tsframe.to_csv(path, index_label='index')
- recons = DataFrame.from_csv(path, index_col=None)
- assert(len(recons.columns) == len(self.tsframe.columns) + 1)
+ self.tsframe.to_csv(path, index_label='index')
+ recons = DataFrame.from_csv(path, index_col=None)
+ assert(len(recons.columns) == len(self.tsframe.columns) + 1)
- # no index
- self.tsframe.to_csv(path, index=False)
- recons = DataFrame.from_csv(path, index_col=None)
- assert_almost_equal(self.tsframe.values, recons.values)
+ # no index
+ self.tsframe.to_csv(path, index=False)
+ recons = DataFrame.from_csv(path, index_col=None)
+ assert_almost_equal(self.tsframe.values, recons.values)
- # corner case
- dm = DataFrame({'s1': Series(range(3), range(3)),
- 's2': Series(range(2), range(2))})
- dm.to_csv(path)
- recons = DataFrame.from_csv(path)
- assert_frame_equal(dm, recons)
-
- # duplicate index
- df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
- columns=['x', 'y', 'z'])
- df.to_csv(path)
- result = DataFrame.from_csv(path)
- assert_frame_equal(result, df)
+ # corner case
+ dm = DataFrame({'s1': Series(range(3), range(3)),
+ 's2': Series(range(2), range(2))})
+ dm.to_csv(path)
+ recons = DataFrame.from_csv(path)
+ assert_frame_equal(dm, recons)
- midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
- df = DataFrame(np.random.randn(3, 3), index=midx,
- columns=['x', 'y', 'z'])
- df.to_csv(path)
- result = DataFrame.from_csv(path, index_col=[0, 1, 2],
- parse_dates=False)
- assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
-
- # column aliases
- col_aliases = Index(['AA', 'X', 'Y', 'Z'])
- self.frame2.to_csv(path, header=col_aliases)
- rs = DataFrame.from_csv(path)
- xp = self.frame2.copy()
- xp.columns = col_aliases
+ with ensure_clean(pname) as path:
- assert_frame_equal(xp, rs)
+ # duplicate index
+ df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
+ columns=['x', 'y', 'z'])
+ df.to_csv(path)
+ result = DataFrame.from_csv(path)
+ assert_frame_equal(result, df)
+
+ midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
+ df = DataFrame(np.random.randn(3, 3), index=midx,
+ columns=['x', 'y', 'z'])
+ df.to_csv(path)
+ result = DataFrame.from_csv(path, index_col=[0, 1, 2],
+ parse_dates=False)
+ assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
- self.assertRaises(ValueError, self.frame2.to_csv, path,
- header=['AA', 'X'])
+ # column aliases
+ col_aliases = Index(['AA', 'X', 'Y', 'Z'])
+ self.frame2.to_csv(path, header=col_aliases)
+ rs = DataFrame.from_csv(path)
+ xp = self.frame2.copy()
+ xp.columns = col_aliases
- os.remove(path)
+ assert_frame_equal(xp, rs)
+
+ self.assertRaises(ValueError, self.frame2.to_csv, path,
+ header=['AA', 'X'])
@slow
def test_to_csv_moar(self):
@@ -4472,14 +4474,10 @@ def test_to_csv_moar(self):
path = '__tmp_to_csv_moar__'
def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
dupe_col=False):
- try:
+
+ with ensure_clean(path) as path:
df.to_csv(path,encoding='utf8')
recons = DataFrame.from_csv(path)
- finally:
- try:
- os.remove(path)
- except:
- pass
def _to_uni(x):
if not isinstance(x,unicode):
@@ -4584,119 +4582,107 @@ def stuple_to_tuple(x):
def test_to_csv_from_csv_w_some_infs(self):
- path = '__%s__' % tm.rands(10)
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
- self.frame.to_csv(path)
- recons = DataFrame.from_csv(path)
-
- assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
- assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
+ with ensure_clean() as path:
+ self.frame.to_csv(path)
+ recons = DataFrame.from_csv(path)
- try:
- os.remove(path)
- except os.error:
- pass
+ assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
+ assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
- import tempfile
- path = tempfile.mktemp()
- path += '__tmp__'
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
- self.frame.to_csv(path)
- recons = DataFrame.from_csv(path)
+ with ensure_clean() as path:
+ self.frame.to_csv(path)
+ recons = DataFrame.from_csv(path)
- assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
- assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
-
- os.remove(path)
+ assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
+ assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_multiindex(self):
- path = '__tmp_to_csv_multiindex__'
+ pname = '__tmp_to_csv_multiindex__'
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
- frame.to_csv(path, header=False)
- frame.to_csv(path, cols=['A', 'B'])
-
- # round trip
- frame.to_csv(path)
- df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
-
- assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
- self.assertEqual(frame.index.names, df.index.names)
- self.frame.index = old_index # needed if setUP becomes a classmethod
-
- # try multiindex with dates
- tsframe = self.tsframe
- old_index = tsframe.index
- new_index = [old_index, np.arange(len(old_index))]
- tsframe.index = MultiIndex.from_arrays(new_index)
-
- tsframe.to_csv(path, index_label=['time', 'foo'])
- recons = DataFrame.from_csv(path, index_col=[0, 1])
- assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
- # do not load index
- tsframe.to_csv(path)
- recons = DataFrame.from_csv(path, index_col=None)
- np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
-
- # no index
- tsframe.to_csv(path, index=False)
- recons = DataFrame.from_csv(path, index_col=None)
- assert_almost_equal(recons.values, self.tsframe.values)
- self.tsframe.index = old_index # needed if setUP becomes classmethod
-
- os.remove(path)
-
- # empty
- tsframe[:0].to_csv(path)
- recons = DataFrame.from_csv(path)
- exp = tsframe[:0]
- exp.index = []
-
- self.assert_(recons.columns.equals(exp.columns))
- self.assert_(len(recons) == 0)
-
- os.remove(path)
+ with ensure_clean(pname) as path:
+ frame.to_csv(path, header=False)
+ frame.to_csv(path, cols=['A', 'B'])
+
+ # round trip
+ frame.to_csv(path)
+ df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
+
+ assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
+ self.assertEqual(frame.index.names, df.index.names)
+ self.frame.index = old_index # needed if setUp becomes a classmethod
+
+ # try multiindex with dates
+ tsframe = self.tsframe
+ old_index = tsframe.index
+ new_index = [old_index, np.arange(len(old_index))]
+ tsframe.index = MultiIndex.from_arrays(new_index)
+
+ tsframe.to_csv(path, index_label=['time', 'foo'])
+ recons = DataFrame.from_csv(path, index_col=[0, 1])
+ assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
+
+ # do not load index
+ tsframe.to_csv(path)
+ recons = DataFrame.from_csv(path, index_col=None)
+ np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
+
+ # no index
+ tsframe.to_csv(path, index=False)
+ recons = DataFrame.from_csv(path, index_col=None)
+ assert_almost_equal(recons.values, self.tsframe.values)
+ self.tsframe.index = old_index # needed if setUp becomes classmethod
+
+ with ensure_clean(pname) as path:
+ # empty
+ tsframe[:0].to_csv(path)
+ recons = DataFrame.from_csv(path)
+ exp = tsframe[:0]
+ exp.index = []
+
+ self.assert_(recons.columns.equals(exp.columns))
+ self.assert_(len(recons) == 0)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
- pth = '__tmp_to_csv_float32_nanrep__.csv'
- df.to_csv(pth, na_rep=999)
+ with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
+ df.to_csv(path, na_rep=999)
- lines = open(pth).readlines()
- self.assert_(lines[1].split(',')[2] == '999')
- os.remove(pth)
+ with open(path) as f:
+ lines = f.readlines()
+ self.assert_(lines[1].split(',')[2] == '999')
def test_to_csv_withcommas(self):
- path = '__tmp_to_csv_withcommas__'
# Commas inside fields should be correctly escaped when saving as CSV.
-
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
- df.to_csv(path)
- df2 = DataFrame.from_csv(path)
- assert_frame_equal(df2, df)
- os.remove(path)
+ with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
+ df.to_csv(path)
+ df2 = DataFrame.from_csv(path)
+ assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
- filename = '__tmp_to_csv_mixed__.csv'
+
def create_cols(name):
return [ "%s%03d" % (name,i) for i in xrange(5) ]
@@ -4720,17 +4706,17 @@ def create_cols(name):
for c in create_cols(n):
dtypes[c] = dtype
- df.to_csv(filename)
-
- rs = pan.read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
- assert_frame_equal(rs, df)
- os.remove(filename)
+ with ensure_clean() as filename:
+ df.to_csv(filename)
+ rs = pan.read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
+ assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
- filename = '__tmp_to_csv_dup_cols__.csv'
df = DataFrame(np.random.randn(1000, 30),columns=range(15)+range(15),dtype='float64')
- df.to_csv(filename) # single dtype, fine
+
+ with ensure_clean() as filename:
+ df.to_csv(filename) # single dtype, fine
df_float = DataFrame(np.random.randn(1000, 30),dtype='float64')
df_int = DataFrame(np.random.randn(1000, 30),dtype='int64')
@@ -4740,48 +4726,45 @@ def test_to_csv_dups_cols(self):
df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
#### this raises because we have duplicate column names across dtypes ####
- self.assertRaises(Exception, df.to_csv, filename)
+ with ensure_clean() as filename:
+ self.assertRaises(Exception, df.to_csv, filename)
def test_to_csv_chunking(self):
- filename = '__tmp_to_csv_chunking__.csv'
aa=DataFrame({'A':range(100000)})
-
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000,50000,100000]:
- aa.to_csv(filename,chunksize=chunksize)
- rs = pan.read_csv(filename,index_col=0)
- assert_frame_equal(rs, aa)
-
- os.remove(filename)
+ with ensure_clean() as filename:
+ aa.to_csv(filename,chunksize=chunksize)
+ rs = pan.read_csv(filename,index_col=0)
+ assert_frame_equal(rs, aa)
def test_to_csv_bug(self):
- path = '__tmp_to_csv_bug__.csv'
f1 = StringIO('a,1.0\nb,2.0')
df = DataFrame.from_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
- newdf.to_csv(path)
- recons = pan.read_csv(path, index_col=0)
- assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1
+ with ensure_clean() as path:
+ newdf.to_csv(path)
- os.remove(path)
+ recons = pan.read_csv(path, index_col=0)
+ assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1
def test_to_csv_unicode(self):
- path = '__tmp_to_csv_unicode__.csv'
- df = DataFrame({u'c/\u03c3': [1, 2, 3]})
- df.to_csv(path, encoding='UTF-8')
- df2 = pan.read_csv(path, index_col=0, encoding='UTF-8')
- assert_frame_equal(df, df2)
- df.to_csv(path, encoding='UTF-8', index=False)
- df2 = pan.read_csv(path, index_col=None, encoding='UTF-8')
- assert_frame_equal(df, df2)
+ df = DataFrame({u'c/\u03c3': [1, 2, 3]})
+ with ensure_clean() as path:
+
+ df.to_csv(path, encoding='UTF-8')
+ df2 = pan.read_csv(path, index_col=0, encoding='UTF-8')
+ assert_frame_equal(df, df2)
- os.remove(path)
+ df.to_csv(path, encoding='UTF-8', index=False)
+ df2 = pan.read_csv(path, index_col=None, encoding='UTF-8')
+ assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
@@ -4805,18 +4788,20 @@ def test_to_csv_stringio(self):
assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name
def test_to_csv_float_format(self):
- filename = '__tmp_to_csv_float_format__.csv'
+
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
- df.to_csv(filename, float_format='%.2f')
- rs = pan.read_csv(filename, index_col=0)
- xp = DataFrame([[0.12, 0.23, 0.57],
- [12.32, 123123.20, 321321.20]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
- assert_frame_equal(rs, xp)
- os.remove(filename)
+ with ensure_clean() as filename:
+
+ df.to_csv(filename, float_format='%.2f')
+
+ rs = pan.read_csv(filename, index_col=0)
+ xp = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ assert_frame_equal(rs, xp)
def test_to_csv_quoting(self):
import csv
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 89cc407daf3f4..015a2ff9379b9 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -7,6 +7,7 @@
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
import pandas.util.testing as tm
+from pandas.util.testing import ensure_clean
import numpy as np
@@ -692,9 +693,6 @@ def test_grouped_hist(self):
for ax in axes.ravel():
self.assert_(len(ax.patches) > 0)
-PNG_PATH = 'tmp.png'
-
-
def _check_plot_works(f, *args, **kwargs):
import matplotlib.pyplot as plt
@@ -711,9 +709,9 @@ def _check_plot_works(f, *args, **kwargs):
assert(ret is not None) # do something more intelligent
except Exception:
pass
- plt.savefig(PNG_PATH)
- os.remove(PNG_PATH)
+ with ensure_clean() as path:
+ plt.savefig(path)
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index d857e999bdd33..84f5f3afab6db 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -18,7 +18,8 @@
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
- assert_almost_equal)
+ assert_almost_equal,
+ ensure_clean)
import pandas.core.panel as panelm
import pandas.util.testing as tm
@@ -1317,12 +1318,12 @@ def test_to_excel(self):
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
- self.panel.to_excel(path)
- reader = ExcelFile(path)
- for item, df in self.panel.iterkv():
- recdf = reader.parse(str(item), index_col=0)
- assert_frame_equal(df, recdf)
- os.remove(path)
+ with ensure_clean(path) as path:
+ self.panel.to_excel(path)
+ reader = ExcelFile(path)
+ for item, df in self.panel.iterkv():
+ recdf = reader.parse(str(item), index_col=0)
+ assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 5bb452deb1d4d..5981640b4159c 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -1043,30 +1043,9 @@ def test_group_agg(self):
def test_from_frame_level1_unsorted(self):
raise nose.SkipTest
- # tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2),
- # ('AAPL', 1), ('MSFT', 1)]
- # midx = MultiIndex.from_tuples(tuples)
- # df = DataFrame(np.random.rand(5,4), index=midx)
- # p = df.to_panel()
- # assert_frame_equal(p.minor_xs(2), df.ix[:,2].sort_index())
def test_to_excel(self):
raise nose.SkipTest
- # try:
- # import xlwt
- # import xlrd
- # import openpyxl
- # except ImportError:
- # raise nose.SkipTest
-
- # for ext in ['xls', 'xlsx']:
- # path = '__tmp__.' + ext
- # self.panel.to_excel(path)
- # reader = ExcelFile(path)
- # for item, df in self.panel.iteritems():
- # recdf = reader.parse(str(item),index_col=0)
- # assert_frame_equal(df, recdf)
- # os.remove(path)
if __name__ == '__main__':
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 74b41f4ef1cd7..20c57ebbd0db6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -24,7 +24,9 @@
from pandas.util.py3compat import StringIO
from pandas.util import py3compat
-from pandas.util.testing import assert_series_equal, assert_almost_equal
+from pandas.util.testing import (assert_series_equal,
+ assert_almost_equal,
+ ensure_clean)
import pandas.util.testing as tm
@@ -178,10 +180,11 @@ def test_pickle_preserve_name(self):
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip_name(self, obj):
- obj.save('__tmp_name__')
- unpickled = Series.load('__tmp_name__')
- os.remove('__tmp_name__')
- return unpickled
+
+ with ensure_clean() as path:
+ obj.save(path)
+ unpickled = Series.load(path)
+ return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
@@ -610,10 +613,11 @@ def test_pickle(self):
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
- obj.save('__tmp_pickle_roundtrip__')
- unpickled = Series.load('__tmp_pickle_roundtrip__')
- os.remove('__tmp_pickle_roundtrip__')
- return unpickled
+
+ with ensure_clean() as path:
+ obj.save(path)
+ unpickled = Series.load(path)
+ return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
@@ -2557,43 +2561,42 @@ def test_rank(self):
assert_series_equal(iranks, exp)
def test_from_csv(self):
- path = '_foo_from_csv'
- self.ts.to_csv(path)
- ts = Series.from_csv(path)
- assert_series_equal(self.ts, ts)
- self.assertTrue(ts.index.name is None)
-
- self.series.to_csv(path)
- series = Series.from_csv(path)
- self.assert_(series.name is None)
- self.assert_(series.index.name is None)
- assert_series_equal(self.series, series)
-
- outfile = open(path, 'w')
- outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
- outfile.close()
- series = Series.from_csv(path, sep='|')
- checkseries = Series(
- {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
- assert_series_equal(checkseries, series)
-
- series = Series.from_csv(path, sep='|', parse_dates=False)
- checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
- assert_series_equal(checkseries, series)
-
- os.remove(path)
+
+ with ensure_clean() as path:
+ self.ts.to_csv(path)
+ ts = Series.from_csv(path)
+ assert_series_equal(self.ts, ts)
+ self.assertTrue(ts.index.name is None)
+
+ self.series.to_csv(path)
+ series = Series.from_csv(path)
+ self.assert_(series.name is None)
+ self.assert_(series.index.name is None)
+ assert_series_equal(self.series, series)
+
+ outfile = open(path, 'w')
+ outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
+ outfile.close()
+ series = Series.from_csv(path, sep='|')
+ checkseries = Series(
+ {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
+ assert_series_equal(checkseries, series)
+
+ series = Series.from_csv(path, sep='|', parse_dates=False)
+ checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
+ assert_series_equal(checkseries, series)
def test_to_csv(self):
- self.ts.to_csv('_foo')
- lines = open('_foo', 'U').readlines()
- assert(lines[1] != '\n')
+ with ensure_clean() as path:
+ self.ts.to_csv(path)
- self.ts.to_csv('_foo', index=False)
- arr = np.loadtxt('_foo')
- assert_almost_equal(arr, self.ts.values)
+ lines = open(path, 'U').readlines()
+ assert(lines[1] != '\n')
- os.remove('_foo')
+ self.ts.to_csv(path, index=False)
+ arr = np.loadtxt(path)
+ assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
@@ -2620,14 +2623,14 @@ def test_to_dict(self):
self.assert_(np.array_equal(Series(self.ts.to_dict()), self.ts))
def test_to_csv_float_format(self):
- filename = '__tmp__.csv'
- ser = Series([0.123456, 0.234567, 0.567567])
- ser.to_csv(filename, float_format='%.2f')
- rs = Series.from_csv(filename)
- xp = Series([0.12, 0.23, 0.57])
- assert_series_equal(rs, xp)
- os.remove(filename)
+ with ensure_clean() as filename:
+ ser = Series([0.123456, 0.234567, 0.567567])
+ ser.to_csv(filename, float_format='%.2f')
+
+ rs = Series.from_csv(filename)
+ xp = Series([0.12, 0.23, 0.57])
+ assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(['jack and jill', 'jesse and frank'])
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 69e9651258340..ec07077c7b5ea 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -15,7 +15,7 @@
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
-from pandas.util.testing import assert_series_equal
+from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
@@ -933,9 +933,6 @@ def test_mpl_nopandas(self):
assert_array_equal(np.array([x.toordinal() for x in dates]),
line2.get_xydata()[:, 0])
-PNG_PATH = 'tmp.png'
-
-
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
@@ -966,8 +963,9 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
assert(ret is not None) # do something more intelligent
except Exception:
pass
- plt.savefig(PNG_PATH)
- os.remove(PNG_PATH)
+
+ with ensure_path() as path:
+ plt.savefig(path)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 758629a4293b2..bcd3fb6a35cb6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -6,6 +6,7 @@
import random
import string
import sys
+import tempfile
from contextlib import contextmanager # contextlib is available since 2.5
@@ -74,6 +75,24 @@ def set_trace():
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
+#------------------------------------------------------------------------------
+# contextmanager to ensure the file cleanup
+from contextlib import contextmanager
+@contextmanager
+def ensure_clean(filename = None):
+ # if we are not passed a filename, generate a temporary
+ if filename is None:
+ filename = tempfile.mkstemp()[1]
+
+ try:
+ yield filename
+ finally:
+ import os
+ try:
+ os.remove(filename)
+ except:
+ pass
+
#------------------------------------------------------------------------------
# Comparators
| avoids leaving temp files around if they are interrupted (during dev),
passes thru exceptions
idiom is:
```
with ensure_clean(path=None) as path:
.....
```
rather than
```
try:
....
finally:
os.remove(path)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3103 | 2013-03-20T12:36:42Z | 2013-03-20T14:44:44Z | 2013-03-20T14:44:44Z | 2013-03-20T14:44:44Z |
BLD: fix time module import conflict | diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 7a5bb0f569349..950dddde06601 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -5,7 +5,12 @@ from numpy cimport (int32_t, int64_t, import_array, ndarray,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
import numpy as np
-from cpython cimport *
+from cpython cimport (
+ PyTypeObject,
+ PyFloat_Check,
+ PyObject_RichCompareBool,
+ PyString_Check
+)
# Cython < 0.17 doesn't have this in cpython
cdef extern from "Python.h":
@@ -909,12 +914,12 @@ def array_to_timedelta64(ndarray[object] values, coerce=True):
val = _delta_to_nanoseconds(np.timedelta64(val).item())
result[i] = val
-
+
elif util._checknull(val) or val == iNaT or val is NaT:
result[i] = iNaT
else:
-
+
# just return, don't convert
if not coerce:
return values.copy()
@@ -925,7 +930,7 @@ def array_to_timedelta64(ndarray[object] values, coerce=True):
def repr_timedelta64(object value):
""" provide repr for timedelta64 """
-
+
ivalue = value.view('i8')
# put frac in seconds
@@ -1009,9 +1014,9 @@ def array_strptime(ndarray[object] values, object fmt):
result = np.empty(n, dtype='M8[ns]')
iresult = result.view('i8')
-
+
dts.us = dts.ps = dts.as = 0
-
+
cdef dict _parse_code_table = {
'y': 0,
'Y': 1,
| See #3097.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3098 | 2013-03-19T21:21:38Z | 2013-03-20T00:30:53Z | 2013-03-20T00:30:53Z | 2013-03-20T00:31:03Z |
PERF: added convert=boolean to take to enable negative index conversion | diff --git a/RELEASE.rst b/RELEASE.rst
index 4740c29de1b14..3dbfa080021e3 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -44,6 +44,8 @@ pandas 0.11.0
- Moved functionaility from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer
(via ``_ixs`` methods in each object)
- Added support for expression evaluation using the ``numexpr`` library
+ - Added ``convert=boolean`` to ``take`` routines to translate negative indices to positive,
+ defaults to True
**Improvements to existing features**
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 89ca0087d8d8f..6ef2ad642612c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1854,8 +1854,7 @@ def _ixs(self, i, axis=0, copy=False):
else:
label = self.columns[i]
if isinstance(label, Index):
-
- return self.take(i, axis=1)
+ return self.take(i, axis=1, convert=True)
values = self._data.iget(i)
return self._col_klass.from_array(values, index=self.index,
@@ -1907,10 +1906,10 @@ def _getitem_array(self, key):
# be reindexed to match DataFrame rows
key = _check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
- return self.take(indexer, axis=0)
+ return self.take(indexer, axis=0, convert=False)
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
- return self.take(indexer, axis=1)
+ return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
@@ -2242,9 +2241,9 @@ def xs(self, key, axis=0, level=None, copy=True):
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
- return self.take(inds, axis=axis)
+ return self.take(inds, axis=axis, convert=False)
else:
- return self.take(loc, axis=axis)
+ return self.take(loc, axis=axis, convert=True)
if not np.isscalar(loc):
new_index = self.index[loc]
@@ -2820,7 +2819,7 @@ def _maybe_cast(values):
delevel = deprecate('delevel', reset_index)
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, convert=True):
"""
Analogous to ndarray.take, return DataFrame corresponding to requested
indices along an axis
@@ -2829,6 +2828,8 @@ def take(self, indices, axis=0):
----------
indices : list / array of ints
axis : {0, 1}
+ convert : convert indices for negative values, check bounds, default True
+ mainly useful for an user routine calling
Returns
-------
@@ -2836,7 +2837,8 @@ def take(self, indices, axis=0):
"""
# check/convert indicies here
- indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
+ if convert:
+ indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
if self._is_mixed_type:
if axis == 0:
@@ -2950,7 +2952,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None):
else:
raise ValueError('must specify how or thresh')
- return self.take(mask.nonzero()[0], axis=axis)
+ return self.take(mask.nonzero()[0], axis=axis, convert=False)
def drop_duplicates(self, cols=None, take_last=False, inplace=False):
"""
@@ -3141,7 +3143,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False):
" from pandas 0.11 onward", FutureWarning)
return self
else:
- return self.take(indexer, axis=axis)
+ return self.take(indexer, axis=axis, convert=False)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
"""
@@ -3187,7 +3189,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
" from pandas 0.11 onward", FutureWarning)
return self
else:
- return self.take(indexer, axis=axis)
+ return self.take(indexer, axis=axis, convert=False)
def swaplevel(self, i, j, axis=0):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2c5c8c4d088be..d1c2db67713d4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -189,7 +189,7 @@ def at_time(self, time, asof=False):
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
- return self.take(indexer)
+ return self.take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
@@ -213,7 +213,7 @@ def between_time(self, start_time, end_time, include_start=True,
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
- return self.take(indexer)
+ return self.take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
@@ -934,7 +934,7 @@ def rename_axis(self, mapper, axis=0, copy=True):
return self._constructor(new_data)
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, convert=True):
"""
Analogous to ndarray.take
@@ -942,6 +942,7 @@ def take(self, indices, axis=0):
----------
indices : list / array of ints
axis : int, default 0
+ convert : translate neg to pos indices (default)
Returns
-------
@@ -949,7 +950,8 @@ def take(self, indices, axis=0):
"""
# check/convert indicies here
- indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
+ if convert:
+ indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
if axis == 0:
labels = self._get_axis(axis)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index cab9e967519de..16259fd39c0a9 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -391,7 +391,7 @@ def _reindex(keys, level=None):
if com._is_bool_indexer(key):
key = _check_bool_indexer(labels, key)
inds, = key.nonzero()
- return self.obj.take(inds, axis=axis)
+ return self.obj.take(inds, axis=axis, convert=False)
else:
if isinstance(key, Index):
# want Index objects to pass through untouched
@@ -408,7 +408,7 @@ def _reindex(keys, level=None):
if labels.inferred_type == 'mixed-integer':
indexer = labels.get_indexer(keyarr)
if (indexer >= 0).all():
- self.obj.take(indexer, axis=axis)
+ self.obj.take(indexer, axis=axis, convert=True)
else:
return self.obj.take(keyarr, axis=axis)
elif not labels.inferred_type == 'integer':
@@ -426,7 +426,7 @@ def _reindex(keys, level=None):
return _reindex(keyarr, level=level)
else:
mask = labels.isin(keyarr)
- return self.obj.take(mask.nonzero()[0], axis=axis)
+ return self.obj.take(mask.nonzero()[0], axis=axis, convert=False)
def _convert_to_indexer(self, obj, axis=0):
"""
@@ -644,7 +644,7 @@ def _getbool_axis(self, key, axis=0):
key = _check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
- return self.obj.take(inds, axis=axis)
+ return self.obj.take(inds, axis=axis, convert=False)
except (Exception), detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index fcefbbe216aa3..0c006d4c60904 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2550,7 +2550,7 @@ def reindex_like(self, other, method=None, limit=None, fill_value=pa.NA):
return self.reindex(other.index, method=method, limit=limit,
fill_value=fill_value)
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, convert=True):
"""
Analogous to ndarray.take, return Series corresponding to requested
indices
@@ -2558,6 +2558,7 @@ def take(self, indices, axis=0):
Parameters
----------
indices : list / array of ints
+ convert : translate negative to positive indices (default)
Returns
-------
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 6e003d5a032db..b2dbca70f3b77 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -10,7 +10,7 @@
from pandas.core.common import _pickle_array, _unpickle_array, _try_sort
from pandas.core.index import Index, MultiIndex, _ensure_index
-from pandas.core.indexing import _check_slice_bounds
+from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
@@ -634,7 +634,7 @@ def _rename_columns_inplace(self, mapper):
self.columns = new_columns
self._series = new_series
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, convert=True):
"""
Analogous to ndarray.take, return SparseDataFrame corresponding to
requested indices along an axis
@@ -643,12 +643,20 @@ def take(self, indices, axis=0):
----------
indices : list / array of ints
axis : {0, 1}
+ convert : convert indices for negative values, check bounds, default True
+ mainly useful for an user routine calling
Returns
-------
taken : SparseDataFrame
"""
+
indices = com._ensure_platform_int(indices)
+
+ # check/convert indicies here
+ if convert:
+ indices = _maybe_convert_indices(indices, len(self._get_axis(axis)))
+
new_values = self.values.take(indices, axis=axis)
if axis == 0:
new_columns = self.columns
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 5ed86b5f1c746..b799188170e6f 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -468,7 +468,7 @@ def fillna(self, value=None, method=None, inplace=False, limit=None):
else:
return result
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, convert=True):
"""
Sparse-compatible version of ndarray.take
| mainly for user facing routines (defaults to True)
was causing a perf regression (see #3089)
#3033 will 'fix' this as will create a new internal routine to do takes
(and make DataFrame/take) user facing only
| https://api.github.com/repos/pandas-dev/pandas/pulls/3093 | 2013-03-19T18:51:23Z | 2013-03-19T18:51:33Z | 2013-03-19T18:51:33Z | 2013-03-19T23:54:32Z |
ENH: better errors message on storage failure in HDFStore | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 4d99ebdce86dc..0a55d78dd24c3 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -99,6 +99,12 @@ Grouping
`Create multiple aggregated columns
<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__
+Expanding Data
+~~~~~~~~~~~~~~
+
+`Alignment and to-date
+<http://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions>`__
+
Splitting
~~~~~~~~~
@@ -171,8 +177,8 @@ SQL
`Reading from databases with SQL
<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__
-HDF5
-~~~~
+HDFStore
+~~~~~~~~
`Simple Queries with a Timestamp Index
<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
@@ -186,6 +192,9 @@ HDF5
`Large Data work flows
<http://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas>`__
+`Troubleshoot HDFStore exceptions
+<http://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type>`__
+
Storing Attributes to a group node
.. ipython:: python
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ca2e3b6e04f19..929d9182f35a9 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -18,7 +18,7 @@
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
-from pandas.core.common import adjoin
+from pandas.core.common import adjoin, isnull
from pandas.core.algorithms import match, unique, factorize
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe, _try_sort
@@ -727,8 +727,8 @@ def _create_storer(self, group, value = None, table = False, append = False, **k
""" return a suitable Storer class to operate """
def error(t):
- raise Exception("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
- (t,group,type(value),table,append,kwargs))
+ raise NotImplementedError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
+ (t,group,type(value),table,append,kwargs))
pt = getattr(group._v_attrs,'pandas_type',None)
tt = getattr(group._v_attrs,'table_type',None)
@@ -768,7 +768,12 @@ def error(t):
if value is not None:
if pt == 'frame_table':
- tt = 'appendable_frame' if value.index.nlevels == 1 else 'appendable_multiframe'
+ index = getattr(value,'index',None)
+ if index is not None:
+ if index.nlevels == 1:
+ tt = 'appendable_frame'
+ elif index.nlevels > 1:
+ tt = 'appendable_multiframe'
elif pt == 'wide_table':
tt = 'appendable_panel'
elif pt == 'ndim_table':
@@ -1187,7 +1192,23 @@ def get_atom_string(self, block, itemsize):
def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
# fill nan items with myself
- data = block.fillna(nan_rep).values
+ block = block.fillna(nan_rep)
+ data = block.values
+
+ # see if we have a valid string type
+ inferred_type = lib.infer_dtype(data.ravel())
+ if inferred_type != 'string':
+
+ # we cannot serialize this data, so report an exception on a column by column basis
+ for item in block.items:
+
+ col = block.get(item)
+ inferred_type = lib.infer_dtype(col.ravel())
+ if inferred_type != 'string':
+ raise NotImplementedError("cannot serialize the column [%s] because "
+ "its data contents are [%s] object dtype" %
+ (item,inferred_type))
+
# itemsize is the maximum length of a string (along any dimension)
itemsize = lib.max_len_string_array(data.ravel())
@@ -2234,7 +2255,11 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
# set the default axes if needed
if axes is None:
- axes = _AXES_MAP[type(obj)]
+ try:
+ axes = _AXES_MAP[type(obj)]
+ except:
+ raise NotImplementedError("cannot properly create the storer for: [group->%s,value->%s]" %
+ (self.group._v_name,type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
@@ -2251,7 +2276,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
# currently support on ndim-1 axes
if len(axes) != self.ndim - 1:
- raise Exception("currenctly only support ndim-1 indexers in an AppendableTable")
+ raise Exception("currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
@@ -2335,10 +2360,18 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
name = b.items[0]
self.data_columns.append(name)
- try:
- existing_col = existing_table.values_axes[
- i] if existing_table is not None and validate else None
+ # make sure that we match up the existing columns
+ # if we have an existing table
+ if existing_table is not None and validate:
+ try:
+ existing_col = existing_table.values_axes[i]
+ except:
+ raise Exception("Incompatible appended table [%s] with existing table [%s]" %
+ (blocks,existing_table.values_axes))
+ else:
+ existing_col = None
+ try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b,
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 8cf40a77d639f..e2d1d75e69329 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1002,6 +1002,44 @@ def test_append_misc(self):
result = store.select('df1')
tm.assert_frame_equal(result, df)
+ def test_append_raise(self):
+
+ with ensure_clean(self.path) as store:
+
+ # test append with invalid input to get good error messages
+
+ # list in column
+ df = tm.makeDataFrame()
+ df['invalid'] = [['a']] * len(df)
+ self.assert_(df.dtypes['invalid'] == np.object_)
+ self.assertRaises(NotImplementedError, store.append,'df',df)
+
+ # multiple invalid columns
+ df['invalid2'] = [['a']] * len(df)
+ df['invalid3'] = [['a']] * len(df)
+ self.assertRaises(NotImplementedError, store.append,'df',df)
+
+ # datetime with embedded nans as object
+ df = tm.makeDataFrame()
+ s = Series(datetime.datetime(2001,1,2),index=df.index,dtype=object)
+ s[0:5] = np.nan
+ df['invalid'] = s
+ self.assert_(df.dtypes['invalid'] == np.object_)
+ self.assertRaises(NotImplementedError, store.append,'df', df)
+
+ # directy ndarray
+ self.assertRaises(NotImplementedError, store.append,'df',np.arange(10))
+
+ # series directly
+ self.assertRaises(NotImplementedError, store.append,'df',Series(np.arange(10)))
+
+ # appending an incompatbile table
+ df = tm.makeDataFrame()
+ store.append('df',df)
+
+ df['foo'] = 'foo'
+ self.assertRaises(Exception, store.append,'df',df)
+
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
| https://api.github.com/repos/pandas-dev/pandas/pulls/3088 | 2013-03-19T14:30:04Z | 2013-03-19T14:30:11Z | 2013-03-19T14:30:11Z | 2014-06-26T17:54:21Z | |
Resample docstring default values changed | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2c5c8c4d088be..970c345d04194 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -231,9 +231,9 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
downsampling
axis : int, optional, default 0
fill_method : string, fill_method for upsampling, default None
- closed : {'right', 'left'}, default None
+ closed : {'right', 'left'}
Which side of bin interval is closed
- label : {'right', 'left'}, default None
+ label : {'right', 'left'}
Which bin edge label to label bucket with
convention : {'start', 'end', 's', 'e'}
kind: "period"/"timestamp"
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index fbfbbc7ccdd13..b20303efe222f 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -24,8 +24,8 @@ class TimeGrouper(CustomGrouper):
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
- closed : closed end of interval; left (default) or right
- label : interval boundary to use for labeling; left (default) or right
+ closed : closed end of interval; left or right
+ label : interval boundary to use for labeling; left or right
nperiods : optional, integer
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
| This docstring seems better, since TimeGrouper specifies it [that way](https://github.com/pydata/pandas/blob/master/pandas/tseries/resample.py#L27) too:
| https://api.github.com/repos/pandas-dev/pandas/pulls/3085 | 2013-03-18T20:30:09Z | 2013-03-25T15:34:30Z | 2013-03-25T15:34:30Z | 2014-07-09T07:31:43Z |
Added count of columns when DataFrame is printed out | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 40a6b27ee32a8..52f92acfd340d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1651,7 +1651,7 @@ def info(self, verbose=True, buf=None, max_cols=None):
max_cols = get_option('display.max_info_columns')
if verbose and len(self.columns) <= max_cols:
- lines.append('Data columns:')
+ lines.append('Data columns (total %d columns):' % len(self.columns))
space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4
counts = self.count()
if len(cols) != len(counts):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3084 | 2013-03-18T19:25:47Z | 2013-03-18T21:18:40Z | 2013-03-18T21:18:39Z | 2013-03-18T21:18:48Z | |
DOC/CLN: cookbook addition, minor cleaning in internals.py | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index a83bca267213f..4d99ebdce86dc 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -99,6 +99,12 @@ Grouping
`Create multiple aggregated columns
<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__
+Splitting
+~~~~~~~~~
+
+`Splitting a frame
+<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__
+
Timeseries
----------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 59f750d4570ad..4cbb09378266f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -795,14 +795,6 @@ def __nonzero__(self):
def ndim(self):
return len(self.axes)
- def is_mixed_dtype(self):
- counts = set()
- for block in self.blocks:
- counts.add(block.dtype)
- if len(counts) > 1:
- return True
- return False
-
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
value = _ensure_index(value)
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index f39a6d3b3feec..93e9b07558319 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -238,10 +238,10 @@ def test_attrs(self):
self.assertEquals(len(self.mgr), len(self.mgr.items))
def test_is_mixed_dtype(self):
- self.assert_(self.mgr.is_mixed_dtype())
+ self.assert_(self.mgr.is_mixed_type)
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
- self.assert_(not mgr.is_mixed_dtype())
+ self.assert_(not mgr.is_mixed_type)
def test_is_indexed_like(self):
self.assert_(self.mgr._is_indexed_like(self.mgr))
| https://api.github.com/repos/pandas-dev/pandas/pulls/3079 | 2013-03-18T01:26:10Z | 2013-03-18T14:10:00Z | 2013-03-18T14:10:00Z | 2013-03-18T14:10:00Z | |
ENH: support iteration on returned results in select and select_as_multiple in HDFStore | diff --git a/RELEASE.rst b/RELEASE.rst
index 7e0187df9f61d..45e82d4ef83ce 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -63,6 +63,12 @@ pandas 0.11.0
- Add ``axes`` property to ``Series`` for compatibility
- Add ``xs`` function to ``Series`` for compatibility
- Allow setitem in a frame where only mixed numerics are present (e.g. int and float), (GH3037_)
+ - ``HDFStore``
+
+ - Provide dotted attribute access to ``get`` from stores
+ (e.g. store.df == store['df'])
+ - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are
+ provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_)
- In ``HDFStore``, provide dotted attribute access to ``get`` from stores
(e.g. ``store.df == store['df']``)
@@ -140,8 +146,6 @@ pandas 0.11.0
- Fix weird PyTables error when using too many selectors in a where
also correctly filter on any number of values in a Term expression
(so not using numexpr filtering, but isin filtering)
- - Provide dotted attribute access to ``get`` from stores
- (e.g. store.df == store['df'])
- Internally, change all variables to be private-like (now have leading
underscore)
- fixes for query parsing to correctly interpret boolean and != (GH2849_, GH2973_)
@@ -218,6 +222,7 @@ pandas 0.11.0
.. _GH2819: https://github.com/pydata/pandas/issues/2819
.. _GH2845: https://github.com/pydata/pandas/issues/2845
.. _GH2867: https://github.com/pydata/pandas/issues/2867
+.. _GH2803: https://github.com/pydata/pandas/issues/2803
.. _GH2807: https://github.com/pydata/pandas/issues/2807
.. _GH2849: https://github.com/pydata/pandas/issues/2849
.. _GH2850: https://github.com/pydata/pandas/issues/2850
@@ -238,7 +243,7 @@ pandas 0.11.0
.. _GH3037: https://github.com/pydata/pandas/issues/3037
.. _GH3041: https://github.com/pydata/pandas/issues/3041
.. _GH3053: https://github.com/pydata/pandas/issues/3053
-.. _GH2803: https://github.com/pydata/pandas/issues/2803
+.. _GH3076: https://github.com/pydata/pandas/issues/3076
pandas 0.10.1
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c1b40c92529f4..c30b64d9ae07a 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1307,6 +1307,23 @@ you cannot change data columns (nor indexables) after the first
append/put operation (Of course you can simply read in the data and
create a new table!)
+Iterator
+~~~~~~~~
+
+Starting in 0.11, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk``
+to ``select`` and ``select_as_multiple`` to return an iterator on the results.
+The default is 50,000 rows returned in a chunk.
+
+.. ipython:: python
+
+ for df in store.select('df', chunksize=3):
+ print df
+
+Note, that the chunksize keyword applies to the **returned** rows. So if you
+are doing a query, then that set will be subdivided and returned in the
+iterator. Keep in mind that if you do not pass a ``where`` selection criteria
+then the ``nrows`` of the table are considered.
+
Advanced Queries
~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index 487321b35ef99..328e14432e333 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -238,6 +238,9 @@ Enhancements
- In ``HDFStore``, provide dotted attribute access to ``get`` from stores
(e.g. ``store.df == store['df']``)
+ - In ``HDFStore``, new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are
+ provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_)
+
- ``Squeeze`` to possibly remove length 1 dimensions from an object.
.. ipython:: python
@@ -300,6 +303,7 @@ on GitHub for a complete list.
.. _GH2806: https://github.com/pydata/pandas/issues/2806
.. _GH2807: https://github.com/pydata/pandas/issues/2807
.. _GH2918: https://github.com/pydata/pandas/issues/2918
-.. _GH3011: https://github.com/pydata/pandas/issues/3011
-.. _GH2979: https://github.com/pydata/pandas/issues/2979
.. _GH2758: https://github.com/pydata/pandas/issues/2758
+.. _GH2979: https://github.com/pydata/pandas/issues/2979
+.. _GH3011: https://github.com/pydata/pandas/issues/3011
+.. _GH3076: https://github.com/pydata/pandas/issues/3076
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 6b3b36f231c1a..ca2e3b6e04f19 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -347,7 +347,7 @@ def get(self, key):
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
- def select(self, key, where=None, start=None, stop=None, columns=None, **kwargs):
+ def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
@@ -362,16 +362,30 @@ def select(self, key, where=None, start=None, stop=None, columns=None, **kwargs)
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return columns
+ iterator : boolean, return an iterator, default False
+ chunksize : nrows to include in iteration, return an iterator
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
- return self._read_group(group, where=where, start=start, stop=stop, columns=columns, **kwargs)
- def select_as_coordinates(self, key, where=None, **kwargs):
+ # create the storer and axes
+ s = self._create_storer(group)
+ s.infer_axes()
+
+ # what we are actually going to do for a chunk
+ def func(_start, _stop):
+ return s.read(where=where, start=_start, stop=_stop, columns=columns, **kwargs)
+
+ if iterator or chunksize is not None:
+ return TableIterator(func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize)
+
+ return TableIterator(func, nrows=s.nrows, start=start, stop=stop).get_values()
+
+ def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs):
"""
- return the selection as a Coordinates. Note that start/stop/columns parematers are inapplicable here.
+ return the selection as a Coordinates.
Parameters
----------
@@ -380,8 +394,10 @@ def select_as_coordinates(self, key, where=None, **kwargs):
Optional Parameters
-------------------
where : list of Term (or convertable) objects, optional
+ start : integer (defaults to None), row number to start selection
+ stop : integer (defaults to None), row number to stop selection
"""
- return self.get_storer(key).read_coordinates(where = where, **kwargs)
+ return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
def unique(self, key, column, **kwargs):
"""
@@ -400,7 +416,7 @@ def unique(self, key, column, **kwargs):
"""
return self.get_storer(key).read_column(column = column, **kwargs)
- def select_as_multiple(self, keys, where=None, selector=None, columns=None, **kwargs):
+ def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
@@ -408,6 +424,10 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, **kw
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0] if not supplied)
columns : the columns I want back
+ start : integer (defaults to None), row number to start selection
+ stop : integer (defaults to None), row number to stop selection
+ iterator : boolean, return an iterator, default False
+ chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
@@ -418,7 +438,7 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, **kw
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, basestring):
- return self.select(key=keys, where=where, columns=columns, **kwargs)
+ return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise Exception("keys must be a list/tuple")
@@ -433,6 +453,8 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, **kw
tbls = [ self.get_storer(k) for k in keys ]
# validate rows
+ if tbls[0] is None:
+ raise Exception("no valid tables to select as multiple")
nrows = tbls[0].nrows
for t in tbls:
if t.nrows != nrows:
@@ -441,16 +463,25 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, **kw
raise Exception("object [%s] is not a table, and cannot be used in all select as multiple" % t.pathname)
# select coordinates from the selector table
- c = self.select_as_coordinates(selector, where)
+ c = self.select_as_coordinates(selector, where, start=start, stop=stop)
+ nrows = len(c)
+
+ def func(_start, _stop):
+
+ # collect the returns objs
+ objs = [t.read(where=c[_start:_stop], columns=columns) for t in tbls]
+
+ # axis is the concentation axes
+ axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
- # collect the returns objs
- objs = [t.read(where=c, columns=columns) for t in tbls]
+ # concat and return
+ return concat(objs, axis=axis, verify_integrity=True)
- # axis is the concentation axes
- axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
+ if iterator or chunksize is not None:
+ return TableIterator(func, nrows=nrows, start=start, stop=stop, chunksize=chunksize)
+
+ return TableIterator(func, nrows=nrows, start=start, stop=stop).get_values()
- # concat and return
- return concat(objs, axis=axis, verify_integrity=True)
def put(self, key, value, table=None, append=False, **kwargs):
"""
@@ -807,6 +838,49 @@ def _read_group(self, group, **kwargs):
s.infer_axes()
return s.read(**kwargs)
+class TableIterator(object):
+ """ define the iteration interface on a table
+
+ Parameters
+ ----------
+
+ func : the function to get results
+ nrows : the rows to iterate on
+ start : the passed start value (default is None)
+ stop : the passed stop value (default is None)
+ chunksize : the passed chunking valeu (default is 50000)
+ kwargs : the passed kwargs
+ """
+
+ def __init__(self, func, nrows, start=None, stop=None, chunksize=None):
+ self.func = func
+ self.nrows = nrows
+ self.start = start or 0
+
+ if stop is None:
+ stop = self.nrows
+ self.stop = min(self.nrows,stop)
+
+ if chunksize is None:
+ chunksize = 50000
+
+ self.chunksize = chunksize
+
+ def __iter__(self):
+ current = self.start
+ while current < self.stop:
+ stop = current + self.chunksize
+ v = self.func(current, stop)
+ current = stop
+
+ if v is None:
+ continue
+
+ yield v
+
+ def get_values(self):
+ return self.func(self.start, self.stop)
+
class IndexCol(object):
""" an index column description class
@@ -2351,7 +2425,7 @@ def create_description(self, complib=None, complevel=None, fletcher32=False, exp
return d
- def read_coordinates(self, where=None, **kwargs):
+ def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
""" select coordinates (row numbers) from a table; return the coordinates object """
# validate the version
@@ -2362,7 +2436,7 @@ def read_coordinates(self, where=None, **kwargs):
return False
# create the selection
- self.selection = Selection(self, where=where, **kwargs)
+ self.selection = Selection(self, where=where, start=start, stop=stop, **kwargs)
return Coordinates(self.selection.select_coords(), group=self.group, where=where)
def read_column(self, column, **kwargs):
@@ -3132,6 +3206,12 @@ def __init__(self, values, group, where, **kwargs):
self.group = group
self.where = where
+ def __len__(self):
+ return len(self.values)
+
+ def __getitem__(self, key):
+ """ return a new coordinates object, sliced by the key """
+ return Coordinates(self.values[key], self.group, self.where)
class Selection(object):
"""
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index c3a8990962ca1..8cf40a77d639f 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1829,6 +1829,66 @@ def test_select_with_many_inputs(self):
tm.assert_frame_equal(expected, result)
self.assert_(len(result) == 100)
+ def test_select_iterator(self):
+
+ # single table
+ with ensure_clean(self.path) as store:
+
+ df = tm.makeTimeDataFrame(500)
+ store.remove('df')
+ store.append('df', df)
+
+ expected = store.select('df')
+
+ results = []
+ for s in store.select('df',iterator=True):
+ results.append(s)
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+ results = []
+ for s in store.select('df',chunksize=100):
+ results.append(s)
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ results = []
+ for s in store.select('df',chunksize=150):
+ results.append(s)
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # multiple
+
+ with ensure_clean(self.path) as store:
+
+ df1 = tm.makeTimeDataFrame(500)
+ store.append('df1',df1,data_columns=True)
+ df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
+ df2['foo'] = 'bar'
+ store.append('df2',df2)
+
+ df = concat([df1, df2], axis=1)
+
+ # full selection
+ expected = store.select_as_multiple(
+ ['df1', 'df2'], selector='df1')
+ results = []
+ for s in store.select_as_multiple(
+ ['df1', 'df2'], selector='df1', chunksize=150):
+ results.append(s)
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # where selection
+ expected = store.select_as_multiple(
+ ['df1', 'df2'], where= Term('A>0'), selector='df1')
+ results = []
+ for s in store.select_as_multiple(
+ ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
+ results.append(s)
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
def test_panel_select(self):
wp = tm.makePanel()
@@ -2042,6 +2102,11 @@ def test_select_as_multiple(self):
df2['foo'] = 'bar'
with ensure_clean(self.path) as store:
+
+ # no tables stored
+ self.assertRaises(Exception, store.select_as_multiple,
+ None, where=['A>0', 'B>0'], selector='df1')
+
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
| New keywords `iterator=a_boolean` and
`chunksize=number_in_a_chunk`, (default is 50,000) rows are
provided to support iteration on `select` and `select_as_multiple` results
DOC and TST included
closes #3076
| https://api.github.com/repos/pandas-dev/pandas/pulls/3078 | 2013-03-18T01:15:47Z | 2013-03-18T14:36:10Z | 2013-03-18T14:36:10Z | 2014-07-04T12:26:26Z |
Fix figsize issue when using matplotlib locally | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 89cc407daf3f4..8f2e8b072da53 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -63,6 +63,15 @@ def test_plot(self):
Series(np.random.randn(10)).plot(kind='bar', color='black')
+ # figsize and title
+ import matplotlib.pyplot as plt
+ plt.close('all')
+ ax = self.series.plot(title='Test', figsize=(16, 8))
+
+ self.assert_(ax.title.get_text() == 'Test')
+ self.assert_((np.round(ax.figure.get_size_inches())
+ == np.array((16., 8.))).all())
+
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 1bc0d16bbbf1b..eadd8518505e4 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -750,6 +750,7 @@ def _maybe_right_yaxis(self, ax):
if (sec_true or has_sec) and not hasattr(ax, 'right_ax'):
orig_ax, new_ax = ax, ax.twinx()
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
+ new_ax.right_ax = new_ax
if len(orig_ax.get_lines()) == 0: # no data on left y
orig_ax.get_yaxis().set_visible(False)
@@ -1500,6 +1501,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
For line plots, use log scaling on y axis
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
+ figsize : a tuple (width, height) in inches
kwds : keywords
Options to pass to matplotlib plotting method
@@ -1515,7 +1517,18 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
elif kind == 'kde':
klass = KdePlot
- if ax is None:
+ """
+ If no axis is specified, we check whether there are existing figures.
+ If so, we get the current axis and check whether yaxis ticks are on the
+ right. Ticks for the plot of the series will be on the right unless
+ there is at least one axis with ticks on the left.
+
+ If we do not check for whether there are existing figures, _gca() will
+ create a figure with the default figsize, causing the figsize= parameter to
+ be ignored.
+ """
+ import matplotlib.pyplot as plt
+ if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
if ax.get_yaxis().get_ticks_position().strip().lower() == 'right':
fig = _gcf()
@@ -1539,7 +1552,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
plot_obj.generate()
plot_obj.draw()
- return plot_obj.ax
+ # plot_obj.ax is None if we created the first figure
+ return plot_obj.axes[0]
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
| This is a pretty minor issue, but I'm seeing the following when passing `figsize=(20,10)` when plotting a Series in the `ipython --pylab` environment:

Everything's there - the window is just initialized to the wrong size. Without the `--pylab` flag and manually calling `plt.show()`, the window shows up as the default size (which is incorrect).
The issue appears to be that `plot_series()` will call `plt.gca()` before `figsize=` has been passed to `plt.figure()`. This initializes the window to the default size and it apparently isn't updated when redrawn.
What this patch does is use `plt.get_fignums()` to see if we have any existing figures. If so, we can call `plt.gca()` without automatically creating a figure and window at the default sizes.
The addition of `new_ax.right_axis = new_ax` is to accommodate Series plotted with `secondary_y=True` and thus getting `new_ax` as their `Axes`. It renders fine without it but a few tests expect `right_axis` to exist. This could be handled differently if this is inappropriate.
Finally, there's a test to check that `figsize=` is at least implemented on the figure. I don't know of a way to test this bug in a backend-independent way, so this only checks that we're not regressing to the pre-v0.10.0 state where `figsize=` was silently ignored by `plot_series()`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3073 | 2013-03-17T05:51:24Z | 2013-04-08T16:24:46Z | 2013-04-08T16:24:46Z | 2013-04-08T16:24:58Z |
BUG: replace with a dict misbehaving (GH 3064), due to incorrect filtering | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5603730974c7e..40a6b27ee32a8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3468,8 +3468,8 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
return self
new_data = self._data
- if isinstance(to_replace, dict):
- if isinstance(value, dict): # {'A' : NA} -> {'A' : 0}
+ if isinstance(to_replace, (dict, Series)):
+ if isinstance(value, (dict, Series)): # {'A' : NA} -> {'A' : 0}
new_data = self._data
for c, src in to_replace.iteritems():
if c in value and c in self:
@@ -3481,7 +3481,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
if k in self:
new_data = new_data.replace(src, value, filter = [ k ], inplace=inplace)
else:
- raise ValueError('Fill value must be scalar or dict')
+ raise ValueError('Fill value must be scalar or dict or Series')
elif isinstance(to_replace, (list, np.ndarray)):
# [NA, ''] -> [0, 'missing']
@@ -3501,7 +3501,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
else:
# dest iterable dict-like
- if isinstance(value, dict): # NA -> {'A' : 0, 'B' : -1}
+ if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.iteritems():
@@ -3528,7 +3528,7 @@ def _interpolate(self, to_replace, method, axis, inplace, limit):
method = com._clean_fill_method(method)
- if isinstance(to_replace, dict):
+ if isinstance(to_replace, (dict, Series)):
if axis == 1:
return self.T.replace(to_replace, method=method,
limit=limit).T
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 4163c6ad8f60f..59f750d4570ad 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -259,10 +259,15 @@ def _try_cast_result(self, result):
we may have roundtripped thru object in the mean-time """
return result
- def replace(self, to_replace, value, inplace=False):
+ def replace(self, to_replace, value, inplace=False, filter=None):
""" replace the to_replace value with value, possible to create new blocks here
this is just a call to putmask """
mask = com.mask_missing(self.values, to_replace)
+ if filter is not None:
+ for i, item in enumerate(self.items):
+ if item not in filter:
+ mask[i] = False
+
if not mask.any():
if inplace:
return [ self ]
@@ -886,14 +891,15 @@ def apply(self, f, *args, **kwargs):
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
- filter : callable, if supplied, only call the block if the filter is True
+ filter : list, if supplied, only call the block if the filter is in the block
"""
axes = kwargs.pop('axes',None)
- filter = kwargs.pop('filter',None)
+ filter = kwargs.get('filter')
result_blocks = []
for blk in self.blocks:
if filter is not None:
+ kwargs['filter'] = set(kwargs['filter'])
if not blk.items.isin(filter).any():
result_blocks.append(blk)
continue
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index f143d0fcacc2f..07d84613fd86d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5587,6 +5587,26 @@ def test_replace(self):
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
+ def test_resplace_series_dict(self):
+ # from GH 3064
+ df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
+ result = df.replace(0, {'zero': 0.5, 'one': 1.0})
+ expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
+ assert_frame_equal(result, expected)
+
+ result = df.replace(0, df.mean())
+ assert_frame_equal(result, expected)
+
+ # series to series/dict
+ df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
+ s = Series({'zero': 0.0, 'one': 2.0})
+ result = df.replace(s, {'zero': 0.5, 'one': 1.0})
+ expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
+ assert_frame_equal(result, expected)
+
+ result = df.replace(s, df.mean())
+ assert_frame_equal(result, expected)
+
def test_replace_mixed(self):
self.mixed_frame['foo'][5:20] = nan
self.mixed_frame['A'][-10:] = nan
| rolled #3064 changes in here
| https://api.github.com/repos/pandas-dev/pandas/pulls/3072 | 2013-03-17T03:14:09Z | 2013-03-17T03:54:56Z | 2013-03-17T03:54:56Z | 2013-03-17T18:38:37Z |
BUG: fixes in replace to deal with block upcasting | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 20c6ae05349ec..1568018174e68 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -745,17 +745,36 @@ def _maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
-def _maybe_upcast_putmask(result, mask, other, dtype=None):
+def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None):
""" a safe version of put mask that (potentially upcasts the result
- return the result and a changed flag """
- try:
- np.putmask(result, mask, other)
- except:
- # our type is wrong here, need to upcast
- if (-mask).any():
- result, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
+ return the result
+ if change is not None, then MUTATE the change (and change the dtype)
+ return a changed flag
+ """
+
+ if mask.any():
+
+ def changeit():
+ # our type is wrong here, need to upcast
+ if (-mask).any():
+ r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
+ np.putmask(r, mask, other)
+
+ # we need to actually change the dtype here
+ if change is not None:
+ change.dtype = r.dtype
+ change[:] = r
+
+ return r, True
+
+ new_dtype, fill_value = _maybe_promote(result.dtype,other)
+ if new_dtype != result.dtype:
+ return changeit()
+
+ try:
np.putmask(result, mask, other)
- return result, True
+ except:
+ return changeit()
return result, False
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4eba4b52aaa81..5603730974c7e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3467,14 +3467,21 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
if len(self.columns) == 0:
return self
+ new_data = self._data
if isinstance(to_replace, dict):
if isinstance(value, dict): # {'A' : NA} -> {'A' : 0}
- return self._replace_both_dict(to_replace, value, inplace)
+ new_data = self._data
+ for c, src in to_replace.iteritems():
+ if c in value and c in self:
+ new_data = new_data.replace(src, value[c], filter = [ c ], inplace=inplace)
elif not isinstance(value, (list, np.ndarray)):
- return self._replace_src_dict(to_replace, value, inplace)
-
- raise ValueError('Fill value must be scalar or dict')
+ new_data = self._data
+ for k, src in to_replace.iteritems():
+ if k in self:
+ new_data = new_data.replace(src, value, filter = [ k ], inplace=inplace)
+ else:
+ raise ValueError('Fill value must be scalar or dict')
elif isinstance(to_replace, (list, np.ndarray)):
# [NA, ''] -> [0, 'missing']
@@ -3491,25 +3498,29 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
new_data = self._data.replace(to_replace, value,
inplace=inplace)
- if inplace:
- self._data = new_data
- return self
- else:
- return self._constructor(new_data)
else:
+
+ # dest iterable dict-like
if isinstance(value, dict): # NA -> {'A' : 0, 'B' : -1}
- return self._replace_dest_dict(to_replace, value, inplace)
+
+ new_data = self._data
+ for k, v in value.iteritems():
+ if k in self:
+ new_data = new_data.replace(to_replace, v, filter = [ k ], inplace=inplace)
+
elif not isinstance(value, (list, np.ndarray)): # NA -> 0
new_data = self._data.replace(to_replace, value,
inplace=inplace)
- if inplace:
- self._data = new_data
- return self
- else:
- return self._constructor(new_data)
+ else:
+ raise ValueError('Invalid to_replace type: %s' %
+ type(to_replace)) # pragma: no cover
+
- raise ValueError('Invalid to_replace type: %s' %
- type(to_replace)) # pragma: no cover
+ if inplace:
+ self._data = new_data
+ return self
+ else:
+ return self._constructor(new_data)
def _interpolate(self, to_replace, method, axis, inplace, limit):
if self._is_mixed_type and axis == 1:
@@ -3543,27 +3554,6 @@ def _interpolate(self, to_replace, method, axis, inplace, limit):
else:
return self._constructor(new_data)
- def _replace_dest_dict(self, to_replace, value, inplace):
- rs = self if inplace else self.copy()
- for k, v in value.iteritems():
- if k in rs:
- rs[k].replace(to_replace, v, inplace=True)
- return rs if not inplace else None
-
- def _replace_src_dict(self, to_replace, value, inplace):
- rs = self if inplace else self.copy()
- for k, src in to_replace.iteritems():
- if k in rs:
- rs[k].replace(src, value, inplace=True)
- return rs if not inplace else None
-
- def _replace_both_dict(self, to_replace, value, inplace):
- rs = self if inplace else self.copy()
- for c, src in to_replace.iteritems():
- if c in value and c in rs:
- rs[c].replace(src, value[c], inplace=True)
- return rs if not inplace else None
-
#----------------------------------------------------------------------
# Rename
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 0228baf238bcd..4163c6ad8f60f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -880,10 +880,23 @@ def _verify_integrity(self):
'block items')
def apply(self, f, *args, **kwargs):
- """ iterate over the blocks, collect and create a new block manager """
+ """ iterate over the blocks, collect and create a new block manager
+
+ Parameters
+ ----------
+ f : the callable or function name to operate on at the block level
+ axes : optional (if not supplied, use self.axes)
+ filter : callable, if supplied, only call the block if the filter is True
+ """
+
axes = kwargs.pop('axes',None)
+ filter = kwargs.pop('filter',None)
result_blocks = []
for blk in self.blocks:
+ if filter is not None:
+ if not blk.items.isin(filter).any():
+ result_blocks.append(blk)
+ continue
if callable(f):
applied = f(blk, *args, **kwargs)
else:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c6fe396b08867..fcefbbe216aa3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -732,13 +732,8 @@ def where(self, cond, other=nan, inplace=False):
if len(other) != len(ser):
raise ValueError('Length of replacements must equal series length')
- result, changed = com._maybe_upcast_putmask(ser,~cond,other)
- if changed:
-
- # need to actually change ser here
- if inplace:
- ser.dtype = result.dtype
- ser[:] = result
+ change = ser if inplace else None
+ result, changed = com._maybe_upcast_putmask(ser,~cond,other,change=change)
return None if inplace else ser
@@ -2680,11 +2675,17 @@ def replace(self, to_replace, value=None, method='pad', inplace=False,
-------
replaced : Series
"""
- result = self.copy() if not inplace else self
+
+ if inplace:
+ result = self
+ change = self
+ else:
+ result = self.copy()
+ change = None
def _rep_one(s, to_rep, v): # replace single value
mask = com.mask_missing(s.values, to_rep)
- np.putmask(s.values, mask, v)
+ com._maybe_upcast_putmask(s.values,mask,v,change=change)
def _rep_dict(rs, to_rep): # replace {[src] -> dest}
@@ -2701,7 +2702,7 @@ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
masks[d] = com.mask_missing(rs.values, sset)
for d, m in masks.iteritems():
- np.putmask(rs.values, m, d)
+ com._maybe_upcast_putmask(rs.values,m,d,change=change)
else: # if no risk of clobbering then simple
for d, sset in dd.iteritems():
_rep_one(rs, sset, d)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2cdb4488b8126..f143d0fcacc2f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5621,6 +5621,16 @@ def test_replace_mixed(self):
result = df.replace([1,2], ['foo','bar'])
assert_frame_equal(result,expected)
+ # test case from
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
+ result = df.replace(3, df.mean().to_dict())
+ expected = df.copy().astype('float64')
+ m = df.mean()
+ expected.iloc[0,0] = m[0]
+ expected.iloc[1,1] = m[1]
+ assert_frame_equal(result,expected)
+
def test_replace_interpolate(self):
padded = self.tsframe.replace(nan, method='pad')
assert_frame_equal(padded, self.tsframe.fillna(method='pad'))
| ENH:
- _maybe_upcast_putmask now has the keyword, change to provide inline putmask changes to an object (series)
- apply in BlockManager now has a keyword, filter to allow acting on only those items contained in the filter (if supplied)
CLN:
- consolidated all replace subs to main replace in DataFrame (which calls replace in BlockManager)
now passed test in GH #3064
| https://api.github.com/repos/pandas-dev/pandas/pulls/3068 | 2013-03-16T19:45:30Z | 2013-03-16T20:18:25Z | 2013-03-16T20:18:25Z | 2014-07-01T17:38:18Z |
cython dependency and a pip tip | diff --git a/README.rst b/README.rst
index 59bf2667181f9..5145f801fc6eb 100644
--- a/README.rst
+++ b/README.rst
@@ -94,10 +94,25 @@ Optional dependencies
Installation from sources
=========================
-In the ``pandas`` directory (same one where you found this file), execute::
+To install pandas from source you need ``cython`` in addition to the normal dependencies above,
+which can be installed from pypi::
+
+ pip install cython
+
+In the ``pandas`` directory (same one where you found this file after cloning the git repo), execute::
python setup.py install
+or for installing in `development mode <http://www.pip-installer.org/en/latest/usage.html>`__::
+
+ python setup.py develop
+
+Alternatively, you can use `pip` if you want all the dependencies pulled in automatically
+(the optional ``-e`` option is for installing it in
+`development mode <http://www.pip-installer.org/en/latest/usage.html>`__)::
+
+ pip install -e .
+
On Windows, you will need to install MinGW and execute::
python setup.py build --compiler=mingw32
| Some advice that would've helped me when I was first trying to get the development version working.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3067 | 2013-03-16T17:08:08Z | 2013-03-17T13:54:05Z | 2013-03-17T13:54:05Z | 2013-06-07T14:31:45Z |
BUG/ENH: guarantee blocks will upcast as needed, and split as needed | diff --git a/RELEASE.rst b/RELEASE.rst
index 6a35db03a5307..b1c18daf20a3b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -79,7 +79,7 @@ pandas 0.11.0
doesn't have nans, then an int will be returned)
- backfill/pad/take/diff/ohlc will now support ``float32/int16/int8``
operations
- - Integer block types will upcast as needed in where operations (GH2793_)
+ - Block types will upcast as needed in where/masking operations (GH2793_)
- Series now automatically will try to set the correct dtype based on passed
datetimelike objects (datetime/Timestamp)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a3e8c09839891..20c6ae05349ec 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -221,6 +221,13 @@ def mask_missing(arr, values_to_mask):
for x in nonna:
if mask is None:
mask = arr == x
+
+ # if x is a string and mask is not, then we get a scalar
+ # return value, which is not good
+ if not isinstance(mask,np.ndarray):
+ m = mask
+ mask = np.empty(arr.shape,dtype=np.bool)
+ mask.fill(m)
else:
mask = mask | (arr == x)
@@ -730,6 +737,11 @@ def _maybe_promote(dtype, fill_value=np.nan):
dtype = np.complex128
else:
dtype = np.object_
+
+ # in case we have a string that looked like a number
+ if issubclass(np.dtype(dtype).type, basestring):
+ dtype = np.object_
+
return dtype, fill_value
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d2c3f4104950b..0228baf238bcd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4,7 +4,7 @@
from numpy import nan
import numpy as np
-from pandas.core.common import _possibly_downcast_to_dtype
+from pandas.core.common import _possibly_downcast_to_dtype, isnull
from pandas.core.index import Index, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
@@ -260,32 +260,14 @@ def _try_cast_result(self, result):
return result
def replace(self, to_replace, value, inplace=False):
- new_values = self.values if inplace else self.values.copy()
- if self._can_hold_element(value):
- value = self._try_cast(value)
-
- if not isinstance(to_replace, (list, np.ndarray)):
- if self._can_hold_element(to_replace):
- to_replace = self._try_cast(to_replace)
- msk = com.mask_missing(new_values, to_replace)
- np.putmask(new_values, msk, value)
- else:
- try:
- to_replace = np.array(to_replace, dtype=self.dtype)
- msk = com.mask_missing(new_values, to_replace)
- np.putmask(new_values, msk, value)
- except Exception:
- to_replace = np.array(to_replace, dtype=object)
- for r in to_replace:
- if self._can_hold_element(r):
- r = self._try_cast(r)
- msk = com.mask_missing(new_values, to_replace)
- np.putmask(new_values, msk, value)
-
- if inplace:
- return self
- else:
- return make_block(new_values, self.items, self.ref_items)
+ """ replace the to_replace value with value, possible to create new blocks here
+ this is just a call to putmask """
+ mask = com.mask_missing(self.values, to_replace)
+ if not mask.any():
+ if inplace:
+ return [ self ]
+ return [ self.copy() ]
+ return self.putmask(mask, value, inplace=inplace)
def putmask(self, mask, new, inplace=False):
""" putmask the data to the block; it is possible that we may create a new dtype of block
@@ -309,19 +291,34 @@ def putmask(self, mask, new, inplace=False):
# maybe upcast me
elif mask.any():
- # type of the new block
- if ((isinstance(new, np.ndarray) and issubclass(new.dtype, np.number)) or
- isinstance(new, float)):
- typ = np.float64
- else:
- typ = np.object_
- # we need to exiplicty astype here to make a copy
- new_values = new_values.astype(typ)
+ # need to go column by column
+ new_blocks = []
+ for i, item in enumerate(self.items):
- # we create a new block type
- np.putmask(new_values, mask, new)
- return [ make_block(new_values, self.items, self.ref_items) ]
+ m = mask[i]
+
+ # need a new block
+ if m.any():
+
+ n = new[i] if isinstance(new, np.ndarray) else new
+
+ # type of the new block
+ dtype, _ = com._maybe_promote(np.array(n).dtype)
+
+ # we need to exiplicty astype here to make a copy
+ nv = new_values[i].astype(dtype)
+
+ # we create a new block type
+ np.putmask(nv, m, n)
+
+ else:
+ nv = new_values[i] if inplace else new_values[i].copy()
+
+ nv = _block_shape(nv)
+ new_blocks.append(make_block(nv, [ item ], self.ref_items))
+
+ return new_blocks
if inplace:
return [ self ]
@@ -350,7 +347,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
if missing is None:
mask = None
else: # todo create faster fill func without masking
- mask = _mask_missing(transf(values), missing)
+ mask = com.mask_missing(transf(values), missing)
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
@@ -532,7 +529,7 @@ def create_block(result, items, transpose = True):
if len(result) == 1:
result = np.repeat(result,self.shape[1:])
- result = result.reshape(((1,) + self.shape[1:]))
+ result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
result_blocks.append(create_block(result, item, transpose = False))
return result_blocks
@@ -540,23 +537,6 @@ def create_block(result, items, transpose = True):
result = func(cond,values,other)
return create_block(result, self.items)
-def _mask_missing(array, missing_values):
- if not isinstance(missing_values, (list, np.ndarray)):
- missing_values = [missing_values]
-
- mask = None
- missing_values = np.array(missing_values, dtype=object)
- if com.isnull(missing_values).any():
- mask = com.isnull(array)
- missing_values = missing_values[com.notnull(missing_values)]
-
- for v in missing_values:
- if mask is None:
- mask = array == missing_values
- else:
- mask |= array == missing_values
- return mask
-
class NumericBlock(Block):
is_numeric = True
_can_hold_na = True
@@ -659,7 +639,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True):
values = self.get(c)
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
- values = values.reshape(((1,) + values.shape))
+ values = _block_shape(values)
items = self.items.take([i])
newb = make_block(values, items, self.ref_items)
blocks.append(newb)
@@ -949,23 +929,37 @@ def replace(self, *args, **kwargs):
def replace_list(self, src_lst, dest_lst, inplace=False):
""" do a list replace """
- if not inplace:
- self = self.copy()
-
- sset = set(src_lst)
- if any([k in sset for k in dest_lst]):
- masks = {}
- for s in src_lst:
- masks[s] = [b.values == s for b in self.blocks]
-
- for s, d in zip(src_lst, dest_lst):
- [b.putmask(masks[s][i], d, inplace=True) for i, b in
- enumerate(self.blocks)]
- else:
- for s, d in zip(src_lst, dest_lst):
- self.replace(s, d, inplace=True)
- return self
+ # figure out our mask a-priori to avoid repeated replacements
+ values = self.as_matrix()
+ def comp(s):
+ if isnull(s):
+ return isnull(values)
+ return values == s
+ masks = [ comp(s) for i, s in enumerate(src_lst) ]
+
+ result_blocks = []
+ for blk in self.blocks:
+
+ # its possible to get multiple result blocks here
+ # replace ALWAYS will return a list
+ rb = [ blk if inplace else blk.copy() ]
+ for i, d in enumerate(dest_lst):
+ new_rb = []
+ for b in rb:
+ # get our mask for this element, sized to this
+ # particular block
+ m = masks[i][b.ref_locs]
+ if m.any():
+ new_rb.extend(b.putmask(m, d, inplace=True))
+ else:
+ new_rb.append(b)
+ rb = new_rb
+ result_blocks.extend(rb)
+
+ bm = self.__class__(result_blocks, self.axes)
+ bm._consolidate_inplace()
+ return bm
def is_consolidated(self):
"""
@@ -1302,8 +1296,7 @@ def set(self, item, value):
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
- if value.ndim == self.ndim - 1:
- value = value.reshape((1,) + value.shape)
+ value = _block_shape(value,self.ndim-1)
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
@@ -1873,6 +1866,14 @@ def _merge_blocks(blocks, items):
return new_block.reindex_items_from(items)
+def _block_shape(values, ndim=1, shape=None):
+ """ guarantee the shape of the values to be at least 1 d """
+ if values.ndim == ndim:
+ if shape is None:
+ shape = values.shape
+ values = values.reshape(tuple((1,) + shape))
+ return values
+
def _vstack(to_stack):
if all(x.dtype == _NS_DTYPE for x in to_stack):
# work around NumPy 1.6 bug
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6db1db76d664a..2cdb4488b8126 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5596,6 +5596,31 @@ def test_replace_mixed(self):
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
+ # int block upcasting
+ df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
+ expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })
+ result = df.replace(0, 0.5)
+ assert_frame_equal(result,expected)
+
+ df.replace(0, 0.5, inplace=True)
+ assert_frame_equal(df,expected)
+
+ # int block splitting
+ df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })
+ expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })
+ result = df.replace(0, 0.5)
+ assert_frame_equal(result,expected)
+
+ # to object block upcasting
+ df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
+ expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })
+ result = df.replace(2, 'foo')
+ assert_frame_equal(result,expected)
+
+ expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })
+ result = df.replace([1,2], ['foo','bar'])
+ assert_frame_equal(result,expected)
+
def test_replace_interpolate(self):
padded = self.tsframe.replace(nan, method='pad')
assert_frame_equal(padded, self.tsframe.fillna(method='pad'))
| - replace on an IntBlock with a float will upcast, it may yield multiple blocks if the original IntBlock didn't fully replace
- small fix in com.mask_missing to guarantee that a bool ndarray is returned even in the case of a string comparison with a numeric array, (which for some reason numpy returns a single, non-ndarray value)
allows #3064 to proceed (e.g. df.replace(0,0.5) will work on an integer dtype)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3065 | 2013-03-16T14:05:29Z | 2013-03-16T15:00:05Z | 2013-03-16T15:00:05Z | 2014-07-16T08:05:32Z |
ENH: support for nanosecond time in offset and period | diff --git a/.gitignore b/.gitignore
index da76a414865e5..201a965a0f409 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,15 +30,11 @@ pandas/io/*.dat
pandas/io/*.json
*.log
.noseids
-
-.idea/libraries/sass_stdlib.xml
-
-.idea/pandas.iml
.build_cache_dir
.vagrant
*.whl
**/wheelhouse/*
-
.project
.pydevproject
.settings
+.idea
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 0e3c3b50fcd85..eb5ae6740044d 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -425,6 +425,29 @@ Enhancements
the file if the data has correctly separated and properly aligned columns
using the delimiter provided to the function (:issue:`4488`).
+ - support for nanosecond times in periods
+
+ .. warning::
+
+ These operations require ``numpy >= 1.7``
+
+ Period conversions in the range of seconds and below were reworked and extended
+ up to nanoseconds. Periods in the nanosecond range are now available.
+
+ .. ipython:: python
+ date_range('2013-01-01', periods=5, freq='5N')
+
+ or with frequency as offset
+
+ .. ipython:: python
+ date_range('2013-01-01', periods=5, freq=pd.offsets.Nano(5))
+
+ Timestamps can be modified in the nanosecond range
+
+ .. ipython:: python
+ t = Timestamp('20130101 09:01:02')
+ t + pd.datetools.Nano(123)
+
.. _whatsnew_0130.experimental:
Experimental
diff --git a/pandas/src/period.c b/pandas/src/period.c
index 4e7ab44c7b150..2e544afce9da2 100644
--- a/pandas/src/period.c
+++ b/pandas/src/period.c
@@ -272,6 +272,162 @@ int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo,
// helpers for frequency conversion routines //
+static int daytime_conversion_factors[][2] = {
+ { FR_DAY, 1 },
+ { FR_HR, 24 },
+ { FR_MIN, 60 },
+ { FR_SEC, 60 },
+ { FR_MS, 1000 },
+ { FR_US, 1000 },
+ { FR_NS, 1000 },
+ { 0, 0 }
+};
+
+static npy_int64** daytime_conversion_factor_matrix = NULL;
+
+static int max_value(int a, int b) {
+ return a > b ? a : b;
+}
+
+static int min_value(int a, int b) {
+ return a < b ? a : b;
+}
+
+static int get_freq_group(int freq) {
+ return (freq/1000)*1000;
+}
+
+static int get_freq_group_index(int freq) {
+ return freq/1000;
+}
+
+static int calc_conversion_factors_matrix_size() {
+ int matrix_size = 0;
+ int index;
+ for (index=0;; index++) {
+ int period_value = get_freq_group_index(daytime_conversion_factors[index][0]);
+ if (period_value == 0) {
+ break;
+ }
+ matrix_size = max_value(matrix_size, period_value);
+ }
+ return matrix_size + 1;
+}
+
+static void alloc_conversion_factors_matrix(int matrix_size) {
+ int row_index;
+ int column_index;
+ daytime_conversion_factor_matrix = malloc(matrix_size * sizeof(**daytime_conversion_factor_matrix));
+ for (row_index = 0; row_index < matrix_size; row_index++) {
+ daytime_conversion_factor_matrix[row_index] = malloc(matrix_size * sizeof(**daytime_conversion_factor_matrix));
+ for (column_index = 0; column_index < matrix_size; column_index++) {
+ daytime_conversion_factor_matrix[row_index][column_index] = 0;
+ }
+ }
+}
+
+static npy_int64 calculate_conversion_factor(int start_value, int end_value) {
+ npy_int64 conversion_factor = 0;
+ int index;
+ for (index=0;; index++) {
+ int freq_group = daytime_conversion_factors[index][0];
+
+ if (freq_group == 0) {
+ conversion_factor = 0;
+ break;
+ }
+
+ if (freq_group == start_value) {
+ conversion_factor = 1;
+ } else {
+ conversion_factor *= daytime_conversion_factors[index][1];
+ }
+
+ if (freq_group == end_value) {
+ break;
+ }
+ }
+ return conversion_factor;
+}
+
+static void populate_conversion_factors_matrix() {
+ int row_index_index;
+ int row_value, row_index;
+ int column_index_index;
+ int column_value, column_index;
+
+ for (row_index_index = 0;; row_index_index++) {
+ row_value = daytime_conversion_factors[row_index_index][0];
+ if (row_value == 0) {
+ break;
+ }
+ row_index = get_freq_group_index(row_value);
+ for (column_index_index = row_index_index;; column_index_index++) {
+ column_value = daytime_conversion_factors[column_index_index][0];
+ if (column_value == 0) {
+ break;
+ }
+ column_index = get_freq_group_index(column_value);
+
+ daytime_conversion_factor_matrix[row_index][column_index] = calculate_conversion_factor(row_value, column_value);
+ }
+ }
+}
+
+static void initialize_daytime_conversion_factor_maxtrix() {
+ int matrix_size = calc_conversion_factors_matrix_size();
+ alloc_conversion_factors_matrix(matrix_size);
+ populate_conversion_factors_matrix();
+}
+
+npy_int64 get_daytime_conversion_factor(int index1, int index2)
+{
+ if (daytime_conversion_factor_matrix == NULL) {
+ initialize_daytime_conversion_factor_maxtrix();
+ }
+ return daytime_conversion_factor_matrix[min_value(index1, index2)][max_value(index1, index2)];
+}
+
+npy_int64 convert_daytime(npy_int64 ordinal, int from, int to, int atEnd)
+{
+ int from_index, to_index, offset;
+ npy_int64 conversion_factor;
+
+ if (from == to) {
+ return ordinal;
+ }
+
+ from_index = get_freq_group_index(from);
+ to_index = get_freq_group_index(to);
+
+ conversion_factor = get_daytime_conversion_factor(from_index, to_index);
+
+ offset = atEnd ? 1 : 0;
+
+ if (from <= to) {
+ return (ordinal + offset) * conversion_factor - offset;
+ } else {
+ return ordinal / conversion_factor;
+ }
+
+}
+
+static npy_int64 transform_via_day(npy_int64 ordinal, char relation, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) {
+ int tempStore = af_info->targetFreq;
+ npy_int64 result;
+
+ af_info->targetFreq = FR_DAY;
+ result = (*first_func)(ordinal, relation, af_info);
+ af_info->targetFreq = tempStore;
+
+ tempStore = af_info->sourceFreq;
+ af_info->sourceFreq = FR_DAY;
+ result = (*second_func)(result, relation, af_info);
+ af_info->sourceFreq = tempStore;
+
+ return result;
+}
+
static npy_int64 DtoB_weekday(npy_int64 absdate) {
return (((absdate) / 7) * 5) + (absdate) % 7 - BDAY_OFFSET;
}
@@ -302,24 +458,23 @@ static npy_int64 absdate_from_ymd(int y, int m, int d) {
//************ FROM DAILY ***************
-static npy_int64 asfreq_DtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
+static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
- if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0);
+ if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR))
+ return INT_ERR_CODE;
if (dinfo.month > af_info->to_a_year_end) {
- return (npy_int64)(dinfo.year + 1 - BASE_YEAR);
- }
+ return (npy_int64)(dinfo.year + 1 - BASE_YEAR);
+ }
else {
- return (npy_int64)(dinfo.year - BASE_YEAR);
- }
+ return (npy_int64)(dinfo.year - BASE_YEAR);
+ }
}
-static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info,
- int *year, int *quarter) {
+static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, int *quarter) {
struct date_info dinfo;
- if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR))
+ return INT_ERR_CODE;
if (af_info->to_q_year_end != 12) {
dinfo.month -= af_info->to_q_year_end;
if (dinfo.month <= 0) { dinfo.month += 12; }
@@ -333,11 +488,11 @@ static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info,
return 0;
}
-
-static npy_int64 asfreq_DtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
+static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
int year, quarter;
+ ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0);
+
if (DtoQ_yq(ordinal, af_info, &year, &quarter) == INT_ERR_CODE) {
return INT_ERR_CODE;
}
@@ -345,23 +500,28 @@ static npy_int64 asfreq_DtoQ(npy_int64 ordinal, char relation, asfreq_info *af_i
return (npy_int64)((year - BASE_YEAR) * 4 + quarter - 1);
}
-static npy_int64 asfreq_DtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
+static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
+
+ ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0);
+
if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR))
return INT_ERR_CODE;
return (npy_int64)((dinfo.year - BASE_YEAR) * 12 + dinfo.month - 1);
}
-static npy_int64 asfreq_DtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+static npy_int64 asfreq_DTtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0);
return (ordinal + ORD_OFFSET - (1 + af_info->to_week_end))/7 + 1 - WEEK_OFFSET;
}
-static npy_int64 asfreq_DtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
+static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
- if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+
+ ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0);
+
+ if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR))
+ return INT_ERR_CODE;
if (relation == 'S') {
return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week);
@@ -370,184 +530,93 @@ static npy_int64 asfreq_DtoB(npy_int64 ordinal, char relation, asfreq_info *af_i
}
}
-// needed for getDateInfo function
-static npy_int64 asfreq_DtoD(npy_int64 ordinal, char relation, asfreq_info *af_info) { return ordinal; }
+// all intra day calculations are now done within one function
+static npy_int64 asfreq_WithinDT(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ //if (relation == 'E') {
+ // ordinal += 1;
+ //}
-static npy_int64 asfreq_DtoHIGHFREQ(npy_int64 ordinal, char relation, npy_int64 per_day) {
- if (relation == 'S') {
- return ordinal * per_day;
- }
- else {
- return (ordinal+ 1) * per_day - 1;
- }
+ return convert_daytime(ordinal, af_info->sourceFreq, af_info->targetFreq, relation == 'E');
}
-static npy_int64 asfreq_DtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoHIGHFREQ(ordinal, relation, 24); }
-static npy_int64 asfreq_DtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoHIGHFREQ(ordinal, relation, 24*60); }
-static npy_int64 asfreq_DtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoHIGHFREQ(ordinal, relation, 24*60*60); }
-
-//************ FROM SECONDLY ***************
-
-static npy_int64 asfreq_StoD(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return (ordinal)/(60*60*24); }
-
-static npy_int64 asfreq_StoA(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoA(asfreq_StoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-
-static npy_int64 asfreq_StoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_StoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-
-static npy_int64 asfreq_StoM(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoM(asfreq_StoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-
-static npy_int64 asfreq_StoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_StoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-
-static npy_int64 asfreq_StoB(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoB(asfreq_StoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
+//************ FROM BUSINESS ***************
+static npy_int64 asfreq_BtoDT(npy_int64 ordinal, char relation, asfreq_info *af_info)
+{
+ ordinal += BDAY_OFFSET;
+ ordinal = (((ordinal - 1) / 5) * 7 +
+ mod_compat(ordinal - 1, 5) + 1 - ORD_OFFSET);
-static npy_int64 asfreq_StoT(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return ordinal / 60;
+ return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S');
}
-static npy_int64 asfreq_StoH(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return ordinal / (60*60);
+static npy_int64 asfreq_BtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, asfreq_DTtoA);
}
-//************ FROM MINUTELY ***************
-
-static npy_int64 asfreq_TtoD(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return (ordinal)/(60*24); }
-
-static npy_int64 asfreq_TtoA(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoA(asfreq_TtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_TtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_TtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_TtoM(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoM(asfreq_TtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_TtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_TtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_TtoB(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoB(asfreq_TtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-
-static npy_int64 asfreq_TtoH(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return ordinal / 60;
+static npy_int64 asfreq_BtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, asfreq_DTtoQ);
}
-static npy_int64 asfreq_TtoS(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- if (relation == 'S') {
- return ordinal*60; }
- else {
- return ordinal*60 + 59;
- }
-}
-
-//************ FROM HOURLY ***************
-
-static npy_int64 asfreq_HtoD(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return ordinal / 24; }
-static npy_int64 asfreq_HtoA(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoA(asfreq_HtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_HtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_HtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_HtoM(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoM(asfreq_HtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_HtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_HtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-static npy_int64 asfreq_HtoB(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoB(asfreq_HtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-
-// calculation works out the same as TtoS, so we just call that function for HtoT
-static npy_int64 asfreq_HtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_TtoS(ordinal, relation, &NULL_AF_INFO); }
-
-static npy_int64 asfreq_HtoS(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- if (relation == 'S') {
- return ordinal*60*60;
- }
- else {
- return (ordinal + 1)*60*60 - 1;
- }
+static npy_int64 asfreq_BtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, asfreq_DTtoM);
}
-//************ FROM BUSINESS ***************
-
-static npy_int64 asfreq_BtoD(npy_int64 ordinal, char relation, asfreq_info *af_info)
- {
- ordinal += BDAY_OFFSET;
- return (((ordinal - 1) / 5) * 7 +
- mod_compat(ordinal - 1, 5) + 1 - ORD_OFFSET);
- }
-
-static npy_int64 asfreq_BtoA(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoA(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-
-static npy_int64 asfreq_BtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
-
-static npy_int64 asfreq_BtoM(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoM(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-
-static npy_int64 asfreq_BtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
+static npy_int64 asfreq_BtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, asfreq_DTtoW);
+}
-static npy_int64 asfreq_BtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoH(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
+//************ FROM WEEKLY ***************
-static npy_int64 asfreq_BtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoT(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
+static npy_int64 asfreq_WtoDT(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ ordinal += WEEK_OFFSET;
+ if (relation != 'S') {
+ ordinal += 1;
+ }
-static npy_int64 asfreq_BtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoS(asfreq_BtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
+ ordinal = ordinal * 7 - 6 + af_info->from_week_end - ORD_OFFSET;
-//************ FROM WEEKLY ***************
+ if (relation != 'S') {
+ ordinal -= 1;
+ }
-static npy_int64 asfreq_WtoD(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- ordinal += WEEK_OFFSET;
- if (relation == 'S') {
- return ordinal * 7 - 6 + af_info->from_week_end - ORD_OFFSET;
- }
- else {
- return ordinal * 7 + af_info->from_week_end - ORD_OFFSET;
- }
+ return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S');
}
static npy_int64 asfreq_WtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoA(asfreq_WtoD(ordinal, 'E', af_info), relation, af_info); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, asfreq_DTtoA);
+}
+
static npy_int64 asfreq_WtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoQ(asfreq_WtoD(ordinal, 'E', af_info), relation, af_info); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, asfreq_DTtoQ);
+}
+
static npy_int64 asfreq_WtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoM(asfreq_WtoD(ordinal, 'E', af_info), relation, &NULL_AF_INFO); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, asfreq_DTtoM);
+}
-static npy_int64 asfreq_WtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_WtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_WtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, asfreq_DTtoW);
+}
static npy_int64 asfreq_WtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
+ int tempStore = af_info->targetFreq;
+ af_info->targetFreq = FR_DAY;
if (dInfoCalc_SetFromAbsDate(&dinfo,
- asfreq_WtoD(ordinal, relation, af_info) + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET,
+ GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ af_info->targetFreq = tempStore;
if (relation == 'S') {
- return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week);
- }
+ return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week);
+ }
else {
- return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week);
- }
+ return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week);
+ }
}
-static npy_int64 asfreq_WtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoH(asfreq_WtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_WtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoT(asfreq_WtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_WtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoS(asfreq_WtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-
//************ FROM MONTHLY ***************
static void MtoD_ym(npy_int64 ordinal, int *y, int *m) {
*y = floordiv(ordinal, 12) + BASE_YEAR;
@@ -555,49 +624,50 @@ static void MtoD_ym(npy_int64 ordinal, int *y, int *m) {
}
-static npy_int64 asfreq_MtoD(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
+static npy_int64 asfreq_MtoDT(npy_int64 ordinal, char relation, asfreq_info* af_info) {
npy_int64 absdate;
int y, m;
- if (relation == 'S') {
- MtoD_ym(ordinal, &y, &m);
- if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
- return absdate - ORD_OFFSET;
- } else {
- MtoD_ym(ordinal + 1, &y, &m);
- if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
- return absdate - 1 - ORD_OFFSET;
+ if (relation == 'E') {
+ ordinal += 1;
+ }
+ MtoD_ym(ordinal, &y, &m);
+ if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
+ ordinal = absdate - ORD_OFFSET;
+
+ if (relation == 'E') {
+ ordinal -= 1;
}
+
+ return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S');
}
static npy_int64 asfreq_MtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoA(asfreq_MtoD(ordinal, 'E', &NULL_AF_INFO), relation, af_info); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, asfreq_DTtoA);
+}
static npy_int64 asfreq_MtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoQ(asfreq_MtoD(ordinal, 'E', &NULL_AF_INFO), relation, af_info); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, asfreq_DTtoQ);
+}
-static npy_int64 asfreq_MtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_MtoD(ordinal, relation, &NULL_AF_INFO), relation, af_info); }
+static npy_int64 asfreq_MtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, asfreq_DTtoW);
+}
static npy_int64 asfreq_MtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
-
struct date_info dinfo;
+
+ int tempStore = af_info->targetFreq;
+ af_info->targetFreq = FR_DAY;
if (dInfoCalc_SetFromAbsDate(&dinfo,
- asfreq_MtoD(ordinal, relation, &NULL_AF_INFO) + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET,
+ GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ af_info->targetFreq = tempStore;
if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); }
else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); }
}
-static npy_int64 asfreq_MtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoH(asfreq_MtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_MtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoT(asfreq_MtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_MtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoS(asfreq_MtoD(ordinal, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); }
-
//************ FROM QUARTERLY ***************
static void QtoD_ym(npy_int64 ordinal, int *y, int *m, asfreq_info *af_info) {
@@ -611,122 +681,124 @@ static void QtoD_ym(npy_int64 ordinal, int *y, int *m, asfreq_info *af_info) {
}
}
-static npy_int64 asfreq_QtoD(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+static npy_int64 asfreq_QtoDT(npy_int64 ordinal, char relation, asfreq_info *af_info) {
npy_int64 absdate;
int y, m;
- if (relation == 'S') {
- QtoD_ym(ordinal, &y, &m, af_info);
- // printf("ordinal: %d, year: %d, month: %d\n", (int) ordinal, y, m);
- if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
- return absdate - ORD_OFFSET;
- } else {
- QtoD_ym(ordinal+1, &y, &m, af_info);
- /* printf("ordinal: %d, year: %d, month: %d\n", (int) ordinal, y, m); */
- if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
- return absdate - 1 - ORD_OFFSET;
+ if (relation == 'E') {
+ ordinal += 1;
}
+
+ QtoD_ym(ordinal, &y, &m, af_info);
+
+ if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE;
+
+ if (relation == 'E') {
+ absdate -= 1;
+ }
+
+ return convert_daytime(absdate - ORD_OFFSET, FR_DAY, af_info->targetFreq, relation != 'S');
}
-static npy_int64 asfreq_QtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_QtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_QtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, asfreq_DTtoQ);
+}
static npy_int64 asfreq_QtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoA(asfreq_QtoD(ordinal, relation, af_info), relation, af_info); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, asfreq_DTtoA);
+}
static npy_int64 asfreq_QtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- return asfreq_DtoM(asfreq_QtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
+ return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, asfreq_DTtoM);
+}
-static npy_int64 asfreq_QtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_QtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_QtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, asfreq_DTtoW);
+}
static npy_int64 asfreq_QtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
+ int tempStore = af_info->targetFreq;
+ af_info->targetFreq = FR_DAY;
if (dInfoCalc_SetFromAbsDate(&dinfo,
- asfreq_QtoD(ordinal, relation, af_info) + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET,
+ GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ af_info->targetFreq = tempStore;
if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); }
else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); }
}
-static npy_int64 asfreq_QtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoH(asfreq_QtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_QtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoT(asfreq_QtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_QtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoS(asfreq_QtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-
-
//************ FROM ANNUAL ***************
-static npy_int64 asfreq_AtoD(npy_int64 ordinal, char relation, asfreq_info *af_info) {
- npy_int64 absdate, final_adj;
- int year;
+static npy_int64 asfreq_AtoDT(npy_int64 year, char relation, asfreq_info *af_info) {
+ npy_int64 absdate;
int month = (af_info->from_a_year_end) % 12;
- // start from 1970
- ordinal += BASE_YEAR;
+ // start from 1970
+ year += BASE_YEAR;
- if (month == 0) { month = 1; }
- else { month += 1; }
+ month += 1;
- if (relation == 'S') {
- if (af_info->from_a_year_end == 12) {year = ordinal;}
- else {year = ordinal - 1;}
- final_adj = 0;
- } else {
- if (af_info->from_a_year_end == 12) {year = ordinal+1;}
- else {year = ordinal;}
- final_adj = -1;
+ if (af_info->from_a_year_end != 12) {
+ year -= 1;
+ }
+
+ if (relation == 'E') {
+ year += 1;
}
+
absdate = absdate_from_ymd(year, month, 1);
+
if (absdate == INT_ERR_CODE) {
- return INT_ERR_CODE;
- }
- return absdate + final_adj - ORD_OFFSET;
+ return INT_ERR_CODE;
+ }
+
+ if (relation == 'E') {
+ absdate -= 1;
+ }
+
+ return convert_daytime(absdate - ORD_OFFSET, FR_DAY, af_info->targetFreq, relation != 'S');
}
-static npy_int64 asfreq_AtoA(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoA(asfreq_AtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_AtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, asfreq_DTtoA);
+}
-static npy_int64 asfreq_AtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoQ(asfreq_AtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_AtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, asfreq_DTtoQ);
+}
-static npy_int64 asfreq_AtoM(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoM(asfreq_AtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_AtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, asfreq_DTtoM);
+}
-static npy_int64 asfreq_AtoW(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoW(asfreq_AtoD(ordinal, relation, af_info), relation, af_info); }
+static npy_int64 asfreq_AtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) {
+ return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, asfreq_DTtoW);
+}
static npy_int64 asfreq_AtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) {
struct date_info dinfo;
+ int tempStore = af_info->targetFreq;
+ af_info->targetFreq = FR_DAY;
if (dInfoCalc_SetFromAbsDate(&dinfo,
- asfreq_AtoD(ordinal, relation, af_info) + ORD_OFFSET,
- GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET,
+ GREGORIAN_CALENDAR)) return INT_ERR_CODE;
+ af_info->targetFreq = tempStore;
if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); }
else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); }
}
-static npy_int64 asfreq_AtoH(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoH(asfreq_AtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_AtoT(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoT(asfreq_AtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-static npy_int64 asfreq_AtoS(npy_int64 ordinal, char relation, asfreq_info *af_info)
- { return asfreq_DtoS(asfreq_AtoD(ordinal, relation, af_info), relation, &NULL_AF_INFO); }
-
static npy_int64 nofunc(npy_int64 ordinal, char relation, asfreq_info *af_info) { return INT_ERR_CODE; }
static npy_int64 no_op(npy_int64 ordinal, char relation, asfreq_info *af_info) { return ordinal; }
// end of frequency specific conversion routines
-static int get_freq_group(int freq) { return (freq/1000)*1000; }
-
static int calc_a_year_end(int freq, int group) {
int result = (freq - group) % 12;
if (result == 0) {return 12;}
@@ -741,30 +813,33 @@ void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) {
int fromGroup = get_freq_group(fromFreq);
int toGroup = get_freq_group(toFreq);
+ af_info->sourceFreq = fromFreq;
+ af_info->targetFreq = toFreq;
+
switch(fromGroup)
{
- case FR_WK: {
+ case FR_WK:
af_info->from_week_end = calc_week_end(fromFreq, fromGroup);
- } break;
- case FR_ANN: {
+ break;
+ case FR_ANN:
af_info->from_a_year_end = calc_a_year_end(fromFreq, fromGroup);
- } break;
- case FR_QTR: {
+ break;
+ case FR_QTR:
af_info->from_q_year_end = calc_a_year_end(fromFreq, fromGroup);
- } break;
+ break;
}
switch(toGroup)
{
- case FR_WK: {
+ case FR_WK:
af_info->to_week_end = calc_week_end(toFreq, toGroup);
- } break;
- case FR_ANN: {
+ break;
+ case FR_ANN:
af_info->to_a_year_end = calc_a_year_end(toFreq, toGroup);
- } break;
- case FR_QTR: {
+ break;
+ case FR_QTR:
af_info->to_q_year_end = calc_a_year_end(toFreq, toGroup);
- } break;
+ break;
}
}
@@ -786,10 +861,15 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
case FR_MTH: return &asfreq_AtoM;
case FR_WK: return &asfreq_AtoW;
case FR_BUS: return &asfreq_AtoB;
- case FR_DAY: return &asfreq_AtoD;
- case FR_HR: return &asfreq_AtoH;
- case FR_MIN: return &asfreq_AtoT;
- case FR_SEC: return &asfreq_AtoS;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_AtoDT;
+
default: return &nofunc;
}
@@ -801,10 +881,14 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
case FR_MTH: return &asfreq_QtoM;
case FR_WK: return &asfreq_QtoW;
case FR_BUS: return &asfreq_QtoB;
- case FR_DAY: return &asfreq_QtoD;
- case FR_HR: return &asfreq_QtoH;
- case FR_MIN: return &asfreq_QtoT;
- case FR_SEC: return &asfreq_QtoS;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_QtoDT;
default: return &nofunc;
}
@@ -816,10 +900,14 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
case FR_MTH: return &no_op;
case FR_WK: return &asfreq_MtoW;
case FR_BUS: return &asfreq_MtoB;
- case FR_DAY: return &asfreq_MtoD;
- case FR_HR: return &asfreq_MtoH;
- case FR_MIN: return &asfreq_MtoT;
- case FR_SEC: return &asfreq_MtoS;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_MtoDT;
default: return &nofunc;
}
@@ -831,10 +919,14 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
case FR_MTH: return &asfreq_WtoM;
case FR_WK: return &asfreq_WtoW;
case FR_BUS: return &asfreq_WtoB;
- case FR_DAY: return &asfreq_WtoD;
- case FR_HR: return &asfreq_WtoH;
- case FR_MIN: return &asfreq_WtoT;
- case FR_SEC: return &asfreq_WtoS;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_WtoDT;
default: return &nofunc;
}
@@ -845,112 +937,84 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
case FR_QTR: return &asfreq_BtoQ;
case FR_MTH: return &asfreq_BtoM;
case FR_WK: return &asfreq_BtoW;
- case FR_DAY: return &asfreq_BtoD;
case FR_BUS: return &no_op;
- case FR_HR: return &asfreq_BtoH;
- case FR_MIN: return &asfreq_BtoT;
- case FR_SEC: return &asfreq_BtoS;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_BtoDT;
default: return &nofunc;
}
case FR_DAY:
- switch(toGroup)
- {
- case FR_ANN: return &asfreq_DtoA;
- case FR_QTR: return &asfreq_DtoQ;
- case FR_MTH: return &asfreq_DtoM;
- case FR_WK: return &asfreq_DtoW;
- case FR_BUS: return &asfreq_DtoB;
- case FR_DAY: return &asfreq_DtoD;
- case FR_HR: return &asfreq_DtoH;
- case FR_MIN: return &asfreq_DtoT;
- case FR_SEC: return &asfreq_DtoS;
- default: return &nofunc;
- }
-
case FR_HR:
- switch(toGroup)
- {
- case FR_ANN: return &asfreq_HtoA;
- case FR_QTR: return &asfreq_HtoQ;
- case FR_MTH: return &asfreq_HtoM;
- case FR_WK: return &asfreq_HtoW;
- case FR_BUS: return &asfreq_HtoB;
- case FR_DAY: return &asfreq_HtoD;
- case FR_HR: return &no_op;
- case FR_MIN: return &asfreq_HtoT;
- case FR_SEC: return &asfreq_HtoS;
- default: return &nofunc;
- }
-
case FR_MIN:
- switch(toGroup)
- {
- case FR_ANN: return &asfreq_TtoA;
- case FR_QTR: return &asfreq_TtoQ;
- case FR_MTH: return &asfreq_TtoM;
- case FR_WK: return &asfreq_TtoW;
- case FR_BUS: return &asfreq_TtoB;
- case FR_DAY: return &asfreq_TtoD;
- case FR_HR: return &asfreq_TtoH;
- case FR_MIN: return &no_op;
- case FR_SEC: return &asfreq_TtoS;
- default: return &nofunc;
- }
-
case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
switch(toGroup)
{
- case FR_ANN: return &asfreq_StoA;
- case FR_QTR: return &asfreq_StoQ;
- case FR_MTH: return &asfreq_StoM;
- case FR_WK: return &asfreq_StoW;
- case FR_BUS: return &asfreq_StoB;
- case FR_DAY: return &asfreq_StoD;
- case FR_HR: return &asfreq_StoH;
- case FR_MIN: return &asfreq_StoT;
- case FR_SEC: return &no_op;
+ case FR_ANN: return &asfreq_DTtoA;
+ case FR_QTR: return &asfreq_DTtoQ;
+ case FR_MTH: return &asfreq_DTtoM;
+ case FR_WK: return &asfreq_DTtoW;
+ case FR_BUS: return &asfreq_DTtoB;
+ case FR_DAY:
+ case FR_HR:
+ case FR_MIN:
+ case FR_SEC:
+ case FR_MS:
+ case FR_US:
+ case FR_NS:
+ return &asfreq_WithinDT;
default: return &nofunc;
}
+
default: return &nofunc;
}
}
-double get_abs_time(int freq, npy_int64 daily_ord, npy_int64 ordinal) {
+double get_abs_time(int freq, npy_int64 date_ordinal, npy_int64 ordinal) {
+ //printf("get_abs_time %d %lld %lld\n", freq, date_ordinal, ordinal);
- npy_int64 start_ord, per_day, unit;
- switch(freq)
- {
- case FR_HR:
- per_day = 24;
- unit = 60 * 60;
- break;
- case FR_MIN:
- per_day = 24*60;
- unit = 60;
- break;
- case FR_SEC:
- per_day = 24*60*60;
- unit = 1;
- break;
- default:
- return 0; // 24*60*60 - 1;
+ int freq_index, day_index, base_index;
+ npy_int64 per_day, start_ord;
+ double unit, result;
+
+ if (freq <= FR_DAY) {
+ return 0;
}
- start_ord = asfreq_DtoHIGHFREQ(daily_ord, 'S', per_day);
- /* printf("start_ord: %d\n", start_ord); */
- return (double) ( unit * (ordinal - start_ord));
- /* if (ordinal >= 0) { */
- /* } */
- /* else { */
- /* return (double) (unit * mod_compat(ordinal - start_ord, per_day)); */
- /* } */
+ freq_index = get_freq_group_index(freq);
+ day_index = get_freq_group_index(FR_DAY);
+ base_index = get_freq_group_index(FR_SEC);
+
+ //printf(" indices: day %d, freq %d, base %d\n", day_index, freq_index, base_index);
+
+ per_day = get_daytime_conversion_factor(day_index, freq_index);
+ unit = get_daytime_conversion_factor(freq_index, base_index);
+
+ //printf(" per_day: %lld, unit: %f\n", per_day, unit);
+
+ if (base_index < freq_index) {
+ unit = 1 / unit;
+ //printf(" corrected unit: %f\n", unit);
+ }
+
+ start_ord = date_ordinal * per_day;
+ //printf("start_ord: %lld\n", start_ord);
+ result = (double) ( unit * (ordinal - start_ord));
+ //printf(" result: %f\n", result);
+ return result;
}
/* Sets the time part of the DateTime object. */
-static
-int dInfoCalc_SetFromAbsTime(struct date_info *dinfo,
- double abstime)
+static int dInfoCalc_SetFromAbsTime(struct date_info *dinfo,
+ double abstime)
{
int inttime;
int hour,minute;
@@ -973,18 +1037,16 @@ int dInfoCalc_SetFromAbsTime(struct date_info *dinfo,
/* Set the instance's value using the given date and time. calendar
may be set to the flags: GREGORIAN_CALENDAR, JULIAN_CALENDAR to
indicate the calendar to be used. */
-static
-int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo,
- npy_int64 absdate,
- double abstime,
- int calendar)
+static int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo,
+ npy_int64 absdate,
+ double abstime,
+ int calendar)
{
-
/* Bounds check */
Py_AssertWithArg(abstime >= 0.0 && abstime <= SECONDS_PER_DAY,
- PyExc_ValueError,
- "abstime out of range (0.0 - 86400.0): %f",
- abstime);
+ PyExc_ValueError,
+ "abstime out of range (0.0 - 86400.0): %f",
+ abstime);
/* Calculate the date */
if (dInfoCalc_SetFromAbsDate(dinfo, absdate, calendar)) goto onError;
@@ -993,7 +1055,7 @@ int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo,
if (dInfoCalc_SetFromAbsTime(dinfo, abstime)) goto onError;
return 0;
- onError:
+onError:
return INT_ERR_CODE;
}
@@ -1007,15 +1069,16 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, char relation)
freq_conv_func func;
asfreq_info finfo;
- func = get_asfreq_func(freq1, freq2);
+ func = get_asfreq_func(freq1, freq2);
+
get_asfreq_info(freq1, freq2, &finfo);
val = (*func)(period_ordinal, relation, &finfo);
if (val == INT_ERR_CODE) {
- // Py_Error(PyExc_ValueError, "Unable to convert to desired frequency.");
- goto onError;
- }
+ //Py_Error(PyExc_ValueError, "Unable to convert to desired frequency.");
+ goto onError;
+ }
return val;
onError:
return INT_ERR_CODE;
@@ -1024,19 +1087,33 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, char relation)
/* generate an ordinal in period space */
npy_int64 get_period_ordinal(int year, int month, int day,
- int hour, int minute, int second,
- int freq)
+ int hour, int minute, int second, int microseconds, int picoseconds,
+ int freq)
{
- npy_int64 absdays, delta;
+ npy_int64 absdays, delta, seconds;
npy_int64 weeks, days;
npy_int64 ordinal, day_adj;
int freq_group, fmonth, mdiff;
freq_group = get_freq_group(freq);
- if (freq == FR_SEC) {
+ if (freq == FR_SEC || freq == FR_MS || freq == FR_US || freq == FR_NS) {
+
absdays = absdate_from_ymd(year, month, day);
delta = (absdays - ORD_OFFSET);
- return (npy_int64)(delta*86400 + hour*3600 + minute*60 + second);
+ seconds = (npy_int64)(delta * 86400 + hour * 3600 + minute * 60 + second);
+
+ switch(freq) {
+ case FR_MS:
+ return seconds * 1000 + microseconds / 1000;
+
+ case FR_US:
+ return seconds * 1000000 + microseconds;
+
+ case FR_NS:
+ return seconds * 1000000000 + microseconds * 1000 + picoseconds / 1000;
+ }
+
+ return seconds;
}
if (freq == FR_MIN) {
@@ -1056,12 +1133,12 @@ npy_int64 get_period_ordinal(int year, int month, int day,
if (freq == FR_DAY)
{
- return (npy_int64) (absdate_from_ymd(year, month, day) - ORD_OFFSET);
+ return (npy_int64) (absdate_from_ymd(year, month, day) - ORD_OFFSET);
}
if (freq == FR_UND)
{
- return (npy_int64) (absdate_from_ymd(year, month, day) - ORD_OFFSET);
+ return (npy_int64) (absdate_from_ymd(year, month, day) - ORD_OFFSET);
}
if (freq == FR_BUS)
@@ -1091,26 +1168,26 @@ npy_int64 get_period_ordinal(int year, int month, int day,
if (freq_group == FR_QTR)
{
- fmonth = freq - FR_QTR;
- if (fmonth == 0) fmonth = 12;
+ fmonth = freq - FR_QTR;
+ if (fmonth == 0) fmonth = 12;
- mdiff = month - fmonth;
- if (mdiff < 0) mdiff += 12;
- if (month >= fmonth) mdiff += 12;
+ mdiff = month - fmonth;
+ if (mdiff < 0) mdiff += 12;
+ if (month >= fmonth) mdiff += 12;
- return (year - BASE_YEAR) * 4 + (mdiff - 1) / 3;
+ return (year - BASE_YEAR) * 4 + (mdiff - 1) / 3;
}
if (freq_group == FR_ANN)
{
- fmonth = freq - FR_ANN;
- if (fmonth == 0) fmonth = 12;
- if (month <= fmonth) {
- return year - BASE_YEAR;
- }
- else {
- return year - BASE_YEAR + 1;
- }
+ fmonth = freq - FR_ANN;
+ if (fmonth == 0) fmonth = 12;
+ if (month <= fmonth) {
+ return year - BASE_YEAR;
+ }
+ else {
+ return year - BASE_YEAR + 1;
+ }
}
Py_Error(PyExc_RuntimeError, "Unable to generate frequency ordinal");
@@ -1120,22 +1197,23 @@ npy_int64 get_period_ordinal(int year, int month, int day,
}
/*
- Returns the proleptic Gregorian ordinal of the date, as an integer.
- This corresponds to the number of days since Jan., 1st, 1AD.
- When the instance has a frequency less than daily, the proleptic date
- is calculated for the last day of the period.
-*/
+ Returns the proleptic Gregorian ordinal of the date, as an integer.
+ This corresponds to the number of days since Jan., 1st, 1AD.
+ When the instance has a frequency less than daily, the proleptic date
+ is calculated for the last day of the period.
+ */
npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq)
{
asfreq_info af_info;
- npy_int64 (*toDaily)(npy_int64, char, asfreq_info*);
+ freq_conv_func toDaily = NULL;
if (freq == FR_DAY)
return period_ordinal + ORD_OFFSET;
toDaily = get_asfreq_func(freq, FR_DAY);
get_asfreq_info(freq, FR_DAY, &af_info);
+
return toDaily(period_ordinal, 'E', &af_info) + ORD_OFFSET;
}
@@ -1147,8 +1225,8 @@ char *str_replace(const char *s, const char *old, const char *new) {
for (i = 0; s[i] != '\0'; i++) {
if (strstr(&s[i], old) == &s[i]) {
- count++;
- i += oldlen - 1;
+ count++;
+ i += oldlen - 1;
}
}
@@ -1256,12 +1334,12 @@ static int _ISOWeek(struct date_info *dinfo)
if (week < 0) {
/* The day lies in last week of the previous year */
if ((week > -2) ||
- (week == -2 && dInfoCalc_Leapyear(dinfo->year-1, dinfo->calendar)))
+ (week == -2 && dInfoCalc_Leapyear(dinfo->year-1, dinfo->calendar)))
week = 53;
else
week = 52;
} else if (week == 53) {
- /* Check if the week belongs to year or year+1 */
+ /* Check if the week belongs to year or year+1 */
if (31-dinfo->day + dinfo->day_of_week < 3) {
week = 1;
}
@@ -1273,15 +1351,19 @@ static int _ISOWeek(struct date_info *dinfo)
int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo)
{
npy_int64 absdate = get_python_ordinal(ordinal, freq);
- /* printf("freq: %d, absdate: %d\n", freq, (int) absdate); */
double abstime = get_abs_time(freq, absdate - ORD_OFFSET, ordinal);
- if (abstime < 0) {
- abstime += 86400;
- absdate -= 1;
- }
+
+ while (abstime < 0) {
+ abstime += 86400;
+ absdate -= 1;
+ }
+ while (abstime >= 86400) {
+ abstime -= 86400;
+ absdate += 1;
+ }
if(dInfoCalc_SetFromAbsDateTime(dinfo, absdate,
- abstime, GREGORIAN_CALENDAR))
+ abstime, GREGORIAN_CALENDAR))
return INT_ERR_CODE;
return 0;
diff --git a/pandas/src/period.h b/pandas/src/period.h
index 4ba92bf8fde41..af35838ad0355 100644
--- a/pandas/src/period.h
+++ b/pandas/src/period.h
@@ -85,6 +85,9 @@
#define FR_HR 7000 /* Hourly */
#define FR_MIN 8000 /* Minutely */
#define FR_SEC 9000 /* Secondly */
+#define FR_MS 10000 /* Millisecondly */
+#define FR_US 11000 /* Microsecondly */
+#define FR_NS 12000 /* Nanosecondly */
#define FR_UND -10000 /* Undefined */
@@ -102,6 +105,9 @@ typedef struct asfreq_info {
int from_q_year_end; // month the year ends on in the "from" frequency
int to_q_year_end; // month the year ends on in the "to" frequency
+
+ int sourceFreq;
+ int targetFreq;
} asfreq_info;
@@ -130,7 +136,7 @@ typedef npy_int64 (*freq_conv_func)(npy_int64, char, asfreq_info*);
npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, char relation);
npy_int64 get_period_ordinal(int year, int month, int day,
- int hour, int minute, int second,
+ int hour, int minute, int second, int microseconds, int picoseconds,
int freq);
npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq);
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 9405f3c58bfd7..55f70e9e4fe28 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1664,7 +1664,7 @@ def test_timedelta64(self):
from pandas import date_range
from datetime import datetime, timedelta
- Series(np.array([1100, 20], dtype='timedelta64[s]')).to_string()
+ Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 852d02764affc..f29cee6942672 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -29,6 +29,8 @@
import pandas as pd
from pandas.lib import Timestamp
+from pandas import _np_version_under1p7
+
class TestIndex(unittest.TestCase):
_multiprocess_can_split_ = True
@@ -230,6 +232,25 @@ def test_asof(self):
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
+ def test_nanosecond_index_access(self):
+ if _np_version_under1p7:
+ import nose
+
+ raise nose.SkipTest('numpy >= 1.7 required')
+
+ from pandas import Series, Timestamp, DatetimeIndex
+
+ s = Series([Timestamp('20130101')]).values.view('i8')[0]
+ r = DatetimeIndex([s + 50 + i for i in range(100)])
+ x = Series(np.random.randn(100), index=r)
+
+ first_value = x.asof(x.index[0])
+
+ # this does not yet work, as parsing strings is done via dateutil
+ #self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
+
+ self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
+
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py
index 7c34562e64f6e..bfbd28f7bb4a4 100644
--- a/pandas/tseries/converter.py
+++ b/pandas/tseries/converter.py
@@ -458,7 +458,13 @@ def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
- if freq == FreqGroup.FR_SEC:
+ if freq == FreqGroup.FR_NS:
+ periodsperday = 24 * 60 * 60 * 1000000000
+ elif freq == FreqGroup.FR_US:
+ periodsperday = 24 * 60 * 60 * 1000000
+ elif freq == FreqGroup.FR_MS:
+ periodsperday = 24 * 60 * 60 * 1000
+ elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 2c4fc0d1b9c78..d1fd51c073f83 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -12,6 +12,7 @@
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
+from pandas import _np_version_under1p7
class FreqGroup(object):
@@ -24,6 +25,9 @@ class FreqGroup(object):
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
+ FR_MS = 10000
+ FR_US = 11000
+ FR_NS = 12000
class Resolution(object):
@@ -116,7 +120,7 @@ def _get_freq_str(base, mult=1):
# Offset names ("time rules") and related functions
-from pandas.tseries.offsets import (Micro, Milli, Second, Minute, Hour,
+from pandas.tseries.offsets import (Nano, Micro, Milli, Second, Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
@@ -275,6 +279,9 @@ def _get_freq_str(base, mult=1):
}
+if not _np_version_under1p7:
+ _offset_map['N'] = Nano()
+
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
@@ -291,6 +298,9 @@ def _get_freq_str(base, mult=1):
'B': 'B',
'T': 'T',
'S': 'S',
+ 'L': 'L',
+ 'U': 'U',
+ 'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
@@ -609,6 +619,9 @@ def get_standard_freq(freq):
"H": 7000, # Hourly
"T": 8000, # Minutely
"S": 9000, # Secondly
+ "L": 10000, # Millisecondly
+ "U": 11000, # Microsecondly
+ "N": 12000, # Nanosecondly
}
_reverse_period_code_map = {}
@@ -636,7 +649,10 @@ def _period_alias_dictionary():
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
-
+ L_aliases = ["L", "MS", "MILLISECOND", "MILLISECONDLY"]
+ U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
+ N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
+
for k in M_aliases:
alias_dict[k] = 'M'
@@ -655,6 +671,15 @@ def _period_alias_dictionary():
for k in S_aliases:
alias_dict[k] = 'S'
+ for k in L_aliases:
+ alias_dict[k] = 'L'
+
+ for k in U_aliases:
+ alias_dict[k] = 'U'
+
+ for k in N_aliases:
+ alias_dict[k] = 'N'
+
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR",
"YEARLY"]
@@ -722,6 +747,9 @@ def _period_alias_dictionary():
"hour": "H",
"minute": "T",
"second": "S",
+ "millisecond": "L",
+ "microsecond": "U",
+ "nanosecond": "N",
}
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 847896871045b..24e94f4c2d482 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1162,8 +1162,16 @@ def get_value(self, series, key):
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
+ timestamp = None
+ #if isinstance(key, Timestamp):
+ # timestamp = key
+ #el
if isinstance(key, datetime):
- return self.get_value_maybe_box(series, key)
+ # needed to localize naive datetimes
+ timestamp = Timestamp(key, tz=self.tz)
+
+ if timestamp:
+ return self.get_value_maybe_box(series, timestamp)
try:
return _maybe_box(self, Index.get_value(self, series, key), series, key)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 232ebd2c3726c..e496bf46cf57a 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -8,6 +8,8 @@
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta
import pandas.tslib as tslib
+import numpy as np
+from pandas import _np_version_under1p7
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
@@ -25,7 +27,6 @@ class ApplyTypeError(TypeError):
class CacheableOffset(object):
-
_cacheable = True
@@ -107,7 +108,7 @@ def _should_cache(self):
def _params(self):
attrs = [(k, v) for k, v in compat.iteritems(vars(self))
if k not in ['kwds', '_offset', 'name', 'normalize',
- 'busdaycalendar']]
+ 'busdaycalendar']]
attrs.extend(list(self.kwds.items()))
attrs = sorted(set(attrs))
@@ -123,7 +124,7 @@ def __repr__(self):
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0)
- or attr.startswith('_')):
+ or attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
@@ -157,6 +158,7 @@ def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
+
other = to_offset(other)
if not isinstance(other, DateOffset):
@@ -255,6 +257,7 @@ class BusinessDay(CacheableOffset, DateOffset):
"""
DateOffset subclass representing possibly n business days
"""
+
def __init__(self, n=1, **kwds):
self.n = int(n)
self.kwds = kwds
@@ -412,6 +415,7 @@ class CustomBusinessDay(BusinessDay):
def __init__(self, n=1, **kwds):
# Check we have the required numpy version
from distutils.version import LooseVersion
+
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError("CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " +
@@ -476,7 +480,7 @@ def apply(self, other):
day64 = dt64.astype('datetime64[D]')
time = dt64 - day64
- if self.n<=0:
+ if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
@@ -558,7 +562,7 @@ def apply(self, other):
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
- % 7) - 4, 0)
+ % 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
@@ -621,6 +625,7 @@ class Week(CacheableOffset, DateOffset):
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
+
def __init__(self, n=1, **kwds):
self.n = n
self.weekday = kwds.get('weekday', None)
@@ -628,7 +633,7 @@ def __init__(self, n=1, **kwds):
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
- self.weekday)
+ self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
@@ -667,6 +672,7 @@ def rule_code(self):
suffix = '-%s' % (_weekday_dict[self.weekday])
return 'W' + suffix
+
_weekday_dict = {
0: 'MON',
1: 'TUE',
@@ -696,6 +702,7 @@ class WeekOfMonth(CacheableOffset, DateOffset):
5: Saturdays
6: Sundays
"""
+
def __init__(self, n=1, **kwds):
self.n = n
self.weekday = kwds['weekday']
@@ -706,10 +713,10 @@ def __init__(self, n=1, **kwds):
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
- self.weekday)
+ self.weekday)
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=day<=3, got %d' %
- self.week)
+ self.week)
self.kwds = kwds
@@ -773,7 +780,7 @@ def apply(self, other):
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
- % 7) - 4, 0)
+ % 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
@@ -1068,7 +1075,7 @@ def _decrement(date):
def _rollf(date):
if (date.month != self.month or
- date.day < tslib.monthrange(date.year, date.month)[1]):
+ date.day < tslib.monthrange(date.year, date.month)[1]):
date = _increment(date)
return date
@@ -1120,7 +1127,7 @@ def _increment(date):
def _decrement(date):
year = date.year
if date.month < self.month or (date.month == self.month and
- date.day == 1):
+ date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
@@ -1164,6 +1171,7 @@ def rule_code(self):
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
+
return f
@@ -1191,6 +1199,7 @@ def __add__(self, other):
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
+
other = to_offset(other)
if isinstance(other, Tick):
@@ -1206,6 +1215,7 @@ def __hash__(self):
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
+
other = to_offset(other)
if isinstance(other, Tick):
@@ -1265,8 +1275,11 @@ def _delta_to_tick(delta):
def _delta_to_nanoseconds(delta):
- if isinstance(delta, Tick):
+ if isinstance(delta, np.timedelta64):
+ return delta.astype('timedelta64[ns]').item()
+ elif isinstance(delta, Tick):
delta = delta.delta
+
return (delta.days * 24 * 60 * 60 * 1000000
+ delta.seconds * 1000000
+ delta.microseconds) * 1000
@@ -1302,9 +1315,10 @@ class Micro(Tick):
class Nano(Tick):
- _inc = 1
+ _inc = np.timedelta64(1, 'ns') if not _np_version_under1p7 else 1
_rule_base = 'N'
+
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
@@ -1355,6 +1369,7 @@ def generate_range(start=None, end=None, periods=None,
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
+
offset = get_offset(time_rule)
start = to_datetime(start)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index b6f3c3c83f3d8..cd81867ff8f08 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -125,7 +125,7 @@ def __init__(self, value=None, freq=None, ordinal=None,
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
- dt.hour, dt.minute, dt.second,
+ dt.hour, dt.minute, dt.second, dt.microsecond, 0,
base)
self.freq = _freq_mod._get_freq_str(base)
@@ -447,6 +447,11 @@ def _get_date_and_freq(value, freq):
freq = 'T'
elif reso == 'second':
freq = 'S'
+ elif reso == 'microsecond':
+ if dt.microsecond % 1000 == 0:
+ freq = 'L'
+ else:
+ freq = 'U'
else:
raise ValueError("Invalid frequency or could not infer: %s" % reso)
@@ -1238,7 +1243,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None,
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
- val = tslib.period_ordinal(y, m, 1, 1, 1, 1, base)
+ val = tslib.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
@@ -1247,7 +1252,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None,
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
- ordinals.append(tslib.period_ordinal(y, mth, d, h, mn, s, base))
+ ordinals.append(tslib.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
@@ -1276,7 +1281,7 @@ def _ordinal_from_fields(year, month, quarter, day, hour, minute,
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
- return tslib.period_ordinal(year, month, day, hour, minute, second, base)
+ return tslib.period_ordinal(year, month, day, hour, minute, second, 0, 0, base)
def _quarter_to_myear(year, quarter, freq):
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 6386f61a24a85..00a3d392a45c0 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -8,7 +8,7 @@
import numpy as np
-from pandas import Index, DatetimeIndex, date_range, period_range
+from pandas import Index, DatetimeIndex, Timestamp, date_range, period_range
from pandas.tseries.frequencies import to_offset, infer_freq
from pandas.tseries.tools import to_datetime
@@ -17,6 +17,8 @@
import pandas.lib as lib
+from pandas import _np_version_under1p7
+
def test_to_offset_multiple():
freqstr = '2h30min'
@@ -47,6 +49,12 @@ def test_to_offset_multiple():
expected = offsets.Milli(10075)
assert(result == expected)
+ if not _np_version_under1p7:
+ freqstr = '2800N'
+ result = to_offset(freqstr)
+ expected = offsets.Nano(2800)
+ assert(result == expected)
+
# malformed
try:
to_offset('2h20m')
@@ -116,13 +124,12 @@ def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
- idx = DatetimeIndex(np.arange(0, 100, 10))
- inferred = idx.inferred_freq
-
- self.assert_(inferred == '10N')
+ if _np_version_under1p7:
+ raise nose.SkipTest("requires numpy >= 1.7 to run")
+ self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
- b = datetime.now()
+ b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 5b4e3251683bb..a77b0afb20b52 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -27,6 +27,8 @@
import pandas.util.testing as tm
from pandas.tseries.offsets import BusinessMonthEnd, CacheableOffset
+from pandas import _np_version_under1p7
+
_multiprocess_can_split_ = True
@@ -1620,14 +1622,15 @@ def test_onOffset(self):
def assertEq(offset, base, expected):
actual = offset + base
+ actual_swapped = base + offset
try:
assert actual == expected
+ assert actual_swapped == expected
except AssertionError:
raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s)"
"\nAt Date: %s" %
(expected, actual, offset, base))
-
def test_Hour():
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
@@ -1668,6 +1671,58 @@ def test_Second():
assert not Second().isAnchored()
+def test_Millisecond():
+ assertEq(Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000))
+ assertEq(Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
+ assertEq(Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
+ assertEq(2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
+ assertEq(-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
+
+ assert (Milli(3) + Milli(2)) == Milli(5)
+ assert (Milli(3) - Milli(2)) == Milli()
+
+
+def test_MillisecondTimestampArithmetic():
+ assertEq(Milli(), Timestamp('2010-01-01'), Timestamp('2010-01-01 00:00:00.001'))
+ assertEq(Milli(-1), Timestamp('2010-01-01 00:00:00.001'), Timestamp('2010-01-01'))
+
+
+def test_Microsecond():
+ assertEq(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))
+ assertEq(Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
+ assertEq(2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2))
+ assertEq(-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
+
+ assert (Micro(3) + Micro(2)) == Micro(5)
+ assert (Micro(3) - Micro(2)) == Micro()
+
+
+def test_NanosecondGeneric():
+ timestamp = Timestamp(datetime(2010, 1, 1))
+ assert timestamp.nanosecond == 0
+
+ result = timestamp + Nano(10)
+ assert result.nanosecond == 10
+
+ reverse_result = Nano(10) + timestamp
+ assert reverse_result.nanosecond == 10
+
+
+def test_Nanosecond():
+ if _np_version_under1p7:
+ import nose
+ raise nose.SkipTest('numpy >= 1.7 required')
+
+ timestamp = Timestamp(datetime(2010, 1, 1))
+ assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns'))
+ assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp)
+ assertEq(2 * Nano(), timestamp, timestamp + np.timedelta64(2, 'ns'))
+ assertEq(-1 * Nano(), timestamp + np.timedelta64(1, 'ns'), timestamp)
+
+ assert (Nano(3) + Nano(2)) == Nano(5)
+ assert (Nano(3) - Nano(2)) == Nano()
+
+
def test_tick_offset():
assert not Day().isAnchored()
assert not Milli().isAnchored()
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 173ebeb199b3b..9abecc0aeeec6 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -195,7 +195,7 @@ def test_period_constructor(self):
self.assertRaises(ValueError, Period, ordinal=200701)
- self.assertRaises(KeyError, Period, '2007-1-1', freq='U')
+ self.assertRaises(KeyError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
@@ -208,6 +208,16 @@ def test_repr(self):
p = Period('2000-12-15')
self.assert_('2000-12-15' in repr(p))
+ def test_millisecond_repr(self):
+ p = Period('2000-01-01 12:15:02.123')
+
+ self.assertEquals("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
+
+ def test_microsecond_repr(self):
+ p = Period('2000-01-01 12:15:02.123567')
+
+ self.assertEquals("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
+
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
@@ -466,7 +476,14 @@ def test_constructor_infer_freq(self):
p = Period('2007-01-01 07:10:15')
self.assert_(p.freq == 'S')
- self.assertRaises(ValueError, Period, '2007-01-01 07:10:15.123456')
+ p = Period('2007-01-01 07:10:15.123')
+ self.assert_(p.freq == 'L')
+
+ p = Period('2007-01-01 07:10:15.123000')
+ self.assert_(p.freq == 'L')
+
+ p = Period('2007-01-01 07:10:15.123400')
+ self.assert_(p.freq == 'U')
def noWrap(item):
@@ -1115,9 +1132,9 @@ def test_constructor_field_arrays(self):
self.assert_(idx.equals(exp))
def test_constructor_U(self):
- # U was used as undefined period
+ # X was used as undefined period
self.assertRaises(KeyError, period_range, '2007-1-1', periods=500,
- freq='U')
+ freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
@@ -2168,6 +2185,15 @@ def test_minutely(self):
def test_secondly(self):
self._check_freq('S', '1970-01-01')
+
+ def test_millisecondly(self):
+ self._check_freq('L', '1970-01-01')
+
+ def test_microsecondly(self):
+ self._check_freq('U', '1970-01-01')
+
+ def test_nanosecondly(self):
+ self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 07903f9a9374a..e4504420bacc2 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -13,7 +13,7 @@
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
- isnull, date_range, Timestamp, DatetimeIndex,
+ isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
from pandas.core.daterange import DateRange
@@ -43,6 +43,7 @@
from pandas.core.datetools import BDay
import pandas.core.common as com
from pandas import concat
+from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
@@ -663,7 +664,7 @@ def test_index_cast_datetime64_other_units(self):
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
- if np.__version__ >= LooseVersion('1.7'):
+ if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
@@ -1332,6 +1333,28 @@ def test_to_period(self):
pts = ts.to_period('M')
self.assert_(pts.index.equals(exp.index.asfreq('M')))
+ def create_dt64_based_index(self):
+ data = [Timestamp('2007-01-01 10:11:12.123456Z'),
+ Timestamp('2007-01-01 10:11:13.789123Z')]
+ index = DatetimeIndex(data)
+ return index
+
+ def test_to_period_millisecond(self):
+ index = self.create_dt64_based_index()
+
+ period = index.to_period(freq='L')
+ self.assertEqual(2, len(period))
+ self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
+ self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
+
+ def test_to_period_microsecond(self):
+ index = self.create_dt64_based_index()
+
+ period = index.to_period(freq='U')
+ self.assertEqual(2, len(period))
+ self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
+ self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
+
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
@@ -1598,7 +1621,7 @@ def test_frame_datetime64_handling_groupby(self):
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
- self.assertEqual(result['date'][3], datetime(2012,7,3))
+ self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
@@ -2020,6 +2043,27 @@ def test_join_self(self):
joined = index.join(index, how=kind)
self.assert_(index is joined)
+ def assert_index_parameters(self, index):
+ assert index.freq == '40960N'
+ assert index.inferred_freq == '40960N'
+
+ def test_ns_index(self):
+
+ if _np_version_under1p7:
+ raise nose.SkipTest
+
+ nsamples = 400
+ ns = int(1e9 / 24414)
+ dtstart = np.datetime64('2012-09-20T00:00:00')
+
+ dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
+ freq = ns * pd.datetools.Nano()
+ index = pd.DatetimeIndex(dt, freq=freq, name='time')
+ self.assert_index_parameters(index)
+
+ new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
+ self.assert_index_parameters(new_index)
+
class TestDatetime64(unittest.TestCase):
"""
diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py
index d1f4f647db0e1..2e8418d8f50b2 100644
--- a/pandas/tseries/tests/test_timeseries_legacy.py
+++ b/pandas/tseries/tests/test_timeseries_legacy.py
@@ -255,6 +255,7 @@ def test_rule_aliases(self):
rule = datetools.to_offset('10us')
self.assert_(rule == datetools.Micro(10))
+
class TestLegacyCompat(unittest.TestCase):
def setUp(self):
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index b9a7356412a10..4e7daede03085 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -1,10 +1,20 @@
import unittest
+import nose
import numpy as np
from pandas import tslib
from datetime import datetime
+from pandas.core.api import Timestamp
+
+from pandas.tslib import period_asfreq
+
+from pandas.tseries.frequencies import get_freq
+
+from pandas import _np_version_under1p7
+
+
class TestDatetimeParsingWrappers(unittest.TestCase):
def test_verify_datetime_bounds(self):
for year in (1, 1000, 1677, 2262, 5000):
@@ -46,6 +56,7 @@ def test_does_not_convert_mixed_integer(self):
tslib._does_string_look_like_datetime(good_date_string)
)
+
class TestArrayToDatetime(unittest.TestCase):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
@@ -118,6 +129,67 @@ def test_coerce_of_invalid_datetimes(self):
)
)
+
+class TestTimestamp(unittest.TestCase):
+ def setUp(self):
+ if _np_version_under1p7:
+ raise nose.SkipTest('numpy >= 1.7 required')
+ self.timestamp = Timestamp(datetime.utcnow())
+
+ def assert_ns_timedelta(self, modified_timestamp, expected_value):
+ value = self.timestamp.value
+ modified_value = modified_timestamp.value
+
+ self.assertEquals(modified_value - value, expected_value)
+
+ def test_timedelta_ns_arithmetic(self):
+ self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'), -123)
+
+ def test_timedelta_ns_based_arithmetic(self):
+ self.assert_ns_timedelta(self.timestamp + np.timedelta64(1234567898, 'ns'), 1234567898)
+
+ def test_timedelta_us_arithmetic(self):
+ self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'), -123000)
+
+ def test_timedelta_ns_arithmetic(self):
+ time = self.timestamp + np.timedelta64(-123, 'ms')
+ self.assert_ns_timedelta(time, -123000000)
+
+ def test_nanosecond_string_parsing(self):
+ self.timestamp = Timestamp('2013-05-01 07:15:45.123456789')
+ self.assertEqual(self.timestamp.value, 1367392545123456000)
+
+
+class TestTslib(unittest.TestCase):
+
+ def test_intraday_conversion_factors(self):
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('H'), False), 24)
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('T'), False), 1440)
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('S'), False), 86400)
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('L'), False), 86400000)
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('U'), False), 86400000000)
+ self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('N'), False), 86400000000000)
+
+ self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('T'), False), 60)
+ self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('S'), False), 3600)
+ self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('L'), False), 3600000)
+ self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('U'), False), 3600000000)
+ self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('N'), False), 3600000000000)
+
+ self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('S'), False), 60)
+ self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('L'), False), 60000)
+ self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('U'), False), 60000000)
+ self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('N'), False), 60000000000)
+
+ self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('L'), False), 1000)
+ self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('U'), False), 1000000)
+ self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('N'), False), 1000000000)
+
+ self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('U'), False), 1000)
+ self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('N'), False), 1000000)
+
+ self.assertEqual(period_asfreq(1, get_freq('U'), get_freq('N'), False), 1000)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 99b09446be232..0df0fc377d000 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -19,10 +19,12 @@ cdef extern from "Python.h":
cdef PyTypeObject *Py_TYPE(object)
int PySlice_Check(object)
+# this is our datetime.pxd
+from datetime cimport *
+from util cimport is_integer_object, is_datetime64_object, is_timedelta64_object
from libc.stdlib cimport free
-from util cimport is_integer_object, is_datetime64_object
cimport util
from datetime cimport *
@@ -332,6 +334,9 @@ class NaTType(_NaT):
def __repr__(self):
return 'NaT'
+ def __hash__(self):
+ return iNaT
+
def weekday(self):
return -1
@@ -568,23 +573,27 @@ cdef class _Timestamp(datetime):
dts.us, ts.tzinfo)
def __add__(self, other):
+ if is_timedelta64_object(other):
+ return Timestamp(self.value + other.astype('timedelta64[ns]').item(), tz=self.tzinfo)
+
if is_integer_object(other):
if self.offset is None:
+ return Timestamp(self.value + other, tz=self.tzinfo)
msg = ("Cannot add integral value to Timestamp "
"without offset.")
raise ValueError(msg)
else:
return Timestamp((self.offset.__mul__(other)).apply(self))
- else:
- if isinstance(other, timedelta) or hasattr(other, 'delta'):
- nanos = _delta_to_nanoseconds(other)
- return Timestamp(self.value + nanos, tz=self.tzinfo)
- else:
- result = datetime.__add__(self, other)
- if isinstance(result, datetime):
- result = Timestamp(result)
- result.nanosecond = self.nanosecond
- return result
+
+ if isinstance(other, timedelta) or hasattr(other, 'delta'):
+ nanos = _delta_to_nanoseconds(other)
+ return Timestamp(self.value + nanos, tz=self.tzinfo)
+
+ result = datetime.__add__(self, other)
+ if isinstance(result, datetime):
+ result = Timestamp(result)
+ result.nanosecond = self.nanosecond
+ return result
def __sub__(self, other):
if is_integer_object(other):
@@ -636,10 +645,12 @@ cdef class _NaT(_Timestamp):
def _delta_to_nanoseconds(delta):
- try:
+ if hasattr(delta, 'delta'):
delta = delta.delta
- except:
- pass
+ if is_timedelta64_object(delta):
+ return delta.astype("timedelta64[ns]").item()
+ if is_integer_object(delta):
+ return delta
return (delta.days * 24 * 60 * 60 * 1000000
+ delta.seconds * 1000000
+ delta.microseconds) * 1000
@@ -2140,7 +2151,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
continue
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, freq)
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
elif _is_tzlocal(tz):
for i in range(n):
@@ -2155,7 +2166,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, freq)
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
else:
# Adjust datetime64 timestamp, recompute datetimestruct
trans = _get_transitions(tz)
@@ -2174,7 +2185,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + deltas[0],
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, freq)
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
else:
for i in range(n):
if stamps[i] == NPY_NAT:
@@ -2183,7 +2194,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + deltas[pos[i]],
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, freq)
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
return result
@@ -2220,7 +2231,7 @@ cdef extern from "period.h":
void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info)
int64_t get_period_ordinal(int year, int month, int day,
- int hour, int minute, int second,
+ int hour, int minute, int second, int microseconds, int picoseconds,
int freq) except INT32_MIN
int64_t get_python_ordinal(int64_t period_ordinal, int freq) except INT32_MIN
@@ -2284,7 +2295,7 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
for i in range(l):
pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, freq)
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
else:
out = localize_dt64arr_to_period(dtarr, freq, tz)
return out
@@ -2318,7 +2329,7 @@ cpdef int64_t period_asfreq(int64_t period_ordinal, int freq1, int freq2,
"""
cdef:
int64_t retval
-
+
if end:
retval = asfreq(period_ordinal, freq1, freq2, END)
else:
@@ -2361,17 +2372,18 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
return result
-def period_ordinal(int y, int m, int d, int h, int min, int s, int freq):
+def period_ordinal(int y, int m, int d, int h, int min, int s, int us, int ps, int freq):
cdef:
int64_t ordinal
- return get_period_ordinal(y, m, d, h, min, s, freq)
+ return get_period_ordinal(y, m, d, h, min, s, us, ps, freq)
cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq):
cdef:
pandas_datetimestruct dts
date_info dinfo
+ float subsecond_fraction
get_date_info(ordinal, freq, &dinfo)
@@ -2381,7 +2393,9 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq):
dts.hour = dinfo.hour
dts.min = dinfo.minute
dts.sec = int(dinfo.second)
- dts.us = dts.ps = 0
+ subsecond_fraction = dinfo.second - dts.sec
+ dts.us = int((subsecond_fraction) * 1e6)
+ dts.ps = int(((subsecond_fraction) * 1e6 - dts.us) * 1e6)
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
@@ -2411,6 +2425,12 @@ def period_format(int64_t value, int freq, object fmt=None):
fmt = b'%Y-%m-%d %H:%M'
elif freq_group == 9000: # SEC
fmt = b'%Y-%m-%d %H:%M:%S'
+ elif freq_group == 10000: # MILLISEC
+ fmt = b'%Y-%m-%d %H:%M:%S.%l'
+ elif freq_group == 11000: # MICROSEC
+ fmt = b'%Y-%m-%d %H:%M:%S.%u'
+ elif freq_group == 12000: # NANOSEC
+ fmt = b'%Y-%m-%d %H:%M:%S.%n'
else:
raise ValueError('Unknown freq: %d' % freq)
@@ -2419,9 +2439,12 @@ def period_format(int64_t value, int freq, object fmt=None):
cdef list extra_fmts = [(b"%q", b"^`AB`^"),
(b"%f", b"^`CD`^"),
- (b"%F", b"^`EF`^")]
+ (b"%F", b"^`EF`^"),
+ (b"%l", b"^`GH`^"),
+ (b"%u", b"^`IJ`^"),
+ (b"%n", b"^`KL`^")]
-cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^"]
+cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^", "^`GH`^", "^`IJ`^", "^`KL`^"]
cdef _period_strftime(int64_t value, int freq, object fmt):
import sys
@@ -2460,6 +2483,12 @@ cdef _period_strftime(int64_t value, int freq, object fmt):
repl = '%.2d' % (year % 100)
elif i == 2:
repl = '%d' % year
+ elif i == 3:
+ repl = '%03d' % (value % 1000)
+ elif i == 4:
+ repl = '%06d' % (value % 1000000)
+ elif i == 5:
+ repl = '%09d' % (value % 1000000000)
result = result.replace(str_extra_fmts[i], repl)
diff --git a/test.sh b/test.sh
index 324ac68d66b73..4a9ffd7be98b1 100755
--- a/test.sh
+++ b/test.sh
@@ -1,5 +1,6 @@
#!/bin/sh
-coverage erase
+command -v coverage >/dev/null && coverage erase
+command -v python-coverage >/dev/null && python-coverage erase
# nosetests pandas/tests/test_index.py --with-coverage --cover-package=pandas.core --pdb-failure --pdb
#nosetests -w pandas --with-coverage --cover-package=pandas --pdb-failure --pdb #--cover-inclusive
#nosetests -A "not slow" -w pandas/tseries --with-coverage --cover-package=pandas.tseries $* #--cover-inclusive
| closes #1812
This pull request fixes handling of nanosecond times in periods and offsets. In addition it simplifies the internal processing of intraday periods.
``` python
In []: pd.date_range('2013-01-01', periods=5, freq=pd.offsets.Nano(5))
Out[]:
<class 'pandas.tseries.index.DatetimeIndex'>
[2013-01-01 00:00:00, ..., 2013-01-01 00:00:00.000000020]
Length: 5, Freq: 5N, Timezone: None
```
It requires Numpy 1.7 to work, using Numpy 1.6 yields the original behaviour before this PR.
This is the continuation of #2555 after it was merged without effect fixing issue #1812.
The biggest part is a refactoring of pandas/src/period.c to use a conversion factor matrix for intraday conversion. This minimizes the impact of adding three more intraday time units (ms, us and ns). Thus the amount of frequency conversion methods is reduced to one per intraday frequency source or target. The intraday conversion factor is looked up in the conversion factor matrix inside the method.
Nanosecond support is only available when using numpy >= 1.7. When using with numpy < 1.7 everything except nanosecond support should work as expected.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3060 | 2013-03-15T07:24:05Z | 2013-09-30T19:43:55Z | 2013-09-30T19:43:55Z | 2014-06-18T11:32:43Z |
ENH: improve performance of df.to_csv GH3054 | diff --git a/RELEASE.rst b/RELEASE.rst
index 2eb7980458f8e..51fdd527afdfa 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -47,6 +47,7 @@ pandas 0.11.0
**Improvements to existing features**
+ - Improved performance of dv.to_csv() by up to 10x in some cases. (GH3059_)
- added ``blocks`` attribute to DataFrames, to return a dict of dtypes to
homogeneously dtyped DataFrames
- added keyword ``convert_numeric`` to ``convert_objects()`` to try to
@@ -62,6 +63,8 @@ pandas 0.11.0
strings that can be parsed with datetime.strptime
- Add ``axes`` property to ``Series`` for compatibility
- Add ``xs`` function to ``Series`` for compatibility
+ - Add ``chunksize`` parameter to ``to_csv`` to allow writing in chunks
+ to enable constant memory usage
**API Changes**
@@ -183,6 +186,7 @@ pandas 0.11.0
.. _GH3012: https://github.com/pydata/pandas/issues/3012
.. _GH3029: https://github.com/pydata/pandas/issues/3029
.. _GH3041: https://github.com/pydata/pandas/issues/3041
+.. _GH3059: https://github.com/pydata/pandas/issues/3039
pandas 0.10.1
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index 60ec7de5c4d8e..09289bab5a0f4 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -229,6 +229,8 @@ API changes
Enhancements
~~~~~~~~~~~~
+ - Improved performance of dv.to_csv() by up to 10x in some cases. (GH3059_)
+
- Numexpr is now a :ref:`Recommended Dependencies <install.recommended_dependencies>`, to accelerate certain
types of numerical and boolean operations
@@ -331,3 +333,4 @@ on GitHub for a complete list.
.. _GH2806: https://github.com/pydata/pandas/issues/2806
.. _GH2807: https://github.com/pydata/pandas/issues/2807
.. _GH2918: https://github.com/pydata/pandas/issues/2918
+.. _GH3059: https://github.com/pydata/pandas/issues/3059
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a3e8c09839891..207ed2edac4bc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -101,7 +101,6 @@ def _isnull_old(obj):
_isnull = _isnull_new
-
def _use_inf_as_null(key):
'''Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
@@ -1594,6 +1593,26 @@ def _check_as_is(x):
# empty queue
self.queue.truncate(0)
+ def writerows(self, rows):
+ def _check_as_is(x):
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
+ is_number(x)) or isinstance(x, str)
+
+ for i, row in enumerate(rows):
+ rows[i] = [x if _check_as_is(x)
+ else pprint_thing(x).encode('utf-8') for x in row]
+
+ self.writer.writerows([[s for s in row] for row in rows])
+ # Fetch UTF-8 output from the queue ...
+ data = self.queue.getvalue()
+ data = data.decode("utf-8")
+ # ... and reencode it into the target encoding
+ data = self.encoder.encode(data)
+ # write to the target stream
+ self.stream.write(data)
+ # empty queue
+ self.queue.truncate(0)
+
_NS_DTYPE = np.dtype('M8[ns]')
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 003b1fefd01f7..ef14c830e1c37 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -9,7 +9,7 @@
from io import StringIO
from pandas.core.common import adjoin, isnull, notnull
-from pandas.core.index import MultiIndex, _ensure_index
+from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.util import py3compat
from pandas.core.config import get_option, set_option, reset_option
import pandas.core.common as com
@@ -18,6 +18,7 @@
import numpy as np
import itertools
+import csv
from pandas.tseries.period import PeriodIndex
@@ -763,6 +764,260 @@ def grouper(x):
return result
+class CSVFormatter(object):
+
+ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
+ cols=None, header=True, index=True, index_label=None,
+ mode='w', nanRep=None, encoding=None, quoting=None,
+ line_terminator='\n', chunksize=None,legacy=False):
+ self.legacy=legacy # remove for 0.12
+ self.obj = obj
+ self.path_or_buf = path_or_buf
+ self.sep = sep
+ self.na_rep = na_rep
+ self.float_format = float_format
+
+ self.header = header
+ self.index = index
+ self.index_label = index_label
+ self.mode = mode
+ self.encoding = encoding
+
+ if quoting is None:
+ quoting = csv.QUOTE_MINIMAL
+ self.quoting = quoting
+
+ self.line_terminator = line_terminator
+
+ if cols is None:
+ cols = obj.columns
+
+ if isinstance(cols,Index):
+ cols = cols.to_native_types(na_rep=na_rep,float_format=float_format)
+ else:
+ cols=list(cols)
+ self.cols = cols
+
+ # preallocate data 2d list
+ self.blocks = self.obj._data.blocks
+ ncols = sum(len(b.items) for b in self.blocks)
+ self.data =[None] * ncols
+
+ # fail early if we have duplicate columns
+ if len(set(self.cols)) != len(self.cols):
+ raise Exception("duplicate columns are not permitted in to_csv")
+
+ self.colname_map = dict((k,i) for i,k in enumerate(obj.columns))
+
+ if chunksize is None:
+ chunksize = (100000/ (len(self.cols) or 1)) or 1
+ self.chunksize = chunksize
+
+ self.data_index = obj.index
+ if isinstance(obj.index, PeriodIndex):
+ self.data_index = obj.index.to_timestamp()
+
+ self.nlevels = getattr(self.data_index, 'nlevels', 1)
+ if not index:
+ self.nlevels = 0
+
+ # legacy to be removed in 0.12
+ def _helper_csv(self, writer, na_rep=None, cols=None,
+ header=True, index=True,
+ index_label=None, float_format=None):
+ if cols is None:
+ cols = self.columns
+
+ series = {}
+ for k, v in self.obj._series.iteritems():
+ series[k] = v.values
+
+
+ has_aliases = isinstance(header, (tuple, list, np.ndarray))
+ if has_aliases or header:
+ if index:
+ # should write something for index label
+ if index_label is not False:
+ if index_label is None:
+ if isinstance(self.obj.index, MultiIndex):
+ index_label = []
+ for i, name in enumerate(self.obj.index.names):
+ if name is None:
+ name = ''
+ index_label.append(name)
+ else:
+ index_label = self.obj.index.name
+ if index_label is None:
+ index_label = ['']
+ else:
+ index_label = [index_label]
+ elif not isinstance(index_label, (list, tuple, np.ndarray)):
+ # given a string for a DF with Index
+ index_label = [index_label]
+
+ encoded_labels = list(index_label)
+ else:
+ encoded_labels = []
+
+ if has_aliases:
+ if len(header) != len(cols):
+ raise ValueError(('Writing %d cols but got %d aliases'
+ % (len(cols), len(header))))
+ else:
+ write_cols = header
+ else:
+ write_cols = cols
+ encoded_cols = list(write_cols)
+
+ writer.writerow(encoded_labels + encoded_cols)
+ else:
+ encoded_cols = list(cols)
+ writer.writerow(encoded_cols)
+
+ data_index = self.obj.index
+ if isinstance(self.obj.index, PeriodIndex):
+ data_index = self.obj.index.to_timestamp()
+
+ nlevels = getattr(data_index, 'nlevels', 1)
+ for j, idx in enumerate(data_index):
+ row_fields = []
+ if index:
+ if nlevels == 1:
+ row_fields = [idx]
+ else: # handle MultiIndex
+ row_fields = list(idx)
+ for i, col in enumerate(cols):
+ val = series[col][j]
+ if lib.checknull(val):
+ val = na_rep
+
+ if float_format is not None and com.is_float(val):
+ val = float_format % val
+ elif isinstance(val, np.datetime64):
+ val = lib.Timestamp(val)._repr_base
+
+ row_fields.append(val)
+
+ writer.writerow(row_fields)
+
+ def save(self):
+ # create the writer & save
+ if hasattr(self.path_or_buf, 'read'):
+ f = self.path_or_buf
+ close = False
+ else:
+ f = com._get_handle(self.path_or_buf, self.mode, encoding=self.encoding)
+ close = True
+
+ try:
+ if self.encoding is not None:
+ self.writer = com.UnicodeWriter(f, lineterminator=self.line_terminator,
+ delimiter=self.sep, encoding=self.encoding,
+ quoting=self.quoting)
+ else:
+ self.writer = csv.writer(f, lineterminator=self.line_terminator,
+ delimiter=self.sep, quoting=self.quoting)
+
+ if self.legacy:
+ # to be removed in 0.12
+ self._helper_csv(self.writer, na_rep=self.na_rep,
+ float_format=self.float_format, cols=self.cols,
+ header=self.header, index=self.index,
+ index_label=self.index_label)
+
+ else:
+ self._save()
+
+
+ finally:
+ if close:
+ f.close()
+
+ def _save_header(self):
+
+ writer = self.writer
+ obj = self.obj
+ index_label = self.index_label
+ cols = self.cols
+ header = self.header
+
+ has_aliases = isinstance(header, (tuple, list, np.ndarray))
+ if has_aliases or self.header:
+ if self.index:
+ # should write something for index label
+ if index_label is not False:
+ if index_label is None:
+ if isinstance(obj.index, MultiIndex):
+ index_label = []
+ for i, name in enumerate(obj.index.names):
+ if name is None:
+ name = ''
+ index_label.append(name)
+ else:
+ index_label = obj.index.name
+ if index_label is None:
+ index_label = ['']
+ else:
+ index_label = [index_label]
+ elif not isinstance(index_label, (list, tuple, np.ndarray)):
+ # given a string for a DF with Index
+ index_label = [index_label]
+
+ encoded_labels = list(index_label)
+ else:
+ encoded_labels = []
+
+ if has_aliases:
+ if len(header) != len(cols):
+ raise ValueError(('Writing %d cols but got %d aliases'
+ % (len(cols), len(header))))
+ else:
+ write_cols = header
+ else:
+ write_cols = cols
+ encoded_cols = list(write_cols)
+
+ writer.writerow(encoded_labels + encoded_cols)
+ else:
+ encoded_cols = list(cols)
+ writer.writerow(encoded_cols)
+
+ def _save(self):
+
+ self._save_header()
+
+ nrows = len(self.data_index)
+
+ # write in chunksize bites
+ chunksize = self.chunksize
+ chunks = int(nrows / chunksize)+1
+
+ for i in xrange(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, nrows)
+ if start_i >= end_i:
+ break
+
+ self._save_chunk(start_i, end_i)
+
+ def _save_chunk(self, start_i, end_i):
+
+ colname_map = self.colname_map
+ data_index = self.data_index
+
+ # create the data for a chunk
+ slicer = slice(start_i,end_i)
+ for i in range(len(self.blocks)):
+ b = self.blocks[i]
+ d = b.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format)
+ for j, k in enumerate(b.items):
+ # self.data is a preallocated list
+ self.data[colname_map[k]] = d[j]
+
+ ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format)
+
+ lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
+
# from collections import namedtuple
# ExcelCell = namedtuple("ExcelCell",
# 'row, col, val, style, mergestart, mergeend')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ee586a2101f62..7cfb9ec03ba83 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -14,7 +14,6 @@
from itertools import izip
from StringIO import StringIO
-import csv
import operator
import sys
@@ -1289,87 +1288,10 @@ def to_panel(self):
to_wide = deprecate('to_wide', to_panel)
- def _helper_csv(self, writer, na_rep=None, cols=None,
- header=True, index=True,
- index_label=None, float_format=None):
- if cols is None:
- cols = self.columns
-
- series = {}
- for k, v in self._series.iteritems():
- series[k] = v.values
-
- has_aliases = isinstance(header, (tuple, list, np.ndarray))
- if has_aliases or header:
- if index:
- # should write something for index label
- if index_label is not False:
- if index_label is None:
- if isinstance(self.index, MultiIndex):
- index_label = []
- for i, name in enumerate(self.index.names):
- if name is None:
- name = ''
- index_label.append(name)
- else:
- index_label = self.index.name
- if index_label is None:
- index_label = ['']
- else:
- index_label = [index_label]
- elif not isinstance(index_label, (list, tuple, np.ndarray)):
- # given a string for a DF with Index
- index_label = [index_label]
-
- encoded_labels = list(index_label)
- else:
- encoded_labels = []
-
- if has_aliases:
- if len(header) != len(cols):
- raise ValueError(('Writing %d cols but got %d aliases'
- % (len(cols), len(header))))
- else:
- write_cols = header
- else:
- write_cols = cols
- encoded_cols = list(write_cols)
-
- writer.writerow(encoded_labels + encoded_cols)
- else:
- encoded_cols = list(cols)
- writer.writerow(encoded_cols)
-
- data_index = self.index
- if isinstance(self.index, PeriodIndex):
- data_index = self.index.to_timestamp()
-
- nlevels = getattr(data_index, 'nlevels', 1)
- for j, idx in enumerate(data_index):
- row_fields = []
- if index:
- if nlevels == 1:
- row_fields = [idx]
- else: # handle MultiIndex
- row_fields = list(idx)
- for i, col in enumerate(cols):
- val = series[col][j]
- if lib.checknull(val):
- val = na_rep
-
- if float_format is not None and com.is_float(val):
- val = float_format % val
- elif isinstance(val, np.datetime64):
- val = lib.Timestamp(val)._repr_base
-
- row_fields.append(val)
-
- writer.writerow(row_fields)
-
def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n'):
+ line_terminator='\n', chunksize=None,**kwds):
"""
Write DataFrame to a comma-separated values (csv) file
@@ -1406,6 +1328,7 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
+ chunksize : rows to write at a time
"""
if nanRep is not None: # pragma: no cover
import warnings
@@ -1413,32 +1336,17 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
FutureWarning)
na_rep = nanRep
- if hasattr(path_or_buf, 'read'):
- f = path_or_buf
- close = False
- else:
- f = com._get_handle(path_or_buf, mode, encoding=encoding)
- close = True
-
- if quoting is None:
- quoting = csv.QUOTE_MINIMAL
- try:
- if encoding is not None:
- csvout = com.UnicodeWriter(f, lineterminator=line_terminator,
- delimiter=sep, encoding=encoding,
- quoting=quoting)
- else:
- csvout = csv.writer(f, lineterminator=line_terminator,
- delimiter=sep, quoting=quoting)
- self._helper_csv(csvout, na_rep=na_rep,
- float_format=float_format, cols=cols,
- header=header, index=index,
- index_label=index_label)
-
- finally:
- if close:
- f.close()
+ else:
+ formatter = fmt.CSVFormatter(self, path_or_buf,
+ line_terminator=line_terminator,
+ sep=sep, encoding=encoding,
+ quoting=quoting,na_rep=na_rep,
+ float_format=float_format, cols=cols,
+ header=header, index=index,
+ index_label=index_label,
+ chunksize=chunksize,legacy=kwds.get("legacy",False) )
+ formatter.save()
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
float_format=None, cols=None, header=True, index=True,
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 0f9776e202c00..8b42f2146a7cf 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -441,16 +441,7 @@ def format(self, name=False, formatter=None, na_rep='NaN'):
return header + list(self.map(formatter))
if self.is_all_dates:
- zero_time = time(0, 0)
- result = []
- for dt in self:
- if isnull(dt):
- result.append(u'NaT')
- else:
- if dt.time() != zero_time or dt.tzinfo is not None:
- return header + [u'%s' % x for x in self]
- result.append(u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day))
- return header + result
+ return header + _date_formatter(self)
values = self.values
@@ -472,6 +463,20 @@ def format(self, name=False, formatter=None, na_rep='NaN'):
result = _trim_front(format_array(values, None, justify='left'))
return header + result
+ def to_native_types(self, slicer=None, na_rep='', float_format=None):
+ values = self
+ if slicer is not None:
+ values = values[slicer]
+ mask = isnull(values)
+ values = np.array(values,dtype=object)
+
+ if self.is_all_dates:
+ return _date_formatter(self)
+ else:
+ values[mask] = na_rep
+
+ return values.tolist()
+
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
@@ -1481,6 +1486,12 @@ def __repr__(self):
def __len__(self):
return len(self.labels[0])
+ def to_native_types(self, slicer=None, na_rep='', float_format=None):
+ ix = self
+ if slicer:
+ ix = self[slicer]
+ return ix.tolist()
+
@property
def _constructor(self):
return MultiIndex.from_tuples
@@ -2578,6 +2589,22 @@ def _wrap_joined_index(self, joined, other):
# For utility purposes
+def _date_formatter(obj, na_rep=u'NaT'):
+ data = list(obj)
+
+ # tz formatter or time formatter
+ zero_time = time(0, 0)
+ for d in data:
+ if d.time() != zero_time or d.tzinfo is not None:
+ return [u'%s' % x for x in data ]
+
+ values = np.array(data,dtype=object)
+ mask = isnull(obj.values)
+ values[mask] = na_rep
+
+ imask = -mask
+ values[imask] = np.array([ u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day) for dt in values[imask] ])
+ return values.tolist()
def _sparsify(label_list, start=0):
pivoted = zip(*label_list)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2a41bbffa3b83..3467b72541481 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4,13 +4,14 @@
from numpy import nan
import numpy as np
-from pandas.core.common import _possibly_downcast_to_dtype
+from pandas.core.common import isnull, _possibly_downcast_to_dtype
from pandas.core.index import Index, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
+from pandas.tslib import Timestamp
from pandas.util import py3compat
@@ -259,6 +260,17 @@ def _try_cast_result(self, result):
we may have roundtripped thru object in the mean-time """
return result
+ def to_native_types(self, slicer=None, na_rep='', **kwargs):
+ """ convert to our native types format, slicing if desired """
+
+ values = self.values
+ if slicer is not None:
+ values = values[:,slicer]
+ values = np.array(values,dtype=object)
+ mask = isnull(values)
+ values[mask] = na_rep
+ return values.tolist()
+
def replace(self, to_replace, value, inplace=False):
new_values = self.values if inplace else self.values.copy()
if self._can_hold_element(value):
@@ -577,6 +589,20 @@ def _try_cast(self, element):
except: # pragma: no cover
return element
+ def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs):
+ """ convert to our native types format, slicing if desired """
+
+ values = self.values
+ if slicer is not None:
+ values = values[:,slicer]
+ values = np.array(values,dtype=object)
+ mask = isnull(values)
+ values[mask] = na_rep
+ if float_format:
+ imask = (-mask).ravel()
+ values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ])
+ return values.tolist()
+
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
@@ -701,6 +727,25 @@ def _try_cast(self, element):
except:
return element
+ def to_native_types(self, slicer=None, na_rep=None, **kwargs):
+ """ convert to our native types format, slicing if desired """
+
+ values = self.values
+ if slicer is not None:
+ values = values[:,slicer]
+ mask = isnull(values)
+
+ rvalues = np.empty(self.shape,dtype=object)
+ if na_rep is None:
+ na_rep = 'NaT'
+ rvalues[mask] = na_rep
+ imask = (-mask).ravel()
+ if self.dtype == 'datetime64[ns]':
+ rvalues.flat[imask] = np.array([ Timestamp(val)._repr_base for val in values.ravel()[imask] ],dtype=object)
+ elif self.dtype == 'timedelta64[ns]':
+ rvalues.flat[imask] = np.array([ lib.repr_timedelta64(val) for val in values.ravel()[imask] ],dtype=object)
+ return rvalues.tolist()
+
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 1fd579553f094..e12b524dda736 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -784,6 +784,54 @@ def array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, obje
return arr
+@cython.boundscheck(False)
+@cython.wraparound(False)
+
+def write_csv_rows(list data, list data_index, int nlevels, list cols, object writer):
+
+ cdef int N, j, i, ncols
+ cdef list rows
+ cdef object val
+
+ # In crude testing, N>100 yields little marginal improvement
+ N=100
+
+ # pre-allocate rows
+ ncols = len(cols)
+ rows = [[None]*(nlevels+ncols) for x in range(N)]
+
+ j = -1
+ if nlevels == 1:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ row[0] = data_index[j]
+ for i in range(ncols):
+ row[1+i] = data[i][j]
+
+ if j >= N-1 and j % N == N-1:
+ writer.writerows(rows)
+ elif nlevels > 1:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ row[:nlevels] = list(data_index[j])
+ for i in range(ncols):
+ row[nlevels+i] = data[i][j]
+
+ if j >= N-1 and j % N == N-1:
+ writer.writerows(rows)
+ else:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ for i in range(ncols):
+ row[i] = data[i][j]
+
+ if j >= N-1 and j % N == N-1:
+ writer.writerows(rows)
+
+ if j >= 0 and (j < N-1 or (j % N) != N-1 ):
+ writer.writerows(rows[:((j+1) % N)])
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
def create_hdf_rows_2d(ndarray indexer0,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1c30dfd1abced..7051c193dffd4 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4450,6 +4450,115 @@ def test_to_csv_from_csv(self):
os.remove(path)
+ def test_to_csv_moar(self):
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ path = '__tmp_to_csv_dupe_cols__'
+ def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None):
+ try:
+ df.to_csv(path,encoding='utf8')
+ recons = DataFrame.from_csv(path)
+ except:
+ os.remove(path)
+ raise
+ else:
+ def _to_uni(x):
+ if not isinstance(x,unicode):
+ return x.decode('utf8')
+ return x
+ if rnlvl:
+ delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
+ ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
+ recons.index = ix
+ recons = recons.iloc[:,rnlvl-1:]
+
+ if cnlvl:
+ def stuple_to_tuple(x):
+ import re
+ x = x.split(",")
+ x = map(lambda x: re.sub("[\'\"\s\(\)]","",x),x)
+ return x
+
+ cols=MultiIndex.from_tuples(map(stuple_to_tuple,recons.columns))
+ recons.columns = cols
+
+ type_map = dict(i='i',f='f',s='O',u='O',dt='O')
+ if r_dtype:
+ if r_dtype == 'u': # unicode
+ r_dtype='O'
+ recons.index = np.array(map(_to_uni,recons.index),
+ dtype=r_dtype )
+ df.index = np.array(map(_to_uni,df.index),dtype=r_dtype )
+ if r_dtype == 'dt': # unicode
+ r_dtype='O'
+ recons.index = np.array(map(Timestamp,recons.index),
+ dtype=r_dtype )
+ df.index = np.array(map(Timestamp,df.index),dtype=r_dtype )
+ else:
+ r_dtype= type_map.get(r_dtype)
+ recons.index = np.array(recons.index,dtype=r_dtype )
+ df.index = np.array(df.index,dtype=r_dtype )
+ if c_dtype:
+ if c_dtype == 'u':
+ c_dtype='O'
+ recons.columns = np.array(map(_to_uni,recons.columns),
+ dtype=c_dtype )
+ df.Columns = np.array(map(_to_uni,df.columns),dtype=c_dtype )
+ elif c_dtype == 'dt':
+ c_dtype='O'
+ recons.columns = np.array(map(Timestamp,recons.columns),
+ dtype=c_dtype )
+ df.Columns = np.array(map(Timestamp,df.columns),dtype=c_dtype )
+ else:
+ c_dtype= type_map.get(c_dtype)
+ recons.columns = np.array(recons.columns,dtype=c_dtype )
+ df.columns = np.array(df.columns,dtype=c_dtype )
+
+ assert_frame_equal(df, recons,check_names=False)
+
+ N = 100
+
+ for ncols in [1,10,30]:
+ base = int((100000/ ncols or 1) or 1)
+ for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
+ base-1,base,base+1]:
+ print( nrows,ncols)
+ _do_test(mkdf(nrows, ncols),path)
+
+ for nrows in [10,N-2,N-1,N,N+1,N+2]:
+ df = mkdf(nrows, 10)
+ cols = list(df.columns)
+ cols[:1] = ["dupe","dupe"]
+ cols[-1:] = ["dupe","dupe"]
+ ix = list(df.index)
+ ix[:2] = ["rdupe","rdupe"]
+ ix[-2:] = ["rdupe","rdupe"]
+ print( nrows)
+
+ df.index=ix
+ _do_test(df,path)
+
+ for r_idx_type in ['i', 'f','s','u','dt']:
+ for c_idx_type in ['i', 'f','s','u','dt']:
+ print(r_idx_type,c_idx_type)
+ _do_test(mkdf(100, 1,r_idx_type=r_idx_type,
+ c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)
+ _do_test(mkdf(100, 2,r_idx_type=r_idx_type,
+ c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)
+
+ _do_test(DataFrame(index=range(10)),path)
+ _do_test(mkdf(50001, 2,r_idx_nlevels=2),path,rnlvl=2)
+ for ncols in [2,10,30]:
+ base = int(100000/ncols)
+ for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
+ base-1,base,base+1]:
+ print(nrows, ncols)
+ _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)
+ _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)
+ _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),
+ path,rnlvl=2,cnlvl=2)
+
+
+
def test_to_csv_from_csv_w_some_infs(self):
path = '__%s__' % tm.rands(10)
@@ -4562,6 +4671,69 @@ def test_to_csv_withcommas(self):
os.remove(path)
+ def test_to_csv_mixed(self):
+ filename = '__tmp_to_csv_mixed__.csv'
+ def create_cols(name):
+ return [ "%s%03d" % (name,i) for i in xrange(5) ]
+
+ df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))
+ df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))
+ df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
+ df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
+ df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))
+
+ # add in some nans
+ df_float.ix[30:50,1:3] = np.nan
+
+ #### this is a bug in read_csv right now ####
+ #df_dt.ix[30:50,1:3] = np.nan
+
+ df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
+
+ # dtype
+ dtypes = dict()
+ for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:
+ for c in create_cols(n):
+ dtypes[c] = dtype
+
+ df.to_csv(filename)
+
+ rs = pan.read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
+ assert_frame_equal(rs, df)
+ os.remove(filename)
+
+ def test_to_csv_dups_cols(self):
+ filename = '__tmp_to_csv_dup_cols__.csv'
+
+ df = DataFrame(np.random.randn(1000, 30),columns=range(15)+range(15),dtype='float64')
+ self.assertRaises(Exception, df.to_csv, filename)
+
+ df_float = DataFrame(np.random.randn(1000, 30),dtype='float64')
+ df_int = DataFrame(np.random.randn(1000, 30),dtype='int64')
+ df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
+ df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
+ df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
+ df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
+
+ #### this raises because we have duplicate column names across dtypes ####
+ self.assertRaises(Exception, df.to_csv, filename)
+
+ def test_to_csv_chunking(self):
+ filename = '__tmp_to_csv_chunking__.csv'
+
+ aa=DataFrame({'A':range(100000)})
+
+ aa['B'] = aa.A + 1.0
+ aa['C'] = aa.A + 2.0
+ aa['D'] = aa.A + 3.0
+
+ for chunksize in [10000,50000,100000]:
+ aa.to_csv(filename,chunksize=chunksize)
+ rs = pan.read_csv(filename,index_col=0)
+ assert_frame_equal(rs, aa)
+
+ os.remove(filename)
+
def test_to_csv_bug(self):
path = '__tmp_to_csv_bug__.csv'
f1 = StringIO('a,1.0\nb,2.0')
diff --git a/vb_suite/io_bench.py b/vb_suite/io_bench.py
index ba386bd0e9649..dc335a4f994d5 100644
--- a/vb_suite/io_bench.py
+++ b/vb_suite/io_bench.py
@@ -44,17 +44,34 @@
"""
frame_to_csv = Benchmark("df.to_csv('__test__.csv')", setup,
start_date=datetime(2011, 1, 1))
+#----------------------------------
+
+setup = common_setup + """
+df=DataFrame({'A':range(100000)})
+df['B'] = df.A + 1.0
+df['C'] = df.A + 2.0
+df['D'] = df.A + 3.0
+"""
+frame_to_csv2 = Benchmark("df.to_csv('__test__.csv')", setup,
+ start_date=datetime(2011, 1, 1))
#----------------------------------
setup = common_setup + """
from pandas import concat, Timestamp
-df_float = DataFrame(np.random.randn(1000, 30),dtype='float64')
-df_int = DataFrame(np.random.randn(1000, 30),dtype='int64')
-df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
-df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
-df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
+def create_cols(name):
+ return [ "%s%03d" % (name,i) for i in xrange(5) ]
+df_float = DataFrame(np.random.randn(10000, 5),dtype='float64',columns=create_cols('float'))
+df_int = DataFrame(np.random.randn(10000, 5),dtype='int64',columns=create_cols('int'))
+df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
+df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
+df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))
+
+# add in some nans
+df_float.ix[30:500,1:3] = np.nan
+
df = concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
+
"""
frame_to_csv_mixed = Benchmark("df.to_csv('__test__.csv')", setup,
start_date=datetime(2012, 6, 1))
| Needs more testing before merging.
Following [SO question](http://stackoverflow.com/questions/15417574) mentioned in #3054:
``` python
In [7]: def df_to_csv(df,fname):
...: fh=open(fname,'w')
...: fh.write(','.join(df.columns) + '\n')
...: for row in df.itertuples(index=False):
...: slist = [str(x) for x in row]
...: ss = ','.join(slist) + "\n"
...: fh.write(ss)
...: fh.close()
...:
...: aa=pd.DataFrame({'A':range(100000)})
...: aa['B'] = aa.A + 1.0
...: aa['C'] = aa.A + 2.0
...: aa['D'] = aa.A + 3.0
...:
...: %timeit -r10 aa.to_csv('/tmp/junk1',index=False)
...: %timeit -r10 df_to_csv(aa,'/tmp/junk2')
...: from hashlib import sha1
...: print sha1(open("/tmp/junk1").read()).hexdigest()
...: print sha1(open("/tmp/junk2").read()).hexdigest()
```
<table>
<tr>
<th>current pandas</th><th>with PR</th><th>example code</th>
</tr>
<tr>
<td>2.3 s</td><td>1.29 s</td><td>1.28 s</td>
</tr>
</table>
wins:
- convert numpy numerics to native types to eliminate expensive numpy specific
stringify calls.
- if number of columns is < 10000, precompute the `cols` loop range rather
then creating and walking a generator at each iteration of the inner loop.
- some cargo cult stuff that's probably in the noise.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3059 | 2013-03-15T03:26:12Z | 2013-03-19T11:37:53Z | 2013-03-19T11:37:53Z | 2014-06-12T04:44:09Z |
BUG: Handle concat of series with same name, GH2972 | diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 88b25f160cdfd..9ab83cf828188 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -947,9 +947,10 @@ def get_result(self):
name = com._consensus_name_attr(self.objs)
return Series(new_data, index=self.new_axes[0], name=name)
elif self._is_series:
- data = dict(zip(self.new_axes[1], self.objs))
- return DataFrame(data, index=self.new_axes[0],
- columns=self.new_axes[1])
+ data = dict(itertools.izip(xrange(len(self.objs)), self.objs))
+ tmpdf = DataFrame(data, index=self.new_axes[0])
+ tmpdf.columns = self.new_axes[1]
+ return tmpdf
else:
new_data = self._get_concatenated_data()
return self.objs[0]._from_axes(new_data, self.new_axes)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index a046cb83ee13a..b5c205eb5fede 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1671,6 +1671,16 @@ def test_concat_bug_1719(self):
self.assertEqual(len(left), len(right))
+ def test_concat_bug_2972(self):
+ ts0 = Series(np.zeros(5))
+ ts1 = Series(np.ones(5))
+ ts0.name = ts1.name = 'same name'
+ result = concat([ts0, ts1], axis=1)
+
+ expected = DataFrame({0: ts0, 1: ts1})
+ expected.columns=['same name', 'same name']
+ assert_frame_equal(result, expected)
+
class TestOrderedMerge(unittest.TestCase):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3058 | 2013-03-15T03:13:33Z | 2013-03-16T00:21:25Z | 2013-03-16T00:21:25Z | 2014-06-30T15:11:12Z | |
BUG: loc was failing on integer slices (incorrect implementation), GH 3053 | diff --git a/RELEASE.rst b/RELEASE.rst
index 2eb7980458f8e..f4c13e5cbca98 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -38,7 +38,7 @@ pandas 0.11.0
- Add ``squeeze`` function to reduce dimensionality of 1-len objects
- Support slicing with time objects (GH2681_)
- Added ``.iloc`` attribute, to support strict integer based indexing, analagous to ``.ix`` (GH2922_)
- - Added ``.loc`` attribute, to support strict label based indexing, analagous to ``.ix``
+ - Added ``.loc`` attribute, to support strict label based indexing, analagous to ``.ix`` (GH3053_)
- Added ``.iat`` attribute, to support fast scalar access via integers (replaces ``iget_value/iset_value``)
- Added ``.at`` attribute, to support fast scalar access via labels (replaces ``get_value/set_value``)
- Moved functionaility from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer
@@ -183,6 +183,7 @@ pandas 0.11.0
.. _GH3012: https://github.com/pydata/pandas/issues/3012
.. _GH3029: https://github.com/pydata/pandas/issues/3029
.. _GH3041: https://github.com/pydata/pandas/issues/3041
+.. _GH3053: https://github.com/pydata/pandas/issues/3053
pandas 0.10.1
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 17423075b6bfb..cab9e967519de 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -647,6 +647,19 @@ def _getbool_axis(self, key, axis=0):
return self.obj.take(inds, axis=axis)
except (Exception), detail:
raise self._exception(detail)
+ def _get_slice_axis(self, slice_obj, axis=0):
+ """ this is pretty simple as we just have to deal with labels """
+ obj = self.obj
+ if not _need_slice(slice_obj):
+ return obj
+
+ labels = obj._get_axis(axis)
+ indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step)
+
+ if isinstance(indexer, slice):
+ return self._slice(indexer, axis=axis)
+ else:
+ return self.obj.take(indexer, axis=axis)
class _LocIndexer(_LocationIndexer):
""" purely label based location based indexing """
@@ -667,11 +680,8 @@ def _has_valid_type(self, key, axis):
if key.start not in ax:
raise KeyError("start bound [%s] is not the [%s]" % (key.start,self.obj._get_axis_name(axis)))
if key.stop is not None:
- stop = key.stop
- if com.is_integer(stop):
- stop -= 1
- if stop not in ax:
- raise KeyError("stop bound [%s] is not in the [%s]" % (stop,self.obj._get_axis_name(axis)))
+ if key.stop not in ax:
+ raise KeyError("stop bound [%s] is not in the [%s]" % (key.stop,self.obj._get_axis_name(axis)))
elif com._is_bool_indexer(key):
return True
@@ -700,9 +710,6 @@ def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
- ltype = labels.inferred_type
- if ltype == 'mixed-integer-float' or ltype == 'mixed-integer':
- raise ValueError('cannot slice with a non-single type label array')
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e48d8dbdcb498..f1ac1a288d45a 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -450,12 +450,38 @@ def test_loc_getitem_bool(self):
def test_loc_getitem_int_slice(self):
# int slices in int
- self.check_result('int slice1', 'loc', slice(1,3), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'], fails=KeyError)
+ self.check_result('int slice1', 'loc', slice(2,4), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'], fails=KeyError)
# ok
- self.check_result('int slice2', 'loc', slice(2,5), 'ix', [2,4], typs = ['ints'], axes = 0)
- self.check_result('int slice2', 'loc', slice(3,7), 'ix', [3,6], typs = ['ints'], axes = 1)
- self.check_result('int slice2', 'loc', slice(4,9), 'ix', [4,8], typs = ['ints'], axes = 2)
+ self.check_result('int slice2', 'loc', slice(2,4), 'ix', [2,4], typs = ['ints'], axes = 0)
+ self.check_result('int slice2', 'loc', slice(3,6), 'ix', [3,6], typs = ['ints'], axes = 1)
+ self.check_result('int slice2', 'loc', slice(4,8), 'ix', [4,8], typs = ['ints'], axes = 2)
+
+ # GH 3053
+ # loc should treat integer slices like label slices
+ from itertools import product
+
+ index = MultiIndex.from_tuples([t for t in product([6,7,8], ['a', 'b'])])
+ df = DataFrame(np.random.randn(6, 6), index, index)
+ result = df.loc[6:8,:]
+ expected = df.ix[6:8,:]
+ assert_frame_equal(result,expected)
+
+ index = MultiIndex.from_tuples([t for t in product([10, 20, 30], ['a', 'b'])])
+ df = DataFrame(np.random.randn(6, 6), index, index)
+ result = df.loc[20:30,:]
+ expected = df.ix[20:30,:]
+ assert_frame_equal(result,expected)
+
+ # doc examples
+ result = df.loc[10,:]
+ expected = df.ix[10,:]
+ assert_frame_equal(result,expected)
+
+ result = df.loc[:,10]
+ #expected = df.ix[:,10] (this fails)
+ expected = df[10]
+ assert_frame_equal(result,expected)
def test_loc_getitem_label_slice(self):
@@ -475,8 +501,7 @@ def test_loc_getitem_label_slice(self):
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=2, fails=KeyError)
- # you would think this would work, but we don't have an ordering, so fail
- self.check_result('mixed slice', 'loc', slice(2,5,2), 'ix', slice(2,4,2), typs = ['mixed'], axes=0, fails=ValueError)
+ self.check_result('mixed slice', 'loc', slice(2,4,2), 'ix', slice(2,4,2), typs = ['mixed'], axes=0)
def test_loc_general(self):
| fixes #3053
see also #2904
| https://api.github.com/repos/pandas-dev/pandas/pulls/3055 | 2013-03-14T23:21:09Z | 2013-03-14T23:58:07Z | 2013-03-14T23:58:07Z | 2014-06-27T18:27:59Z |
DOC: more examples | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index e824a3a925f4e..a83bca267213f 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -19,8 +19,11 @@
Cookbook
********
-This is a respository for *short and sweet* example and links for useful pandas recipes.
-We encourage users to add to this documentation. This is a great *First Pull Request*.
+This is a respository for *short and sweet* examples and links for useful pandas recipes.
+We encourage users to add to this documentation.
+
+This is a great *First Pull Request* (to add interesting links and/or put short code inline
+for existing links)
Selection
---------
@@ -102,6 +105,9 @@ Timeseries
`Between times
<http://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range>`__
+`Vectorized Lookup
+<http://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe>`__
+
Resampling
~~~~~~~~~~
@@ -138,6 +144,9 @@ Plotting
`Make Matplotlib look like R
<http://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default>`__
+`Setting x-axis major and minor labels
+<http://stackoverflow.com/questions/12945971/pandas-timeseries-plot-setting-x-axis-major-and-minor-ticks-and-labels>`__
+
Data In/Out
-----------
@@ -150,6 +159,12 @@ CSV
`Reading the first few lines of a frame
<http://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe>`__
+SQL
+~~~
+
+`Reading from databases with SQL
+<http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql>`__
+
HDF5
~~~~
@@ -165,6 +180,24 @@ HDF5
`Large Data work flows
<http://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas>`__
+Storing Attributes to a group node
+
+.. ipython:: python
+
+ df = DataFrame(np.random.randn(8,3))
+ store = HDFStore('test.h5')
+ store.put('df',df)
+
+ # you can store an arbitrary python object via pickle
+ store.get_storer('df').attrs.my_attribute = dict(A = 10)
+ store.get_storer('df').attrs.my_attribute
+
+.. ipython:: python
+ :suppress:
+
+ store.close()
+ os.remove('test.h5')
+
Miscellaneous
-------------
| https://api.github.com/repos/pandas-dev/pandas/pulls/3052 | 2013-03-14T16:02:17Z | 2013-03-14T16:02:23Z | 2013-03-14T16:02:23Z | 2013-03-14T16:02:23Z | |
DOC: more examples | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 6f0d76f6113f8..e824a3a925f4e 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -27,56 +27,116 @@ Selection
`Boolean Rows Indexing
<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
+`Using loc and iloc in selections
+<https://github.com/pydata/pandas/issues/2904>`__
+
`Extending a panel along the minor axis
<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
-`Using loc and iloc in selections
-<https://github.com/pydata/pandas/issues/2904>`__
+`Boolean masking in a panel
+<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__
+
+`Selecting via the complement
+<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
MultiIndexing
-------------
-`Prepending a level to a multiindex
-<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__
+`Creating a multi-index from a labeled frame
+<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__
+
+Slicing
+~~~~~~~
`Slicing a multi-index with xs
<http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__
+`Slicing a multi-index with xs #2
+<http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__
+
+Sorting
+~~~~~~~
+
`Multi-index sorting
<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__
`Partial Selection, the need for sortedness
<https://github.com/pydata/pandas/issues/2995>`__
+Levels
+~~~~~~
+
+`Prepending a level to a multiindex
+<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__
+
+`Flatten Hierarchical columns
+<http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__
+
Grouping
--------
`Basic grouping with apply
<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__
+`Using get_group
+<http://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__
+
`Apply to different items in a group
<http://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__
+`Expanding Apply
+<http://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas>`__
+
`Replacing values with groupby means
<http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means>`__
+`Sort by group with aggregation
+<http://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column>`__
+
+`Create multiple aggregated columns
+<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__
+
+Timeseries
+----------
+
+`Between times
+<http://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range>`__
+
+Resampling
+~~~~~~~~~~
+
`TimeGrouping of values grouped across time
<http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages>`__
+`TimeGrouping #2
+<http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__
+
+`Resampling with custom periods
+<http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__
+
+`Resample intraday frame without adding new days
+<http://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days>`__
+
+`Resample minute data
+<http://stackoverflow.com/questions/14861023/resampling-minute-data>`__
+
Merge
-----
-Join
-~~~~
+`emulate R rbind
+<http://stackoverflow.com/questions/14988480/pandas-version-of-rbind>`__
-`Joining a DataFrame to itself
+`Self Join
<https://github.com/pydata/pandas/issues/2996>`__
-Timeseries
-----------
+`How to set the index and join
+<http://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__
-`Resample intraday frame without adding new days
-<http://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days>`__
+Plotting
+--------
+
+`Make Matplotlib look like R
+<http://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default>`__
Data In/Out
-----------
@@ -84,15 +144,29 @@ Data In/Out
CSV
~~~
+`Reading a csv chunk-by-chunk
+<http://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309>`__
+
+`Reading the first few lines of a frame
+<http://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe>`__
+
HDF5
~~~~
+`Simple Queries with a Timestamp Index
+<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
+
`Managing heteregenous data using a linked multiple table hierarchy
<https://github.com/pydata/pandas/issues/3032>`__
-`Simple Queries with a Timestamp Index
-<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
+`Merging on-disk tables with millions of rows
+<http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__
+
+`Large Data work flows
+<http://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas>`__
Miscellaneous
-------------
+`Operating with timedeltas
+<https://github.com/pydata/pandas/pull/2899>`__
| https://api.github.com/repos/pandas-dev/pandas/pulls/3051 | 2013-03-14T13:18:15Z | 2013-03-14T13:18:21Z | 2013-03-14T13:18:21Z | 2013-03-14T13:18:21Z | |
ENH: Allow setitem in a frame where only mixed numerics are present | diff --git a/RELEASE.rst b/RELEASE.rst
index 2eb7980458f8e..032bca22d6f24 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -62,6 +62,7 @@ pandas 0.11.0
strings that can be parsed with datetime.strptime
- Add ``axes`` property to ``Series`` for compatibility
- Add ``xs`` function to ``Series`` for compatibility
+ - Allow setitem in a frame where only mixed numerics are present (e.g. int and float), (GH3037_)
**API Changes**
@@ -182,6 +183,7 @@ pandas 0.11.0
.. _GH3010: https://github.com/pydata/pandas/issues/3010
.. _GH3012: https://github.com/pydata/pandas/issues/3012
.. _GH3029: https://github.com/pydata/pandas/issues/3029
+.. _GH3037: https://github.com/pydata/pandas/issues/3037
.. _GH3041: https://github.com/pydata/pandas/issues/3041
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ee586a2101f62..08db4e11b8ee1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2121,7 +2121,8 @@ def _setitem_frame(self, key, value):
raise ValueError('Must pass DataFrame with boolean values only')
if self._is_mixed_type:
- raise ValueError('Cannot do boolean setting on mixed-type frame')
+ if not self._is_numeric_mixed_type:
+ raise ValueError('Cannot do boolean setting on mixed-type frame')
self.where(-key, value, inplace=True)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 093f600f8c4ea..2c5c8c4d088be 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -671,8 +671,11 @@ def consolidate(self, inplace=False):
@property
def _is_mixed_type(self):
- self._consolidate_inplace()
- return len(self._data.blocks) > 1
+ return self._data.is_mixed_type
+
+ @property
+ def _is_numeric_mixed_type(self):
+ return self._data.is_numeric_mixed_type
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2a41bbffa3b83..d2c3f4104950b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -980,6 +980,16 @@ def _consolidate_check(self):
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
+ @property
+ def is_mixed_type(self):
+ self._consolidate_inplace()
+ return len(self.blocks) > 1
+
+ @property
+ def is_numeric_mixed_type(self):
+ self._consolidate_inplace()
+ return all([ block.is_numeric for block in self.blocks ])
+
def get_numeric_data(self, copy=False, type_list=None, as_blocks = False):
"""
Parameters
@@ -1227,9 +1237,10 @@ def consolidate(self):
return BlockManager(new_blocks, self.axes)
def _consolidate_inplace(self):
- self.blocks = _consolidate(self.blocks, self.items)
- self._is_consolidated = True
- self._known_consolidated = True
+ if not self.is_consolidated():
+ self.blocks = _consolidate(self.blocks, self.items)
+ self._is_consolidated = True
+ self._known_consolidated = True
def get(self, item):
_, block = self._find_block(item)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 4598b37d7da6a..2e7ec3ad9c280 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -159,7 +159,7 @@ def get_new_values(self):
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
-
+
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1c30dfd1abced..4f7472fa46dbe 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -8660,6 +8660,33 @@ def test_boolean_indexing(self):
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
+ def test_boolean_indexing_mixed(self):
+ df = DataFrame(
+ {0L: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
+ 1L: {35: np.nan,
+ 40: 0.32632316859446198,
+ 43: np.nan,
+ 49: 0.32632316859446198,
+ 50: 0.39114724480578139},
+ 2L: {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},
+ 3L: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
+ 4L: {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
+ 'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
+
+ # mixed int/float ok
+ df2 = df.copy()
+ df2[df2>0.3] = 1
+ expected = df.copy()
+ expected.loc[40,1] = 1
+ expected.loc[49,1] = 1
+ expected.loc[50,1] = 1
+ expected.loc[35,4] = 1
+ assert_frame_equal(df2,expected)
+
+ # add object, should this raise?
+ df['foo'] = 'test'
+ self.assertRaises(ValueError, df.__setitem__, df>0.3, 1)
+
def test_sum_bools(self):
df = DataFrame(index=range(1), columns=range(10))
bools = isnull(df)
| this implements #3037
previously would failed if mixed_types was true,
so mixed int/float would fail
| https://api.github.com/repos/pandas-dev/pandas/pulls/3050 | 2013-03-14T11:39:34Z | 2013-03-15T12:22:45Z | 2013-03-15T12:22:45Z | 2014-06-27T07:44:21Z |
DOC: more cook examples | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index a805496fa56ca..6f0d76f6113f8 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -30,9 +30,24 @@ Selection
`Extending a panel along the minor axis
<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
+`Using loc and iloc in selections
+<https://github.com/pydata/pandas/issues/2904>`__
+
+MultiIndexing
+-------------
+
`Prepending a level to a multiindex
<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__
+`Slicing a multi-index with xs
+<http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__
+
+`Multi-index sorting
+<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__
+
+`Partial Selection, the need for sortedness
+<https://github.com/pydata/pandas/issues/2995>`__
+
Grouping
--------
@@ -81,5 +96,3 @@ HDF5
Miscellaneous
-------------
-`Multi-index sorting
-<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__
| https://api.github.com/repos/pandas-dev/pandas/pulls/3049 | 2013-03-14T11:36:54Z | 2013-03-14T11:36:59Z | 2013-03-14T11:36:59Z | 2013-03-14T11:36:59Z | |
BUG: str.contains ignored na argument GH2806 | diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index c53ffa9ac1997..5d4ae7eee2cf2 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -307,7 +307,7 @@ Bug Fixes
td - timedelta(minutes=5,seconds=5,microseconds=5)
- Fix pretty-printing of infinite data structures (closes GH2978_)
-
+ - str.contains ignored na argument (GH2806_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -319,3 +319,4 @@ on GitHub for a complete list.
.. _GH2898: https://github.com/pydata/pandas/issues/2898
.. _GH2978: https://github.com/pydata/pandas/issues/2978
.. _GH2739: https://github.com/pydata/pandas/issues/2739
+.. _GH2806: https://github.com/pydata/pandas/issues/2806
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index a24b213c4e8e0..99a45b5c58ee4 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -154,7 +154,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan):
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
- na : bool, default NaN
+ na : default NaN, fill value for missing values.
Returns
-------
@@ -688,7 +688,7 @@ def join(self, sep):
@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan):
result = str_contains(self.series, pat, case=case, flags=flags,
- na=np.nan)
+ na=na)
return self._wrap_result(result)
@copy(str_replace)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index a75ea8dc8259e..da7ad5b5d40e4 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -128,6 +128,11 @@ def test_contains(self):
self.assert_(result.dtype == np.bool_)
tm.assert_almost_equal(result, expected)
+ # na
+ values = Series(['om', 'foo',np.nan])
+ res = values.str.contains('foo', na="foo")
+ self.assertEqual (res.ix[2], "foo" )
+
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
| closes #2806
| https://api.github.com/repos/pandas-dev/pandas/pulls/3048 | 2013-03-14T03:14:26Z | 2013-03-14T03:40:17Z | 2013-03-14T03:40:17Z | 2014-07-01T21:30:07Z |
BUG: Bug in groupby with first/last where dtypes could change (GH3041_) | diff --git a/RELEASE.rst b/RELEASE.rst
index b132b962fcd0e..2eb7980458f8e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -145,8 +145,9 @@ pandas 0.11.0
values (see GH2922_, GH2892_), also check for out-of-bounds indices (GH3029_)
- Bug in DataFrame column insertion when the column creation fails, existing frame is left in
an irrecoverable state (GH3010_)
- - Bug in DataFrame update where non-specified values could cause dtype changes (GH3016_)
- - Bug in DataFrame combine_first where non-specified values could cause dtype changes (GH3041_)
+ - Bug in DataFrame update, combine_first where non-specified values could cause
+ dtype changes (GH3016_, GH3041_)
+ - Bug in groupby with first/last where dtypes could change (GH3041_)
- Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
other values), (GH2850_)
- Unstack of a frame with no nans would always cause dtype upcasting (GH2929_)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 17a2ccac5e30e..a3e8c09839891 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -777,6 +777,26 @@ def _possibly_cast_item(obj, item, dtype):
raise ValueError("Unexpected dtype encountered: %s" % dtype)
+def _possibly_downcast_to_dtype(result, dtype):
+ """ try to cast to the specified dtype (e.g. convert back to bool/int
+ or could be an astype of float64->float32 """
+
+ if not isinstance(result, np.ndarray):
+ return result
+
+ try:
+ if dtype == np.float_:
+ return result.astype(dtype)
+ elif dtype == np.bool_ or dtype == np.int_:
+ if issubclass(result.dtype.type, np.number) and notnull(result).all():
+ new_result = result.astype(dtype)
+ if (new_result == result).all():
+ return new_result
+ except:
+ pass
+
+ return result
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
@@ -936,7 +956,9 @@ def _possibly_convert_platform(values):
return values
def _possibly_cast_to_timedelta(value, coerce=True):
- """ try to cast to timedelta64 w/o coercion """
+ """ try to cast to timedelta64, if already a timedeltalike, then make
+ sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards,
+ don't force the conversion unless coerce is True """
# deal with numpy not being able to handle certain timedelta operations
if isinstance(value,np.ndarray) and value.dtype.kind == 'm':
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index fe7c281afb1b9..3f12f773db96a 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1594,6 +1594,10 @@ def _cython_agg_blocks(self, how, numeric_only=True):
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
+
+ # see if we can cast the block back to the original dtype
+ result = block._try_cast_result(result)
+
newb = make_block(result, block.items, block.ref_items)
new_blocks.append(newb)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 96cc41be26b92..2a41bbffa3b83 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4,6 +4,7 @@
from numpy import nan
import numpy as np
+from pandas.core.common import _possibly_downcast_to_dtype
from pandas.core.index import Index, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
@@ -560,6 +561,9 @@ class NumericBlock(Block):
is_numeric = True
_can_hold_na = True
+ def _try_cast_result(self, result):
+ return _possibly_downcast_to_dtype(result, self.dtype)
+
class FloatBlock(NumericBlock):
def _can_hold_element(self, element):
@@ -608,20 +612,6 @@ def _try_cast(self, element):
except: # pragma: no cover
return element
- def _try_cast_result(self, result):
- # this is quite restrictive to convert
- try:
- if (isinstance(result, np.ndarray) and
- issubclass(result.dtype.type, np.floating)):
- if com.notnull(result).all():
- new_result = result.astype(self.dtype)
- if (new_result == result).all():
- return new_result
- except:
- pass
-
- return result
-
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
@@ -639,6 +629,9 @@ def _try_cast(self, element):
except: # pragma: no cover
return element
+ def _try_cast_result(self, result):
+ return _possibly_downcast_to_dtype(result, self.dtype)
+
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 4dde7eeea98ce..4b1770dd4f5df 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -163,21 +163,25 @@ def test_first_last_nth(self):
self.assert_(com.isnull(grouped['B'].nth(0)['foo']))
def test_first_last_nth_dtypes(self):
- # tests for first / last / nth
- grouped = self.df_mixed_floats.groupby('A')
+ df = self.df_mixed_floats.copy()
+ df['E'] = True
+ df['F'] = 1
+
+ # tests for first / last / nth
+ grouped = df.groupby('A')
first = grouped.first()
- expected = self.df_mixed_floats.ix[[1, 0], ['B', 'C', 'D']]
+ expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = ['bar', 'foo']
assert_frame_equal(first, expected, check_names=False)
last = grouped.last()
- expected = self.df_mixed_floats.ix[[5, 7], ['B', 'C', 'D']]
+ expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = ['bar', 'foo']
assert_frame_equal(last, expected, check_names=False)
nth = grouped.nth(1)
- expected = self.df_mixed_floats.ix[[3, 2], ['B', 'C', 'D']]
+ expected = df.ix[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = ['bar', 'foo']
assert_frame_equal(nth, expected, check_names=False)
| fixed #3041 (completed now)
DOC: docstring updates in core/common.py for _possibily_cast_to_timedelta
| https://api.github.com/repos/pandas-dev/pandas/pulls/3044 | 2013-03-14T01:24:53Z | 2013-03-14T02:00:20Z | 2013-03-14T02:00:20Z | 2014-08-16T18:47:32Z |
BUG: frame combine_first where non-specified values could cause dtype changes (#3041) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8894df02ed989..baad73652b577 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -146,6 +146,7 @@ pandas 0.11.0
- Bug in DataFrame column insertion when the column creation fails, existing frame is left in
an irrecoverable state (GH3010_)
- Bug in DataFrame update where non-specified values could cause dtype changes (GH3016_)
+ - Bug in DataFrame combine_first where non-specified values could cause dtype changes (GH3041_)
- Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
other values), (GH2850_)
@@ -178,6 +179,7 @@ pandas 0.11.0
.. _GH3010: https://github.com/pydata/pandas/issues/3010
.. _GH3012: https://github.com/pydata/pandas/issues/3012
.. _GH3029: https://github.com/pydata/pandas/issues/3029
+.. _GH3041: https://github.com/pydata/pandas/issues/3041
pandas 0.10.1
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c108cc677b9c6..8e7dbed742c3f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3723,7 +3723,7 @@ def _compare(a, b):
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
- def combine(self, other, func, fill_value=None):
+ def combine(self, other, func, fill_value=None, overwrite=True):
"""
Add two DataFrame objects and do not propagate NaN values, so if for a
(column, time) one frame is missing a value, it will default to the
@@ -3734,6 +3734,8 @@ def combine(self, other, func, fill_value=None):
other : DataFrame
func : function
fill_value : scalar value
+ overwrite : boolean, default True
+ If True then overwrite values for common keys in the calling frame
Returns
-------
@@ -3760,9 +3762,16 @@ def combine(self, other, func, fill_value=None):
series = this[col].values
otherSeries = other[col].values
+ this_mask = isnull(series)
+ other_mask = isnull(otherSeries)
+
+ # don't overwrite columns unecessarily
+ # DO propogate if this column is not in the intersection
+ if not overwrite and other_mask.all():
+ result[col] = this[col].copy()
+ continue
+
if do_fill:
- this_mask = isnull(series)
- other_mask = isnull(otherSeries)
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
@@ -3798,7 +3807,7 @@ def combine_first(self, other):
combined : DataFrame
"""
combiner = lambda x, y: np.where(isnull(x), y, x)
- return self.combine(other, combiner)
+ return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index cba6adadb8d6c..d83bed55f2418 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7248,6 +7248,30 @@ def test_combine_first_mixed_bug(self):
combined = frame1.combine_first(frame2)
self.assertEqual(len(combined.columns), 5)
+ # gh 3016 (same as in update)
+ df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
+ columns=['A','B','bool1','bool2'])
+
+ other = DataFrame([[45,45]],index=[0],columns=['A','B'])
+ result = df.combine_first(other)
+ assert_frame_equal(result, df)
+
+ df.ix[0,'A'] = np.nan
+ result = df.combine_first(other)
+ df.ix[0,'A'] = 45
+ assert_frame_equal(result, df)
+
+ # doc example
+ df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
+ 'B' : [np.nan, 2., 3., np.nan, 6.]})
+
+ df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
+ 'B' : [np.nan, np.nan, 3., 4., 6., 8.]})
+
+ result = df1.combine_first(df2)
+ expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
+ assert_frame_equal(result,expected)
+
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
| fixes combine_first on #3041
| https://api.github.com/repos/pandas-dev/pandas/pulls/3043 | 2013-03-14T00:36:19Z | 2013-03-14T01:18:49Z | 2013-03-14T01:18:49Z | 2014-06-17T22:38:49Z |
BUG: Unstack of a frame with no nans would always cause dtype upcasting (GH #2929) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8894df02ed989..c6abe3575277e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -148,6 +148,7 @@ pandas 0.11.0
- Bug in DataFrame update where non-specified values could cause dtype changes (GH3016_)
- Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
other values), (GH2850_)
+ - Unstack of a frame with no nans would always cause dtype upcasting (GH2929_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -169,6 +170,7 @@ pandas 0.11.0
.. _GH2892: https://github.com/pydata/pandas/issues/2892
.. _GH2909: https://github.com/pydata/pandas/issues/2909
.. _GH2922: https://github.com/pydata/pandas/issues/2922
+.. _GH2929: https://github.com/pydata/pandas/issues/2929
.. _GH2931: https://github.com/pydata/pandas/issues/2931
.. _GH2973: https://github.com/pydata/pandas/issues/2973
.. _GH2967: https://github.com/pydata/pandas/issues/2967
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index c86273b8a1cca..4598b37d7da6a 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -144,15 +144,23 @@ def get_result(self):
def get_new_values(self):
values = self.values
+
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
+ result_shape = (length, result_width)
- dtype, fill_value = _maybe_promote(values.dtype)
- new_values = np.empty((length, result_width), dtype=dtype)
- new_values.fill(fill_value)
- new_mask = np.zeros((length, result_width), dtype=bool)
+ # if our mask is all True, then we can use our existing dtype
+ if self.mask.all():
+ dtype = values.dtype
+ new_values = np.empty(result_shape, dtype=dtype)
+ else:
+ dtype, fill_value = _maybe_promote(values.dtype)
+ new_values = np.empty(result_shape, dtype=dtype)
+ new_values.fill(fill_value)
+
+ new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in xrange(values.shape[1]):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index cba6adadb8d6c..3f13df5ce0415 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -8242,6 +8242,41 @@ def test_unstack_to_series(self):
data = data.unstack()
assert_frame_equal(old_data, data)
+ def test_unstack_dtypes(self):
+
+ # GH 2929
+ rows = [[1, 1, 3, 4],
+ [1, 2, 3, 4],
+ [2, 1, 3, 4],
+ [2, 2, 3, 4]]
+
+ df = DataFrame(rows, columns=list('ABCD'))
+ result = df.get_dtype_counts()
+ expected = Series({'int64' : 4})
+ assert_series_equal(result, expected)
+
+ # single dtype
+ df2 = df.set_index(['A','B'])
+ df3 = df2.unstack('B')
+ result = df3.get_dtype_counts()
+ expected = Series({'int64' : 4})
+ assert_series_equal(result, expected)
+
+ # mixed
+ df2 = df.set_index(['A','B'])
+ df2['C'] = 3.
+ df3 = df2.unstack('B')
+ result = df3.get_dtype_counts()
+ expected = Series({'int64' : 2, 'float64' : 2})
+ assert_series_equal(result, expected)
+
+ df2['D'] = 'foo'
+ df3 = df2.unstack('B')
+ result = df3.get_dtype_counts()
+ expected = Series({'float64' : 2, 'object' : 2})
+ assert_series_equal(result, expected)
+
+
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 99c081c0cc6cb..c93dcf386e1c9 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1346,7 +1346,7 @@ def test_unstack_group_index_overflow(self):
# test roundtrip
stacked = result.stack()
- assert_series_equal(s.astype(np.float64),
+ assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
| fixes #2929
| https://api.github.com/repos/pandas-dev/pandas/pulls/3040 | 2013-03-13T22:04:45Z | 2013-03-14T00:37:44Z | 2013-03-14T00:37:44Z | 2014-07-26T15:38:19Z |
Fix some pprint_thing warts | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 0f0029167ce64..dc79a1cbc762a 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -139,14 +139,9 @@ def to_string(self):
maxlen = max(len(x) for x in fmt_index)
pad_space = min(maxlen, 60)
- _encode_diff = _encode_diff_func()
-
result = ['%s %s'] * len(fmt_values)
for i, (k, v) in enumerate(izip(fmt_index[1:], fmt_values)):
- try:
- idx = k.ljust(pad_space + _encode_diff(k))
- except UnicodeEncodeError:
- idx = k.ljust(pad_space)
+ idx = k.ljust(pad_space)
result[i] = result[i] % (idx, v)
if self.header and have_header:
@@ -158,21 +153,6 @@ def to_string(self):
return unicode(u'\n'.join(result))
-
-def _encode_diff_func():
- if py3compat.PY3: # pragma: no cover
- _encode_diff = lambda x: 0
- else:
- encoding = get_option("display.encoding")
-
- def _encode_diff(x):
- if not isinstance(x,unicode):
- return len(x) - len(x.decode(encoding))
- return 0
-
- return _encode_diff
-
-
def _strlen_func():
if py3compat.PY3: # pragma: no cover
_strlen = len
@@ -1490,7 +1470,6 @@ def _make_fixed_width(strings, justify='right', minimum=None):
return strings
_strlen = _strlen_func()
- _encode_diff = _encode_diff_func()
max_len = np.max([_strlen(x) for x in strings])
@@ -1507,10 +1486,7 @@ def _make_fixed_width(strings, justify='right', minimum=None):
justfunc = lambda self, x: self.rjust(x)
def just(x):
- try:
- eff_len = max_len + _encode_diff(x)
- except UnicodeError:
- eff_len = max_len
+ eff_len = max_len
if conf_max is not None:
if (conf_max > 3) & (_strlen(x) > max_len):
| **Part 1**
currently:
``` python
In [4]: from pandas.core.common import pprint_thing as pp_t
...: pp_t([1,None])
Out[4]: u'[1, ]'
```
after PR:
``` python
In [13]: from pandas.core.common import pprint_thing as pp_t
...: pp_t([1,None])
Out[13]: u'[1, None]'
```
and jeff fixed index NaN printing in #3034, at a higher level.
**Part 2**
currently:
``` python
In [1]: pd.Index(['a','b'])
...:
Out[1]: Index([a, b], dtype=object)
```
after PR:
``` python
pd.Index(['a','b'])
Out[3]: Index([u'a', u'b'], dtype=object)
```
so:
``` python
In [7]: from pandas import Index
In [10]: eval(repr(pd.Index([u'a', u'b'], dtype=object)))
Out[10]: Index([u'a', u'b'], dtype=object)
```
Which is what `repr()` should enable, in principle.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3038 | 2013-03-13T19:26:34Z | 2013-04-23T02:11:03Z | 2013-04-23T02:11:03Z | 2014-06-19T09:13:11Z |
DOC: added cookbook.rst to main docs | diff --git a/RELEASE.rst b/RELEASE.rst
index d79ede4dad26e..b91426db2741c 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -30,6 +30,7 @@ pandas 0.11.0
**New features**
- New documentation section, ``10 Minutes to Pandas``
+ - New documentation section, ``Cookbook``
- Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in
DataFrames and propogate in operations
- Add function to pandas.io.data for retrieving stock index components from
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
new file mode 100644
index 0000000000000..a805496fa56ca
--- /dev/null
+++ b/doc/source/cookbook.rst
@@ -0,0 +1,85 @@
+.. _cookbook:
+
+.. currentmodule:: pandas
+
+.. ipython:: python
+ :suppress:
+
+ import numpy as np
+ import random
+ import os
+ np.random.seed(123456)
+ from pandas import *
+ import pandas as pd
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+
+********
+Cookbook
+********
+
+This is a respository for *short and sweet* example and links for useful pandas recipes.
+We encourage users to add to this documentation. This is a great *First Pull Request*.
+
+Selection
+---------
+`Boolean Rows Indexing
+<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
+
+`Extending a panel along the minor axis
+<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
+
+`Prepending a level to a multiindex
+<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__
+
+Grouping
+--------
+
+`Basic grouping with apply
+<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__
+
+`Apply to different items in a group
+<http://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__
+
+`Replacing values with groupby means
+<http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means>`__
+
+`TimeGrouping of values grouped across time
+<http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages>`__
+
+Merge
+-----
+
+Join
+~~~~
+
+`Joining a DataFrame to itself
+<https://github.com/pydata/pandas/issues/2996>`__
+
+Timeseries
+----------
+
+`Resample intraday frame without adding new days
+<http://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days>`__
+
+Data In/Out
+-----------
+
+CSV
+~~~
+
+HDF5
+~~~~
+
+`Managing heteregenous data using a linked multiple table hierarchy
+<https://github.com/pydata/pandas/issues/3032>`__
+
+`Simple Queries with a Timestamp Index
+<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
+
+Miscellaneous
+-------------
+
+`Multi-index sorting
+<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d59cb6d7a816b..b919657b64fc2 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -113,6 +113,7 @@ See the package overview for more detail about what's in the library.
faq
overview
10min
+ cookbook
dsintro
basics
indexing
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index 9397be36697b2..c53ffa9ac1997 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -12,6 +12,9 @@ pay close attention to.
There is a new section in the documentation, :ref:`10 Minutes to Pandas <10min>`,
primarily geared to new users.
+There is a new section in the documentation, :ref:`Cookbook <cookbook>`, a collection
+of useful recipes in pandas (and that we want contributions!).
+
There are several libraries that are now :ref:`Recommended Dependencies <install.recommended_dependencies>`
Selection Choices
| https://api.github.com/repos/pandas-dev/pandas/pulls/3036 | 2013-03-13T11:46:14Z | 2013-03-13T16:38:44Z | 2013-03-13T16:38:44Z | 2014-06-26T19:46:41Z | |
BUG: Formatting of an index that has nan was inconsistent or wrong | diff --git a/RELEASE.rst b/RELEASE.rst
index d79ede4dad26e..94bcf6cb7b187 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -145,6 +145,8 @@ pandas 0.11.0
- Bug in DataFrame column insertion when the column creation fails, existing frame is left in
an irrecoverable state (GH3010_)
- Bug in DataFrame update where non-specified values could cause dtype changes (GH3016_)
+ - Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
+ other values), (GH2850_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -161,6 +163,7 @@ pandas 0.11.0
.. _GH2867: https://github.com/pydata/pandas/issues/2867
.. _GH2807: https://github.com/pydata/pandas/issues/2807
.. _GH2849: https://github.com/pydata/pandas/issues/2849
+.. _GH2850: https://github.com/pydata/pandas/issues/2850
.. _GH2898: https://github.com/pydata/pandas/issues/2898
.. _GH2892: https://github.com/pydata/pandas/issues/2892
.. _GH2909: https://github.com/pydata/pandas/issues/2909
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 42fe1c4ccb928..0f9776e202c00 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -173,9 +173,9 @@ def __unicode__(self):
Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
"""
if len(self) > 6 and len(self) > np.get_printoptions()['threshold']:
- data = self[:3].tolist() + ["..."] + self[-3:].tolist()
+ data = self[:3].format() + ["..."] + self[-3:].format()
else:
- data = self
+ data = self.format()
prepr = com.pprint_thing(data, escape_chars=('\t', '\r', '\n'))
return '%s(%s, dtype=%s)' % (type(self).__name__, prepr, self.dtype)
@@ -247,8 +247,14 @@ def _has_complex_internals(self):
def summary(self, name=None):
if len(self) > 0:
- index_summary = ', %s to %s' % (com.pprint_thing(self[0]),
- com.pprint_thing(self[-1]))
+ head = self[0]
+ if hasattr(head,'format'):
+ head = head.format()
+ tail = self[-1]
+ if hasattr(tail,'format'):
+ tail = tail.format()
+ index_summary = ', %s to %s' % (com.pprint_thing(head),
+ com.pprint_thing(tail))
else:
index_summary = ''
@@ -419,7 +425,7 @@ def take(self, indexer, axis=0):
taken = self.view(np.ndarray).take(indexer)
return self._constructor(taken, name=self.name)
- def format(self, name=False, formatter=None):
+ def format(self, name=False, formatter=None, na_rep='NaN'):
"""
Render a string representation of the Index
"""
@@ -454,6 +460,14 @@ def format(self, name=False, formatter=None):
if values.dtype == np.object_:
result = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
+
+ # could have nans
+ mask = isnull(values)
+ if mask.any():
+ result = np.array(result)
+ result[mask] = na_rep
+ result = result.tolist()
+
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
@@ -1446,10 +1460,9 @@ def __unicode__(self):
np.set_printoptions(threshold=50)
if len(self) > 100:
- values = np.concatenate([self[:50].values,
- self[-50:].values])
+ values = self[:50].format() + self[-50:].format()
else:
- values = self.values
+ values = self.format()
summary = com.pprint_thing(values, escape_chars=('\t', '\r', '\n'))
@@ -1618,7 +1631,16 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
if len(lev) > 0:
+
formatted = lev.take(lab).format(formatter=formatter)
+
+ # we have some NA
+ mask = lab==-1
+ if mask.any():
+ formatted = np.array(formatted)
+ formatted[mask] = na_rep
+ formatted = formatted.tolist()
+
else:
# weird all NA case
formatted = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 1b436bfd443fc..d32a50a37f667 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -603,6 +603,31 @@ def test_long_series(self):
nmatches = len(re.findall('dtype',str_rep))
self.assert_(nmatches == 1)
+ def test_index_with_nan(self):
+ # GH 2850
+ df = DataFrame({'id1': {0: '1a3', 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
+ 'id3': {0: '78d', 1: '79d'}, 'value': {0: 123, 1: 64}})
+
+ # multi-index
+ y = df.set_index(['id1', 'id2', 'id3'])
+ result = y.to_string()
+ expected = u' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64'
+ self.assert_(result == expected)
+
+ # index
+ y = df.set_index('id2')
+ result = y.to_string()
+ expected = u' id1 id3 value\nid2 \nNaN 1a3 78d 123\nd67 9h4 79d 64'
+ self.assert_(result == expected)
+
+ # all-nan in mi
+ df2 = df.copy()
+ df2.ix[:,'id2'] = np.nan
+ y = df2.set_index('id2')
+ result = y.to_string()
+ expected = u' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64'
+ self.assert_(result == expected)
+
def test_to_string(self):
from pandas import read_table
import re
@@ -1234,10 +1259,16 @@ def test_datetimeindex(self):
result = s.to_string()
self.assertTrue('2013-01-02' in result)
- s = Series(2, index=[ Timestamp('20130111'), NaT ]).append(s)
+ # nat in index
+ s2 = Series(2, index=[ Timestamp('20130111'), NaT ])
+ s = s2.append(s)
result = s.to_string()
self.assertTrue('NaT' in result)
+ # nat in summary
+ result = str(s2.index)
+ self.assertTrue('NaT' in result)
+
def test_timedelta64(self):
from pandas import date_range
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index a5732f252d617..aad2a7d988890 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -351,12 +351,13 @@ def test_format(self):
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
- expected = [str(index[0]), str(index[1]), str(index[2])]
+ expected = [str(index[0]), str(index[1]), u'NaN']
self.assertEquals(formatted, expected)
+ # is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
- expected = [str(index[0]), str(index[1]), '']
+ expected = [str(index[0]), str(index[1]), u'NaN']
self.assertEquals(formatted, expected)
self.strIndex[:0].format()
| Formatting of an index that has `nan` was inconsistent or wrong,
(would fill from last value), closes GH #2850
Also changed in test_format.py/test_index
1) printing of 'nan' rather than the na_rep (NaN) is inconcsistent
with everywhere else
2) a 'None' in the index is defacto treated as NaN, is this wrong?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3034 | 2013-03-13T01:17:49Z | 2013-03-13T16:37:44Z | 2013-03-13T16:37:44Z | 2014-08-13T08:36:17Z |
ENH: Dtype._is_immutable | diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index c6c162001d147..bc776434b2e6e 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -396,6 +396,16 @@ def _can_hold_na(self) -> bool:
"""
return True
+ @property
+ def _is_immutable(self) -> bool:
+ """
+ Can arrays with this dtype be modified with __setitem__? If not, return
+ True.
+
+ Immutable arrays are expected to raise TypeError on __setitem__ calls.
+ """
+ return False
+
class StorageExtensionDtype(ExtensionDtype):
"""ExtensionDtype that may be backed by more than one implementation."""
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 0d3e955696d81..53f0fb2843653 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1608,6 +1608,8 @@ class SparseDtype(ExtensionDtype):
0.3333333333333333
"""
+ _is_immutable = True
+
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 05577fb971061..1533dc77321cb 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -39,7 +39,6 @@
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
- SparseDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
@@ -943,7 +942,7 @@ def fast_xs(self, loc: int) -> SingleBlockManager:
n = len(self)
# GH#46406
- immutable_ea = isinstance(dtype, SparseDtype)
+ immutable_ea = isinstance(dtype, ExtensionDtype) and dtype._is_immutable
if isinstance(dtype, ExtensionDtype) and not immutable_ea:
cls = dtype.construct_array_type()
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 3e8a754c8c527..92d50e5bd9a66 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -102,6 +103,9 @@ def test_copy(self, data):
assert data[0] != data[1]
result = data.copy()
+ if data.dtype._is_immutable:
+ pytest.skip("test_copy assumes mutability")
+
data[1] = data[0]
assert result[1] != result[0]
@@ -114,6 +118,9 @@ def test_view(self, data):
assert result is not data
assert type(result) == type(data)
+ if data.dtype._is_immutable:
+ pytest.skip("test_view assumes mutability")
+
result[1] = result[0]
assert data[1] == data[0]
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 3f89ef5395006..ea618ead7c84d 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -334,6 +334,9 @@ def test_ravel(self, data):
result = data.ravel()
assert type(result) == type(data)
+ if data.dtype._is_immutable:
+ pytest.skip("test_ravel assumes mutability")
+
# Check that we have a view, not a copy
result[0] = result[1]
assert data[0] == data[1]
@@ -348,6 +351,9 @@ def test_transpose(self, data):
# If we ever _did_ support 2D, shape should be reversed
assert result.shape == data.shape[::-1]
+ if data.dtype._is_immutable:
+ pytest.skip("test_transpose assumes mutability")
+
# Check that we have a view, not a copy
result[0] = result[1]
assert data[0] == data[1]
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 1085ada920ccc..66842dbc18145 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -36,6 +36,24 @@ def full_indexer(self, request):
"""
return request.param
+ @pytest.fixture(autouse=True)
+ def skip_if_immutable(self, dtype, request):
+ if dtype._is_immutable:
+ node = request.node
+ if node.name.split("[")[0] == "test_is_immutable":
+ # This fixture is auto-used, but we want to not-skip
+ # test_is_immutable.
+ return
+ pytest.skip("__setitem__ test not applicable with immutable dtype")
+
+ def test_is_immutable(self, data):
+ if data.dtype._is_immutable:
+ with pytest.raises(TypeError):
+ data[0] = data[0]
+ else:
+ data[0] = data[1]
+ assert data[0] == data[1]
+
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index a39133c784380..90997160f2b08 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -108,10 +108,6 @@ def _check_unsupported(self, data):
if data.dtype == SparseDtype(int, 0):
pytest.skip("Can't store nan in int array.")
- @pytest.mark.xfail(reason="SparseArray does not support setitem")
- def test_ravel(self, data):
- super().test_ravel(data)
-
class TestDtype(BaseSparseTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
@@ -119,13 +115,7 @@ def test_array_type_with_arg(self, data, dtype):
class TestInterface(BaseSparseTests, base.BaseInterfaceTests):
- def test_copy(self, data):
- # __setitem__ does not work, so we only have a smoke-test
- data.copy()
-
- def test_view(self, data):
- # __setitem__ does not work, so we only have a smoke-test
- data.view()
+ pass
class TestConstructors(BaseSparseTests, base.BaseConstructorsTests):
@@ -185,10 +175,6 @@ def test_merge(self, data, na_value):
self._check_unsupported(data)
super().test_merge(data, na_value)
- @pytest.mark.xfail(reason="SparseArray does not support setitem")
- def test_transpose(self, data):
- super().test_transpose(data)
-
class TestGetitem(BaseSparseTests, base.BaseGetitemTests):
def test_get(self, data):
@@ -204,7 +190,8 @@ def test_reindex(self, data, na_value):
super().test_reindex(data, na_value)
-# Skipping TestSetitem, since we don't implement it.
+class TestSetitem(BaseSparseTests, base.BaseSetitemTests):
+ pass
class TestIndex(base.BaseIndexTests):
| cc @phofl | https://api.github.com/repos/pandas-dev/pandas/pulls/54421 | 2023-08-04T22:05:11Z | 2023-08-06T10:20:05Z | 2023-08-06T10:20:05Z | 2023-08-06T21:19:44Z |
DOC: Simplify enhancingperf.rst | diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index 0909d5cd91355..2ddc3e709be85 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -7,11 +7,9 @@ Enhancing performance
*********************
In this part of the tutorial, we will investigate how to speed up certain
-functions operating on pandas :class:`DataFrame` using three different techniques:
-Cython, Numba and :func:`pandas.eval`. We will see a speed improvement of ~200
-when we use Cython and Numba on a test function operating row-wise on the
-:class:`DataFrame`. Using :func:`pandas.eval` we will speed up a sum by an order of
-~2.
+functions operating on pandas :class:`DataFrame` using Cython, Numba and :func:`pandas.eval`.
+Generally, using Cython and Numba can offer a larger speedup than using :func:`pandas.eval`
+but will require a lot more code.
.. note::
@@ -79,12 +77,12 @@ We achieve our result by using :meth:`DataFrame.apply` (row-wise):
%timeit df.apply(lambda x: integrate_f(x["a"], x["b"], x["N"]), axis=1)
-But clearly this isn't fast enough for us. Let's take a look and see where the
-time is spent during this operation (limited to the most time consuming
-four calls) using the `prun ipython magic function <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-prun>`__:
+Let's take a look and see where the time is spent during this operation
+using the `prun ipython magic function <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-prun>`__:
.. ipython:: python
+ # most time consuming 4 calls
%prun -l 4 df.apply(lambda x: integrate_f(x["a"], x["b"], x["N"]), axis=1) # noqa E999
By far the majority of time is spend inside either ``integrate_f`` or ``f``,
@@ -103,8 +101,7 @@ First we're going to need to import the Cython magic function to IPython:
%load_ext Cython
-Now, let's simply copy our functions over to Cython as is (the suffix
-is here to distinguish between function versions):
+Now, let's simply copy our functions over to Cython:
.. ipython::
@@ -119,24 +116,20 @@ is here to distinguish between function versions):
...: return s * dx
...:
-.. note::
-
- If you're having trouble pasting the above into your ipython, you may need
- to be using bleeding edge IPython for paste to play well with cell magics.
-
.. ipython:: python
%timeit df.apply(lambda x: integrate_f_plain(x["a"], x["b"], x["N"]), axis=1)
-Already this has shaved a third off, not too bad for a simple copy and paste.
+This has improved the performance compared to the pure Python approach by one-third.
.. _enhancingperf.type:
-Adding type
-~~~~~~~~~~~
+Declaring C types
+~~~~~~~~~~~~~~~~~
-We get another huge improvement simply by providing type information:
+We can annotate the function variables and return types as well as use ``cdef``
+and ``cpdef`` to improve performance:
.. ipython::
@@ -157,27 +150,21 @@ We get another huge improvement simply by providing type information:
%timeit df.apply(lambda x: integrate_f_typed(x["a"], x["b"], x["N"]), axis=1)
-Now, we're talking! It's now over ten times faster than the original Python
-implementation, and we haven't *really* modified the code. Let's have another
-look at what's eating up time:
-
-.. ipython:: python
-
- %prun -l 4 df.apply(lambda x: integrate_f_typed(x["a"], x["b"], x["N"]), axis=1)
+Annotating the functions with C types yields an over ten times performance improvement compared to
+the original Python implementation.
.. _enhancingperf.ndarray:
Using ndarray
~~~~~~~~~~~~~
-It's calling series a lot! It's creating a :class:`Series` from each row, and calling get from both
-the index and the series (three times for each row). Function calls are expensive
-in Python, so maybe we could minimize these by cythonizing the apply part.
+When re-profiling, time is spent creating a :class:`Series` from each row, and calling ``__getitem__`` from both
+the index and the series (three times for each row). These Python function calls are expensive and
+can be improved by passing an ``np.ndarray``.
-.. note::
+.. ipython:: python
- We are now passing ndarrays into the Cython function, fortunately Cython plays
- very nicely with NumPy.
+ %prun -l 4 df.apply(lambda x: integrate_f_typed(x["a"], x["b"], x["N"]), axis=1)
.. ipython::
@@ -207,55 +194,30 @@ in Python, so maybe we could minimize these by cythonizing the apply part.
...:
-The implementation is simple, it creates an array of zeros and loops over
-the rows, applying our ``integrate_f_typed``, and putting this in the zeros array.
-
-
-.. warning::
-
- You can **not pass** a :class:`Series` directly as a ``ndarray`` typed parameter
- to a Cython function. Instead pass the actual ``ndarray`` using the
- :meth:`Series.to_numpy`. The reason is that the Cython
- definition is specific to an ndarray and not the passed :class:`Series`.
-
- So, do not do this:
-
- .. code-block:: python
-
- apply_integrate_f(df["a"], df["b"], df["N"])
-
- But rather, use :meth:`Series.to_numpy` to get the underlying ``ndarray``:
-
- .. code-block:: python
-
- apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
-
-.. note::
+This implementation creates an array of zeros and inserts the result
+of ``integrate_f_typed`` applied over each row. Looping over an ``ndarray`` is faster
+in Cython than looping over a :class:`Series` object.
- Loops like this would be *extremely* slow in Python, but in Cython looping
- over NumPy arrays is *fast*.
+Since ``apply_integrate_f`` is typed to accept an ``np.ndarray``, :meth:`Series.to_numpy`
+calls are needed to utilize this function.
.. ipython:: python
%timeit apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
-We've gotten another big improvement. Let's check again where the time is spent:
+Performance has improved from the prior implementation by almost ten times.
-.. ipython:: python
-
- %prun -l 4 apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
+.. _enhancingperf.boundswrap:
-As one might expect, the majority of the time is now spent in ``apply_integrate_f``,
-so if we wanted to make anymore efficiencies we must continue to concentrate our
-efforts here.
+Disabling compiler directives
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _enhancingperf.boundswrap:
+The majority of the time is now spent in ``apply_integrate_f``. Disabling Cython's ``boundscheck``
+and ``wraparound`` checks can yield more performance.
-More advanced techniques
-~~~~~~~~~~~~~~~~~~~~~~~~
+.. ipython:: python
-There is still hope for improvement. Here's an example of using some more
-advanced Cython techniques:
+ %prun -l 4 apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
.. ipython::
@@ -291,10 +253,9 @@ advanced Cython techniques:
%timeit apply_integrate_f_wrap(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
-Even faster, with the caveat that a bug in our Cython code (an off-by-one error,
-for example) might cause a segfault because memory access isn't checked.
+However, a loop indexer ``i`` accessing an invalid location in an array would cause a segfault because memory access isn't checked.
For more about ``boundscheck`` and ``wraparound``, see the Cython docs on
-`compiler directives <https://cython.readthedocs.io/en/latest/src/reference/compilation.html?highlight=wraparound#compiler-directives>`__.
+`compiler directives <https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives>`__.
.. _enhancingperf.numba:
@@ -317,7 +278,7 @@ Numba supports compilation of Python to run on either CPU or GPU hardware and is
Numba can be used in 2 ways with pandas:
#. Specify the ``engine="numba"`` keyword in select pandas methods
-#. Define your own Python function decorated with ``@jit`` and pass the underlying NumPy array of :class:`Series` or :class:`DataFrame` (using ``to_numpy()``) into the function
+#. Define your own Python function decorated with ``@jit`` and pass the underlying NumPy array of :class:`Series` or :class:`DataFrame` (using :meth:`Series.to_numpy`) into the function
pandas Numba Engine
~~~~~~~~~~~~~~~~~~~
@@ -327,28 +288,30 @@ Methods that support ``engine="numba"`` will also have an ``engine_kwargs`` keyw
``"nogil"``, ``"nopython"`` and ``"parallel"`` keys with boolean values to pass into the ``@jit`` decorator.
If ``engine_kwargs`` is not specified, it defaults to ``{"nogil": False, "nopython": True, "parallel": False}`` unless otherwise specified.
-In terms of performance, **the first time a function is run using the Numba engine will be slow**
-as Numba will have some function compilation overhead. However, the JIT compiled functions are cached,
-and subsequent calls will be fast. In general, the Numba engine is performant with
-a larger amount of data points (e.g. 1+ million).
+.. note::
-.. code-block:: ipython
+ In terms of performance, **the first time a function is run using the Numba engine will be slow**
+ as Numba will have some function compilation overhead. However, the JIT compiled functions are cached,
+ and subsequent calls will be fast. In general, the Numba engine is performant with
+ a larger amount of data points (e.g. 1+ million).
+
+ .. code-block:: ipython
- In [1]: data = pd.Series(range(1_000_000)) # noqa: E225
+ In [1]: data = pd.Series(range(1_000_000)) # noqa: E225
- In [2]: roll = data.rolling(10)
+ In [2]: roll = data.rolling(10)
- In [3]: def f(x):
- ...: return np.sum(x) + 5
- # Run the first time, compilation time will affect performance
- In [4]: %timeit -r 1 -n 1 roll.apply(f, engine='numba', raw=True)
- 1.23 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
- # Function is cached and performance will improve
- In [5]: %timeit roll.apply(f, engine='numba', raw=True)
- 188 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [3]: def f(x):
+ ...: return np.sum(x) + 5
+ # Run the first time, compilation time will affect performance
+ In [4]: %timeit -r 1 -n 1 roll.apply(f, engine='numba', raw=True)
+ 1.23 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
+ # Function is cached and performance will improve
+ In [5]: %timeit roll.apply(f, engine='numba', raw=True)
+ 188 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [6]: %timeit roll.apply(f, engine='cython', raw=True)
- 3.92 s ± 59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [6]: %timeit roll.apply(f, engine='cython', raw=True)
+ 3.92 s ± 59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
If your compute hardware contains multiple CPUs, the largest performance gain can be realized by setting ``parallel`` to ``True``
to leverage more than 1 CPU. Internally, pandas leverages numba to parallelize computations over the columns of a :class:`DataFrame`;
@@ -376,7 +339,7 @@ Custom Function Examples
~~~~~~~~~~~~~~~~~~~~~~~~
A custom Python function decorated with ``@jit`` can be used with pandas objects by passing their NumPy array
-representations with ``to_numpy()``.
+representations with :meth:`Series.to_numpy`.
.. code-block:: python
@@ -476,41 +439,22 @@ to the `Numba issue tracker. <https://github.com/numba/numba/issues/new/choose>`
.. _enhancingperf.eval:
Expression evaluation via :func:`~pandas.eval`
------------------------------------------------
+----------------------------------------------
-The top-level function :func:`pandas.eval` implements expression evaluation of
-:class:`~pandas.Series` and :class:`~pandas.DataFrame` objects.
-
-.. note::
-
- To benefit from using :func:`~pandas.eval` you need to
- install ``numexpr``. See the :ref:`recommended dependencies section
- <install.recommended_dependencies>` for more details.
-
-The point of using :func:`~pandas.eval` for expression evaluation rather than
-plain Python is two-fold: 1) large :class:`~pandas.DataFrame` objects are
-evaluated more efficiently and 2) large arithmetic and boolean expressions are
-evaluated all at once by the underlying engine (by default ``numexpr`` is used
-for evaluation).
+The top-level function :func:`pandas.eval` implements performant expression evaluation of
+:class:`~pandas.Series` and :class:`~pandas.DataFrame`. Expression evaluation allows operations
+to be expressed as strings and can potentially provide a performance improvement
+by evaluate arithmetic and boolean expression all at once for large :class:`~pandas.DataFrame`.
.. note::
You should not use :func:`~pandas.eval` for simple
expressions or for expressions involving small DataFrames. In fact,
:func:`~pandas.eval` is many orders of magnitude slower for
- smaller expressions/objects than plain ol' Python. A good rule of thumb is
+ smaller expressions or objects than plain Python. A good rule of thumb is
to only use :func:`~pandas.eval` when you have a
:class:`~pandas.core.frame.DataFrame` with more than 10,000 rows.
-
-:func:`~pandas.eval` supports all arithmetic expressions supported by the
-engine in addition to some extensions available only in pandas.
-
-.. note::
-
- The larger the frame and the larger the expression the more speedup you will
- see from using :func:`~pandas.eval`.
-
Supported syntax
~~~~~~~~~~~~~~~~
@@ -528,7 +472,7 @@ These operations are supported by :func:`pandas.eval`:
``sqrt``, ``sinh``, ``cosh``, ``tanh``, ``arcsin``, ``arccos``, ``arctan``, ``arccosh``,
``arcsinh``, ``arctanh``, ``abs``, ``arctan2`` and ``log10``.
-This Python syntax is **not** allowed:
+The following Python syntax is **not** allowed:
* Expressions
@@ -545,73 +489,120 @@ This Python syntax is **not** allowed:
* Statements
* Neither `simple <https://docs.python.org/3/reference/simple_stmts.html>`__
- nor `compound <https://docs.python.org/3/reference/compound_stmts.html>`__
- statements are allowed. This includes things like ``for``, ``while``, and
+ or `compound <https://docs.python.org/3/reference/compound_stmts.html>`__
+ statements are allowed. This includes ``for``, ``while``, and
``if``.
+Local variables
+~~~~~~~~~~~~~~~
+You must *explicitly reference* any local variable that you want to use in an
+expression by placing the ``@`` character in front of the name. This mechanism is
+the same for both :meth:`DataFrame.query` and :meth:`DataFrame.eval`. For example,
-:func:`~pandas.eval` examples
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. ipython:: python
-:func:`pandas.eval` works well with expressions containing large arrays.
+ df = pd.DataFrame(np.random.randn(5, 2), columns=list("ab"))
+ newcol = np.random.randn(len(df))
+ df.eval("b + @newcol")
+ df.query("b < @newcol")
+
+If you don't prefix the local variable with ``@``, pandas will raise an
+exception telling you the variable is undefined.
+
+When using :meth:`DataFrame.eval` and :meth:`DataFrame.query`, this allows you
+to have a local variable and a :class:`~pandas.DataFrame` column with the same
+name in an expression.
-First let's create a few decent-sized arrays to play with:
.. ipython:: python
- nrows, ncols = 20000, 100
- df1, df2, df3, df4 = [pd.DataFrame(np.random.randn(nrows, ncols)) for _ in range(4)]
+ a = np.random.randn()
+ df.query("@a < a")
+ df.loc[a < df["a"]] # same as the previous expression
+.. warning::
-Now let's compare adding them together using plain ol' Python versus
-:func:`~pandas.eval`:
+ :func:`pandas.eval` will raise an exception if you cannot use the ``@`` prefix because it
+ isn't defined in that context.
-.. ipython:: python
+ .. ipython:: python
+ :okexcept:
- %timeit df1 + df2 + df3 + df4
+ a, b = 1, 2
+ pd.eval("@a + b")
-.. ipython:: python
+ In this case, you should simply refer to the variables like you would in
+ standard Python.
- %timeit pd.eval("df1 + df2 + df3 + df4")
+ .. ipython:: python
+ pd.eval("a + b")
-Now let's do the same thing but with comparisons:
-.. ipython:: python
+:func:`pandas.eval` parsers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %timeit (df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)
+There are two different expression syntax parsers.
+
+The default ``'pandas'`` parser allows a more intuitive syntax for expressing
+query-like operations (comparisons, conjunctions and disjunctions). In
+particular, the precedence of the ``&`` and ``|`` operators is made equal to
+the precedence of the corresponding boolean operations ``and`` and ``or``.
+
+For example, the above conjunction can be written without parentheses.
+Alternatively, you can use the ``'python'`` parser to enforce strict Python
+semantics.
.. ipython:: python
- %timeit pd.eval("(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)")
+ nrows, ncols = 20000, 100
+ df1, df2, df3, df4 = [pd.DataFrame(np.random.randn(nrows, ncols)) for _ in range(4)]
+
+ expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
+ x = pd.eval(expr, parser="python")
+ expr_no_parens = "df1 > 0 & df2 > 0 & df3 > 0 & df4 > 0"
+ y = pd.eval(expr_no_parens, parser="pandas")
+ np.all(x == y)
-:func:`~pandas.eval` also works with unaligned pandas objects:
+The same expression can be "anded" together with the word :keyword:`and` as
+well:
.. ipython:: python
- s = pd.Series(np.random.randn(50))
- %timeit df1 + df2 + df3 + df4 + s
+ expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
+ x = pd.eval(expr, parser="python")
+ expr_with_ands = "df1 > 0 and df2 > 0 and df3 > 0 and df4 > 0"
+ y = pd.eval(expr_with_ands, parser="pandas")
+ np.all(x == y)
-.. ipython:: python
+The :keyword:`and` and :keyword:`or` operators here have the same precedence that they would
+in Python.
- %timeit pd.eval("df1 + df2 + df3 + df4 + s")
-.. note::
+:func:`pandas.eval` engines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Operations such as
+There are two different expression engines.
- .. code-block:: python
+The ``'numexpr'`` engine is the more performant engine that can yield performance improvements
+compared to standard Python syntax for large :class:`DataFrame`. This engine requires the
+optional dependency ``numexpr`` to be installed.
- 1 and 2 # would parse to 1 & 2, but should evaluate to 2
- 3 or 4 # would parse to 3 | 4, but should evaluate to 3
- ~1 # this is okay, but slower when using eval
+The ``'python'`` engine is generally *not* useful except for testing
+other evaluation engines against it. You will achieve **no** performance
+benefits using :func:`~pandas.eval` with ``engine='python'`` and may
+incur a performance hit.
+
+.. ipython:: python
+
+ %timeit df1 + df2 + df3 + df4
+
+.. ipython:: python
+
+ %timeit pd.eval("df1 + df2 + df3 + df4", engine="python")
- should be performed in Python. An exception will be raised if you try to
- perform any boolean/bitwise operations with scalar operands that are not
- of type ``bool`` or ``np.bool_``. Again, you should perform these kinds of
- operations in plain Python.
The :meth:`DataFrame.eval` method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -643,7 +634,7 @@ prefix the name of the :class:`~pandas.DataFrame` to the column(s) you're
interested in evaluating.
In addition, you can perform assignment of columns within an expression.
-This allows for *formulaic evaluation*. The assignment target can be a
+This allows for *formulaic evaluation*. The assignment target can be a
new column name or an existing column name, and it must be a valid Python
identifier.
@@ -656,7 +647,7 @@ identifier.
df
A copy of the :class:`DataFrame` with the
-new or modified columns is returned and the original frame is unchanged.
+new or modified columns is returned, and the original frame is unchanged.
.. ipython:: python
@@ -664,8 +655,7 @@ new or modified columns is returned and the original frame is unchanged.
df.eval("e = a - c")
df
-As a convenience, multiple assignments can be performed by using a
-multi-line string.
+Multiple column assignments can be performed by using a multi-line string.
.. ipython:: python
@@ -686,124 +676,66 @@ The equivalent in standard Python would be
df["a"] = 1
df
-Local variables
-~~~~~~~~~~~~~~~
-You must *explicitly reference* any local variable that you want to use in an
-expression by placing the ``@`` character in front of the name. This mechanism is
-the same for both :meth:`DataFrame.query` and :meth:`DataFrame.eval`. For example,
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(5, 2), columns=list("ab"))
- newcol = np.random.randn(len(df))
- df.eval("b + @newcol")
- df.query("b < @newcol")
-
-If you don't prefix the local variable with ``@``, pandas will raise an
-exception telling you the variable is undefined.
-
-When using :meth:`DataFrame.eval` and :meth:`DataFrame.query`, this allows you
-to have a local variable and a :class:`~pandas.DataFrame` column with the same
-name in an expression.
+:func:`~pandas.eval` performance comparison
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+:func:`pandas.eval` works well with expressions containing large arrays.
.. ipython:: python
- a = np.random.randn()
- df.query("@a < a")
- df.loc[a < df["a"]] # same as the previous expression
+ nrows, ncols = 20000, 100
+ df1, df2, df3, df4 = [pd.DataFrame(np.random.randn(nrows, ncols)) for _ in range(4)]
-With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it
-isn't defined in that context. pandas will let you know this if you try to
-use ``@`` in a top-level call to :func:`pandas.eval`. For example,
-.. ipython:: python
- :okexcept:
+:class:`DataFrame` arithmetic:
- a, b = 1, 2
- pd.eval("@a + b")
+.. ipython:: python
-In this case, you should simply refer to the variables like you would in
-standard Python.
+ %timeit df1 + df2 + df3 + df4
.. ipython:: python
- pd.eval("a + b")
-
+ %timeit pd.eval("df1 + df2 + df3 + df4")
-:func:`pandas.eval` parsers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There are two different parsers and two different engines you can use as
-the backend.
+:class:`DataFrame` comparison:
-The default ``'pandas'`` parser allows a more intuitive syntax for expressing
-query-like operations (comparisons, conjunctions and disjunctions). In
-particular, the precedence of the ``&`` and ``|`` operators is made equal to
-the precedence of the corresponding boolean operations ``and`` and ``or``.
+.. ipython:: python
-For example, the above conjunction can be written without parentheses.
-Alternatively, you can use the ``'python'`` parser to enforce strict Python
-semantics.
+ %timeit (df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)
.. ipython:: python
- expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
- x = pd.eval(expr, parser="python")
- expr_no_parens = "df1 > 0 & df2 > 0 & df3 > 0 & df4 > 0"
- y = pd.eval(expr_no_parens, parser="pandas")
- np.all(x == y)
+ %timeit pd.eval("(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)")
-The same expression can be "anded" together with the word :keyword:`and` as
-well:
+:class:`DataFrame` arithmetic with unaligned axes.
.. ipython:: python
- expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
- x = pd.eval(expr, parser="python")
- expr_with_ands = "df1 > 0 and df2 > 0 and df3 > 0 and df4 > 0"
- y = pd.eval(expr_with_ands, parser="pandas")
- np.all(x == y)
-
-
-The ``and`` and ``or`` operators here have the same precedence that they would
-in vanilla Python.
-
+ s = pd.Series(np.random.randn(50))
+ %timeit df1 + df2 + df3 + df4 + s
-:func:`pandas.eval` backends
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. ipython:: python
-There's also the option to make :func:`~pandas.eval` operate identical to plain
-ol' Python.
+ %timeit pd.eval("df1 + df2 + df3 + df4 + s")
.. note::
- Using the ``'python'`` engine is generally *not* useful, except for testing
- other evaluation engines against it. You will achieve **no** performance
- benefits using :func:`~pandas.eval` with ``engine='python'`` and in fact may
- incur a performance hit.
-
-You can see this by using :func:`pandas.eval` with the ``'python'`` engine. It
-is a bit slower (not by much) than evaluating the same expression in Python
-
-.. ipython:: python
-
- %timeit df1 + df2 + df3 + df4
-
-.. ipython:: python
+ Operations such as
- %timeit pd.eval("df1 + df2 + df3 + df4", engine="python")
+ .. code-block:: python
+ 1 and 2 # would parse to 1 & 2, but should evaluate to 2
+ 3 or 4 # would parse to 3 | 4, but should evaluate to 3
+ ~1 # this is okay, but slower when using eval
-:func:`pandas.eval` performance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ should be performed in Python. An exception will be raised if you try to
+ perform any boolean/bitwise operations with scalar operands that are not
+ of type ``bool`` or ``np.bool_``.
-:func:`~pandas.eval` is intended to speed up certain kinds of operations. In
-particular, those operations involving complex expressions with large
-:class:`~pandas.DataFrame`/:class:`~pandas.Series` objects should see a
-significant performance benefit. Here is a plot showing the running time of
+Here is a plot showing the running time of
:func:`pandas.eval` as function of the size of the frame involved in the
computation. The two lines are two different engines.
@@ -812,24 +744,18 @@ computation. The two lines are two different engines.
.. image:: ../_static/eval-perf.png
-You will only see the performance benefits of using the ``numexpr`` engine with :func:`pandas.eval` if your frame has more than approximately 100,000 rows.
+You will only see the performance benefits of using the ``numexpr`` engine with :func:`pandas.eval` if your :class:`~pandas.DataFrame`
+has more than approximately 100,000 rows.
This plot was created using a :class:`DataFrame` with 3 columns each containing
floating point values generated using ``numpy.random.randn()``.
-Technical minutia regarding expression evaluation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Expression evaluation limitations with ``numexpr``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expressions that would result in an object dtype or involve datetime operations
-(because of ``NaT``) must be evaluated in Python space. The main reason for
-this behavior is to maintain backwards compatibility with versions of NumPy <
-1.7. In those versions of NumPy a call to ``ndarray.astype(str)`` will
-truncate any strings that are more than 60 characters in length. Second, we
-can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be
-evaluated in Python space.
-
-The upshot is that this *only* applies to object-dtype expressions. So, if
-you have an expression--for example
+because of ``NaT`` must be evaluated in Python space, but part of an expression
+can still be evaluated with ``numexpr``. For example:
.. ipython:: python
@@ -839,10 +765,6 @@ you have an expression--for example
df
df.query("strings == 'a' and nums == 1")
-the numeric part of the comparison (``nums == 1``) will be evaluated by
-``numexpr``.
-
-In general, :meth:`DataFrame.query`/:func:`pandas.eval` will
-evaluate the subexpressions that *can* be evaluated by ``numexpr`` and those
-that must be evaluated in Python space transparently to the user. This is done
-by inferring the result type of an expression from its arguments and operators.
+The numeric part of the comparison (``nums == 1``) will be evaluated by
+``numexpr`` and the object part of the comparison (``"strings == 'a'``) will
+be evaluated by Python.
| * Simplified some language; removed language that was outdated
* Fixed some formatting
* Reorganized the eval section | https://api.github.com/repos/pandas-dev/pandas/pulls/54419 | 2023-08-04T21:10:37Z | 2023-08-05T18:10:29Z | 2023-08-05T18:10:29Z | 2023-08-05T18:23:35Z |
DOC: Simplify scale.rst | diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index b66ddcd33a684..bc49c7f958cb7 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -13,33 +13,10 @@ This document provides a few recommendations for scaling your analysis to larger
It's a complement to :ref:`enhancingperf`, which focuses on speeding up analysis
for datasets that fit in memory.
-But first, it's worth considering *not using pandas*. pandas isn't the right
-tool for all situations. If you're working with very large datasets and a tool
-like PostgreSQL fits your needs, then you should probably be using that.
-Assuming you want or need the expressiveness and power of pandas, let's carry on.
-
Load less data
--------------
-Suppose our raw dataset on disk has many columns::
-
- id_0 name_0 x_0 y_0 id_1 name_1 x_1 ... name_8 x_8 y_8 id_9 name_9 x_9 y_9
- timestamp ...
- 2000-01-01 00:00:00 1015 Michael -0.399453 0.095427 994 Frank -0.176842 ... Dan -0.315310 0.713892 1025 Victor -0.135779 0.346801
- 2000-01-01 00:01:00 969 Patricia 0.650773 -0.874275 1003 Laura 0.459153 ... Ursula 0.913244 -0.630308 1047 Wendy -0.886285 0.035852
- 2000-01-01 00:02:00 1016 Victor -0.721465 -0.584710 1046 Michael 0.524994 ... Ray -0.656593 0.692568 1064 Yvonne 0.070426 0.432047
- 2000-01-01 00:03:00 939 Alice -0.746004 -0.908008 996 Ingrid -0.414523 ... Jerry -0.958994 0.608210 978 Wendy 0.855949 -0.648988
- 2000-01-01 00:04:00 1017 Dan 0.919451 -0.803504 1048 Jerry -0.569235 ... Frank -0.577022 -0.409088 994 Bob -0.270132 0.335176
- ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
- 2000-12-30 23:56:00 999 Tim 0.162578 0.512817 973 Kevin -0.403352 ... Tim -0.380415 0.008097 1041 Charlie 0.191477 -0.599519
- 2000-12-30 23:57:00 970 Laura -0.433586 -0.600289 958 Oliver -0.966577 ... Zelda 0.971274 0.402032 1038 Ursula 0.574016 -0.930992
- 2000-12-30 23:58:00 1065 Edith 0.232211 -0.454540 971 Tim 0.158484 ... Alice -0.222079 -0.919274 1022 Dan 0.031345 -0.657755
- 2000-12-30 23:59:00 1019 Ingrid 0.322208 -0.615974 981 Hannah 0.607517 ... Sarah -0.424440 -0.117274 990 George -0.375530 0.563312
- 2000-12-31 00:00:00 937 Ursula -0.906523 0.943178 1018 Alice -0.564513 ... Jerry 0.236837 0.807650 985 Oliver 0.777642 0.783392
-
- [525601 rows x 40 columns]
-
-That can be generated by the following code snippet:
+Suppose our raw dataset on disk has many columns.
.. ipython:: python
:okwarning:
@@ -67,6 +44,7 @@ That can be generated by the following code snippet:
for i in range(10)
]
ts_wide = pd.concat(timeseries, axis=1)
+ ts_wide.head()
ts_wide.to_parquet("timeseries_wide.parquet")
To load the columns we want, we have two options.
@@ -170,9 +148,8 @@ for an overview of all of pandas' dtypes.
Use chunking
------------
-Some workloads can be achieved with chunking: splitting a large problem like "convert this
-directory of CSVs to parquet" into a bunch of small problems ("convert this individual CSV
-file into a Parquet file. Now repeat that for each file in this directory."). As long as each chunk
+Some workloads can be achieved with chunking by splitting a large problem into a bunch of small problems. For example,
+converting an individual CSV file into a Parquet file and repeating that for each file in a directory. As long as each chunk
fits in memory, you can work with datasets that are much larger than memory.
.. note::
@@ -242,8 +219,8 @@ different library that implements these out-of-core algorithms for you.
.. _scale.other_libraries:
-Use other libraries
--------------------
+Use Dask
+--------
pandas is just one library offering a DataFrame API. Because of its popularity,
pandas' API has become something of a standard that other libraries implement.
| * Simplified showing data
* Simplified some unnecessary language | https://api.github.com/repos/pandas-dev/pandas/pulls/54418 | 2023-08-04T19:21:46Z | 2023-08-05T18:06:00Z | 2023-08-05T18:06:00Z | 2023-08-05T18:23:23Z |
DOC: Simplify gotchas.rst | diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst
index 47f1b74c0b894..67106df328361 100644
--- a/doc/source/user_guide/gotchas.rst
+++ b/doc/source/user_guide/gotchas.rst
@@ -13,7 +13,7 @@ DataFrame memory usage
The memory usage of a :class:`DataFrame` (including the index) is shown when calling
the :meth:`~DataFrame.info`. A configuration option, ``display.memory_usage``
(see :ref:`the list of options <options.available>`), specifies if the
-:class:`DataFrame` memory usage will be displayed when invoking the ``df.info()``
+:class:`DataFrame` memory usage will be displayed when invoking the :meth:`~DataFrame.info`
method.
For example, the memory usage of the :class:`DataFrame` below is shown
@@ -50,13 +50,13 @@ as it can be expensive to do this deeper introspection.
df.info(memory_usage="deep")
By default the display option is set to ``True`` but can be explicitly
-overridden by passing the ``memory_usage`` argument when invoking ``df.info()``.
+overridden by passing the ``memory_usage`` argument when invoking :meth:`~DataFrame.info`.
The memory usage of each column can be found by calling the
:meth:`~DataFrame.memory_usage` method. This returns a :class:`Series` with an index
represented by column names and memory usage of each column shown in bytes. For
the :class:`DataFrame` above, the memory usage of each column and the total memory
-usage can be found with the ``memory_usage`` method:
+usage can be found with the :meth:`~DataFrame.memory_usage` method:
.. ipython:: python
@@ -164,7 +164,8 @@ Mutating with User Defined Function (UDF) methods
-------------------------------------------------
This section applies to pandas methods that take a UDF. In particular, the methods
-``.apply``, ``.aggregate``, ``.transform``, and ``.filter``.
+:meth:`DataFrame.apply`, :meth:`DataFrame.aggregate`, :meth:`DataFrame.transform`, and
+:meth:`DataFrame.filter`.
It is a general rule in programming that one should not mutate a container
while it is being iterated over. Mutation will invalidate the iterator,
@@ -192,16 +193,14 @@ the :class:`DataFrame`, unexpected behavior can arise.
Here is a similar example with :meth:`DataFrame.apply`:
.. ipython:: python
+ :okexcept:
def f(s):
s.pop("a")
return s
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
- try:
- df.apply(f, axis="columns")
- except Exception as err:
- print(repr(err))
+ df.apply(f, axis="columns")
To resolve this issue, one can make a copy so that the mutation does
not apply to the container being iterated over.
@@ -229,29 +228,41 @@ not apply to the container being iterated over.
df = pd.DataFrame({"a": [1, 2, 3], 'b': [4, 5, 6]})
df.apply(f, axis="columns")
-``NaN``, Integer ``NA`` values and ``NA`` type promotions
----------------------------------------------------------
+Missing value representation for NumPy types
+--------------------------------------------
-Choice of ``NA`` representation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.nan`` as the ``NA`` representation for NumPy types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For lack of ``NA`` (missing) support from the ground up in NumPy and Python in
-general, we were given the difficult choice between either:
+general, ``NA`` could have been represented with:
* A *masked array* solution: an array of data and an array of boolean values
indicating whether a value is there or is missing.
* Using a special sentinel value, bit pattern, or set of sentinel values to
denote ``NA`` across the dtypes.
-For many reasons we chose the latter. After years of production use it has
-proven, at least in my opinion, to be the best decision given the state of
-affairs in NumPy and Python in general. The special value ``NaN``
-(Not-A-Number) is used everywhere as the ``NA`` value, and there are API
-functions :meth:`DataFrame.isna` and :meth:`DataFrame.notna` which can be used across the dtypes to
-detect NA values.
+The special value ``np.nan`` (Not-A-Number) was chosen as the ``NA`` value for NumPy types, and there are API
+functions like :meth:`DataFrame.isna` and :meth:`DataFrame.notna` which can be used across the dtypes to
+detect NA values. However, this choice has a downside of coercing missing integer data as float types as
+shown in :ref:`gotchas.intna`.
+
+``NA`` type promotions for NumPy types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When introducing NAs into an existing :class:`Series` or :class:`DataFrame` via
+:meth:`~Series.reindex` or some other means, boolean and integer types will be
+promoted to a different dtype in order to store the NAs. The promotions are
+summarized in this table:
-However, it comes with it a couple of trade-offs which I most certainly have
-not ignored.
+.. csv-table::
+ :header: "Typeclass","Promotion dtype for storing NAs"
+ :widths: 40,60
+
+ ``floating``, no change
+ ``object``, no change
+ ``integer``, cast to ``float64``
+ ``boolean``, cast to ``object``
.. _gotchas.intna:
@@ -276,12 +287,13 @@ This trade-off is made largely for memory and performance reasons, and also so
that the resulting :class:`Series` continues to be "numeric".
If you need to represent integers with possibly missing values, use one of
-the nullable-integer extension dtypes provided by pandas
+the nullable-integer extension dtypes provided by pandas or pyarrow
* :class:`Int8Dtype`
* :class:`Int16Dtype`
* :class:`Int32Dtype`
* :class:`Int64Dtype`
+* :class:`ArrowDtype`
.. ipython:: python
@@ -293,28 +305,10 @@ the nullable-integer extension dtypes provided by pandas
s2_int
s2_int.dtype
-See :ref:`integer_na` for more.
-
-``NA`` type promotions
-~~~~~~~~~~~~~~~~~~~~~~
-
-When introducing NAs into an existing :class:`Series` or :class:`DataFrame` via
-:meth:`~Series.reindex` or some other means, boolean and integer types will be
-promoted to a different dtype in order to store the NAs. The promotions are
-summarized in this table:
-
-.. csv-table::
- :header: "Typeclass","Promotion dtype for storing NAs"
- :widths: 40,60
-
- ``floating``, no change
- ``object``, no change
- ``integer``, cast to ``float64``
- ``boolean``, cast to ``object``
+ s_int_pa = pd.Series([1, 2, None], dtype="int64[pyarrow]")
+ s_int_pa
-While this may seem like a heavy trade-off, I have found very few cases where
-this is an issue in practice i.e. storing values greater than 2**53. Some
-explanation for the motivation is in the next section.
+See :ref:`integer_na` and :ref:`pyarrow` for more.
Why not make NumPy like R?
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -342,16 +336,8 @@ each type to be used as the missing value. While doing this with the full NumPy
type hierarchy would be possible, it would be a more substantial trade-off
(especially for the 8- and 16-bit data types) and implementation undertaking.
-An alternate approach is that of using masked arrays. A masked array is an
-array of data with an associated boolean *mask* denoting whether each value
-should be considered ``NA`` or not. I am personally not in love with this
-approach as I feel that overall it places a fairly heavy burden on the user and
-the library implementer. Additionally, it exacts a fairly high performance cost
-when working with numerical data compared with the simple approach of using
-``NaN``. Thus, I have chosen the Pythonic "practicality beats purity" approach
-and traded integer ``NA`` capability for a much simpler approach of using a
-special value in float and object arrays to denote ``NA``, and promoting
-integer arrays to floating when NAs must be introduced.
+However, R ``NA`` semantics are now available by using masked NumPy types such as :class:`Int64Dtype`
+or PyArrow types (:class:`ArrowDtype`).
Differences with NumPy
| * Add more sphinx referencing
* Update information about missing value representation in pandas | https://api.github.com/repos/pandas-dev/pandas/pulls/54415 | 2023-08-04T18:59:50Z | 2023-08-05T18:11:37Z | 2023-08-05T18:11:37Z | 2023-08-05T18:23:48Z |
REF: de-duplicate test_combine_add | diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 7807ef366d280..dea43e53ffc40 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -345,13 +345,24 @@ def test_combine_add(self, data_repeated):
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
- result = s1.combine(s2, lambda x1, x2: x1 + x2)
- with np.errstate(over="ignore"):
- expected = pd.Series(
- orig_data1._from_sequence(
- [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
+
+ # Check if the operation is supported pointwise for our scalars. If not,
+ # we will expect Series.combine to raise as well.
+ try:
+ with np.errstate(over="ignore"):
+ expected = pd.Series(
+ orig_data1._from_sequence(
+ [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
+ )
)
- )
+ except TypeError:
+ # If the operation is not supported pointwise for our scalars,
+ # then Series.combine should also raise
+ with pytest.raises(TypeError):
+ s1.combine(s2, lambda x1, x2: x1 + x2)
+ return
+
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
tm.assert_series_equal(result, expected)
val = s1.iloc[0]
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 0c9abd45a51a5..b3967d33d7c1a 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -209,10 +209,6 @@ def test_sort_values_missing(
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
- @pytest.mark.xfail(reason="combine for JSONArray not supported")
- def test_combine_add(self, data_repeated):
- super().test_combine_add(data_repeated)
-
@pytest.mark.xfail(
reason="combine for JSONArray not supported - "
"may pass depending on random data",
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 2438626cf0347..d945d7d495f22 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -821,19 +821,6 @@ def test_argreduce_series(
_combine_le_expected_dtype = "bool[pyarrow]"
- def test_combine_add(self, data_repeated, request):
- pa_dtype = next(data_repeated(1)).dtype.pyarrow_dtype
- if pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype):
- # analogous to datetime64, these cannot be added
- orig_data1, orig_data2 = data_repeated(2)
- s1 = pd.Series(orig_data1)
- s2 = pd.Series(orig_data2)
- with pytest.raises(TypeError):
- s1.combine(s2, lambda x1, x2: x1 + x2)
-
- else:
- super().test_combine_add(data_repeated)
-
class TestBaseArithmeticOps(base.BaseArithmeticOpsTests):
divmod_exc = NotImplementedError
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index ab21f768e6521..654838ea11b67 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -113,10 +113,6 @@ class TestIndex(base.BaseIndexTests):
class TestMethods(BaseDatetimeTests, base.BaseMethodsTests):
- def test_combine_add(self, data_repeated):
- # Timestamp.__add__(Timestamp) not defined
- pass
-
@pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data, na_action):
result = data.map(lambda x: x, na_action=na_action)
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 4ef303289ee5c..53835dfb45331 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -121,10 +121,6 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
class TestMethods(BaseInterval, base.BaseMethodsTests):
- @pytest.mark.xfail(reason="addition is not defined for intervals")
- def test_combine_add(self, data_repeated):
- super().test_combine_add(data_repeated)
-
@pytest.mark.xfail(
reason="Raises with incorrect message bc it disallows *all* listlikes "
"instead of just wrong-length listlikes"
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 7b6bc98ee8c05..9ef838f1988f5 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -93,10 +93,6 @@ class TestIndex(base.BaseIndexTests):
class TestMethods(BasePeriodTests, base.BaseMethodsTests):
- def test_combine_add(self, data_repeated):
- # Period + Period is not defined.
- pass
-
@pytest.mark.parametrize("periods", [1, -2])
def test_diff(self, data, periods):
if is_platform_windows() and np_version_gte1p24:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54414 | 2023-08-04T18:28:16Z | 2023-08-04T21:17:53Z | 2023-08-04T21:17:53Z | 2023-08-04T21:23:17Z |
REF: de-duplicate test_divmod_series_array | diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index 85bbafbeb5129..9e1b44d3bac02 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -27,7 +27,11 @@ def data():
@pytest.fixture
def data_for_twos():
- """Length-100 array in which all the elements are two."""
+ """
+ Length-100 array in which all the elements are two.
+
+ Call pytest.skip in your fixture if the dtype does not support divmod.
+ """
raise NotImplementedError
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 0c9abd45a51a5..011e44357c6b0 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -78,6 +78,11 @@ def data_for_grouping():
)
+@pytest.fixture
+def data_for_twos(dtype):
+ pytest.skip("Not a numeric dtype")
+
+
class BaseJSON:
pass
@@ -317,12 +322,6 @@ def test_add_series_with_extension_array(self, data):
with pytest.raises(TypeError, match="unsupported"):
ser + data
- @pytest.mark.xfail(reason="not implemented")
- def test_divmod_series_array(self):
- # GH 23287
- # skipping because it is not implemented
- super().test_divmod_series_array()
-
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 2438626cf0347..5c00189da48a1 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -253,10 +253,16 @@ def data_missing_for_sorting(data_for_grouping):
def data_for_twos(data):
"""Length-100 array in which all the elements are two."""
pa_dtype = data.dtype.pyarrow_dtype
- if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):
+ if (
+ pa.types.is_integer(pa_dtype)
+ or pa.types.is_floating(pa_dtype)
+ or pa.types.is_decimal(pa_dtype)
+ or pa.types.is_duration(pa_dtype)
+ ):
return pd.array([2] * 100, dtype=data.dtype)
# tests will be xfailed where 2 is not a valid scalar for pa_dtype
return data
+ # TODO: skip otherwise?
@pytest.fixture
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 5de1debb21d93..c5a8a1c85ecfe 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -71,6 +71,11 @@ def data_missing_for_sorting():
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
+@pytest.fixture
+def data_for_twos(dtype):
+ pytest.skip("Not a numeric dtype")
+
+
@pytest.fixture
def na_value():
return np.nan
@@ -263,11 +268,6 @@ def test_add_series_with_extension_array(self, data):
with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
ser + data
- def test_divmod_series_array(self):
- # GH 23287
- # skipping because it is not implemented
- pass
-
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op, other):
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index ab21f768e6521..7183831329d27 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -73,6 +73,11 @@ def data_for_grouping(dtype):
)
+@pytest.fixture
+def data_for_twos(dtype):
+ pytest.skip("Not a numeric dtype.")
+
+
@pytest.fixture
def na_cmp():
def cmp(a, b):
@@ -142,11 +147,6 @@ def test_add_series_with_extension_array(self, data):
with pytest.raises(TypeError, match=msg):
ser + data
- def test_divmod_series_array(self):
- # GH 23287
- # skipping because it is not implemented
- pass
-
class TestCasting(BaseDatetimeTests, base.BaseCastingTests):
pass
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 13645065bce14..4e8d30f62278c 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -153,6 +153,14 @@ def data_for_grouping(allow_in_pandas, dtype):
)
+@pytest.fixture
+def data_for_twos(dtype):
+ if dtype.kind == "O":
+ pytest.skip("Not a numeric dtype")
+ arr = np.ones(100) * 2
+ return NumpyExtensionArray._from_sequence(arr, dtype=dtype)
+
+
@pytest.fixture
def skip_numpy_object(dtype, request):
"""
@@ -278,11 +286,6 @@ class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
def test_divmod(self, data):
super().test_divmod(data)
- @skip_nested
- def test_divmod_series_array(self, data):
- ser = pd.Series(data)
- self._check_divmod_op(ser, divmod, data)
-
@skip_nested
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 7b6bc98ee8c05..8fc4d739922a7 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -40,7 +40,7 @@ def data(dtype):
@pytest.fixture
def data_for_twos(dtype):
- return PeriodArray(np.ones(100) * 2, dtype=dtype)
+ pytest.skip("Not a numeric dtype")
@pytest.fixture
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54413 | 2023-08-04T18:17:21Z | 2023-08-04T21:19:06Z | 2023-08-04T21:19:06Z | 2023-08-04T21:21:05Z |
REF: de-duplicate test_add_series_with_extension_array | diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 658018a7ac740..83d134161a8fb 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -161,7 +161,18 @@ def test_divmod_series_array(self, data, data_for_twos):
self._check_divmod_op(other, ops.rdivmod, ser)
def test_add_series_with_extension_array(self, data):
+ # Check adding an ExtensionArray to a Series of the same dtype matches
+ # the behavior of adding the arrays directly and then wrapping in a
+ # Series.
+
ser = pd.Series(data)
+
+ exc = self._get_expected_exception("__add__", ser, data)
+ if exc is not None:
+ with pytest.raises(exc):
+ ser + data
+ return
+
result = ser + data
expected = pd.Series(data + data)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index d5002a8fb91bf..710ba830f5591 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -313,11 +313,6 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
request.node.add_marker(mark)
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
- def test_add_series_with_extension_array(self, data):
- ser = pd.Series(data)
- with pytest.raises(TypeError, match="unsupported"):
- ser + data
-
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 3b54d8e948b14..897b21d2f7a68 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -988,6 +988,9 @@ def _get_expected_exception(
or pa.types.is_integer(pa_dtype)
or pa.types.is_decimal(pa_dtype)
):
+ # TODO: in many of these cases, e.g. non-duration temporal,
+ # these will *never* be allowed. Would it make more sense to
+ # re-raise as TypeError, more consistent with non-pyarrow cases?
exc = pa.ArrowNotImplementedError
else:
exc = None
@@ -1123,32 +1126,7 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators, request):
def test_add_series_with_extension_array(self, data, request):
pa_dtype = data.dtype.pyarrow_dtype
- if pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype):
- # i.e. timestamp, date, time, but not timedelta; these *should*
- # raise when trying to add
- ser = pd.Series(data)
- if pa_version_under7p0:
- msg = "Function add_checked has no kernel matching input types"
- else:
- msg = "Function 'add_checked' has no kernel matching input types"
- with pytest.raises(NotImplementedError, match=msg):
- # TODO: this is a pa.lib.ArrowNotImplementedError, might
- # be better to reraise a TypeError; more consistent with
- # non-pyarrow cases
- ser + data
-
- return
-
- if (pa_version_under8p0 and pa.types.is_duration(pa_dtype)) or (
- pa.types.is_boolean(pa_dtype)
- ):
- request.node.add_marker(
- pytest.mark.xfail(
- raises=NotImplementedError,
- reason=f"add_checked not implemented for {pa_dtype}",
- )
- )
- elif pa_dtype.equals("int8"):
+ if pa_dtype.equals("int8"):
request.node.add_marker(
pytest.mark.xfail(
raises=pa.ArrowInvalid,
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 828cdfa538ba5..8ee84c862688b 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -263,11 +263,6 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request)
)
super().test_arith_series_with_scalar(data, op_name)
- def test_add_series_with_extension_array(self, data):
- ser = pd.Series(data)
- with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
- ser + data
-
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op, other):
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 4284b46a230a7..80e75824763b6 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -136,13 +136,6 @@ def _get_expected_exception(self, op_name, obj, other):
return None
return super()._get_expected_exception(op_name, obj, other)
- def test_add_series_with_extension_array(self, data):
- # Datetime + Datetime not implemented
- ser = pd.Series(data)
- msg = "cannot add DatetimeArray and DatetimeArray"
- with pytest.raises(TypeError, match=msg):
- ser + data
-
class TestCasting(BaseDatetimeTests, base.BaseCastingTests):
pass
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 26936829d7a34..5afbbe5214a5a 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -117,16 +117,6 @@ def _get_expected_exception(self, op_name, obj, other):
return None
return super()._get_expected_exception(op_name, obj, other)
- def test_add_series_with_extension_array(self, data):
- # we don't implement + for Period
- s = pd.Series(data)
- msg = (
- r"unsupported operand type\(s\) for \+: "
- r"\'PeriodArray\' and \'PeriodArray\'"
- )
- with pytest.raises(TypeError, match=msg):
- s + data
-
class TestCasting(BasePeriodTests, base.BaseCastingTests):
pass
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54412 | 2023-08-04T18:01:39Z | 2023-08-05T18:12:25Z | 2023-08-05T18:12:25Z | 2023-08-05T19:09:38Z |
CI: Build sdist again on macOS | diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 77ab152ce712e..4e3544d5e443a 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -104,21 +104,42 @@ jobs:
with:
fetch-depth: 0
+ # TODO: Build wheels from sdist again
+ # There's some sort of weird race condition?
+ # within Github that makes the sdist be missing files
+
# We need to build wheels from the sdist since the sdist
# removes unnecessary files from the release
- - name: Download sdist
+ - name: Download sdist (not macOS)
+ if: ${{ matrix.buildplat[1] != 'macosx_*' }}
uses: actions/download-artifact@v3
with:
name: sdist
path: ./dist
+ - name: Set up Python (macOS)
+ if: ${{ matrix.buildplat[1] == 'macosx_*' }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.11'
+
+ # Python version used to build sdist doesn't matter
+ # wheel will be built from sdist with the correct version
+ - name: Build sdist (macOS)
+ if: ${{ matrix.buildplat[1] == 'macosx_*' }}
+ run: |
+ python -m pip install build
+ python -m build --sdist
+
+ - name: Output sdist name (macOS)
+ id: save-path
+ shell: bash -el {0}
+ run: echo "sdist_name=$(ls ./dist)" >> "$GITHUB_ENV"
+
- name: Build wheels
uses: pypa/cibuildwheel@v2.14.1
- # TODO: Build wheels from sdist again
- # There's some sort of weird race condition?
- # within Github that makes the sdist be missing files
with:
- package-dir: ./dist/${{ needs.build_sdist.outputs.sdist_file }}
+ package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }}
env:
CIBW_PRERELEASE_PYTHONS: True
CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
The race condition still exists :( | https://api.github.com/repos/pandas-dev/pandas/pulls/54411 | 2023-08-04T17:56:19Z | 2023-08-04T21:22:19Z | 2023-08-04T21:22:19Z | 2023-08-04T21:23:58Z |
REF: de-duplicate test_direct_arith_with_ndframe_returns_not_implemented | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 483c5ad59872f..73835252c0329 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -269,6 +269,26 @@
EMPTY_STRING_PATTERN = re.compile("^$")
+arithmetic_dunder_methods = [
+ "__add__",
+ "__radd__",
+ "__sub__",
+ "__rsub__",
+ "__mul__",
+ "__rmul__",
+ "__floordiv__",
+ "__rfloordiv__",
+ "__truediv__",
+ "__rtruediv__",
+ "__pow__",
+ "__rpow__",
+ "__mod__",
+ "__rmod__",
+]
+
+comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"]
+
+
def reset_display_options() -> None:
"""
Reset the display options for printing and representing objects.
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 1dcf413f2edf6..9cb29903dc156 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -974,25 +974,9 @@ def ea_scalar_and_dtype(request):
# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------
-_all_arithmetic_operators = [
- "__add__",
- "__radd__",
- "__sub__",
- "__rsub__",
- "__mul__",
- "__rmul__",
- "__floordiv__",
- "__rfloordiv__",
- "__truediv__",
- "__rtruediv__",
- "__pow__",
- "__rpow__",
- "__mod__",
- "__rmod__",
-]
-@pytest.fixture(params=_all_arithmetic_operators)
+@pytest.fixture(params=tm.arithmetic_dunder_methods)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 972a8fb800f92..2b43b090a43e0 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -157,6 +157,11 @@
DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]
+def _make_unpacked_invalid_op(op_name: str):
+ op = make_invalid_op(op_name)
+ return unpack_zerodim_and_defer(op_name)(op)
+
+
def _period_dispatch(meth: F) -> F:
"""
For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
@@ -979,18 +984,18 @@ def _cmp_method(self, other, op):
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
- __pow__ = make_invalid_op("__pow__")
- __rpow__ = make_invalid_op("__rpow__")
- __mul__ = make_invalid_op("__mul__")
- __rmul__ = make_invalid_op("__rmul__")
- __truediv__ = make_invalid_op("__truediv__")
- __rtruediv__ = make_invalid_op("__rtruediv__")
- __floordiv__ = make_invalid_op("__floordiv__")
- __rfloordiv__ = make_invalid_op("__rfloordiv__")
- __mod__ = make_invalid_op("__mod__")
- __rmod__ = make_invalid_op("__rmod__")
- __divmod__ = make_invalid_op("__divmod__")
- __rdivmod__ = make_invalid_op("__rdivmod__")
+ __pow__ = _make_unpacked_invalid_op("__pow__")
+ __rpow__ = _make_unpacked_invalid_op("__rpow__")
+ __mul__ = _make_unpacked_invalid_op("__mul__")
+ __rmul__ = _make_unpacked_invalid_op("__rmul__")
+ __truediv__ = _make_unpacked_invalid_op("__truediv__")
+ __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__")
+ __floordiv__ = _make_unpacked_invalid_op("__floordiv__")
+ __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__")
+ __mod__ = _make_unpacked_invalid_op("__mod__")
+ __rmod__ = _make_unpacked_invalid_op("__rmod__")
+ __divmod__ = _make_unpacked_invalid_op("__divmod__")
+ __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__")
@final
def _get_i8_values_and_mask(
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index aafb1900a4236..658018a7ac740 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -166,23 +166,25 @@ def test_add_series_with_extension_array(self, data):
expected = pd.Series(data + data)
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
+ @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame, pd.Index])
+ @pytest.mark.parametrize(
+ "op_name",
+ [
+ x
+ for x in tm.arithmetic_dunder_methods + tm.comparison_dunder_methods
+ if not x.startswith("__r")
+ ],
+ )
def test_direct_arith_with_ndframe_returns_not_implemented(
- self, request, data, box
+ self, data, box, op_name
):
- # EAs should return NotImplemented for ops with Series/DataFrame
+ # EAs should return NotImplemented for ops with Series/DataFrame/Index
# Pandas takes care of unboxing the series and calling the EA's op.
- other = pd.Series(data)
- if box is pd.DataFrame:
- other = other.to_frame()
- if not hasattr(data, "__add__"):
- request.node.add_marker(
- pytest.mark.xfail(
- reason=f"{type(data).__name__} does not implement add"
- )
- )
- result = data.__add__(other)
- assert result is NotImplemented
+ other = box(data)
+
+ if hasattr(data, op_name):
+ result = getattr(data, op_name)(other)
+ assert result is NotImplemented
class BaseComparisonOpsTests(BaseOpsUtil):
@@ -219,26 +221,6 @@ def test_compare_array(self, data, comparison_op):
other = pd.Series([data[0]] * len(data))
self._compare_other(ser, data, comparison_op, other)
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
- def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
- # EAs should return NotImplemented for ops with Series/DataFrame
- # Pandas takes care of unboxing the series and calling the EA's op.
- other = pd.Series(data)
- if box is pd.DataFrame:
- other = other.to_frame()
-
- if hasattr(data, "__eq__"):
- result = data.__eq__(other)
- assert result is NotImplemented
- else:
- pytest.skip(f"{type(data).__name__} does not implement __eq__")
-
- if hasattr(data, "__ne__"):
- result = data.__ne__(other)
- assert result is NotImplemented
- else:
- pytest.skip(f"{type(data).__name__} does not implement __ne__")
-
class BaseUnaryOpsTests(BaseOpsUtil):
def test_invert(self, data):
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index c03c756fd1e81..26936829d7a34 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -127,17 +127,6 @@ def test_add_series_with_extension_array(self, data):
with pytest.raises(TypeError, match=msg):
s + data
- def test_direct_arith_with_ndframe_returns_not_implemented(
- self, data, frame_or_series
- ):
- # Override to use __sub__ instead of __add__
- other = pd.Series(data)
- if frame_or_series is pd.DataFrame:
- other = other.to_frame()
-
- result = data.__sub__(other)
- assert result is NotImplemented
-
class TestCasting(BasePeriodTests, base.BaseCastingTests):
pass
| Also extends it to test for Index and all the arithmetic/comparison methods instead of just `__add__`. Uses a loop inside the test bc im leaning towards trying to slow the expansion of the test suite, but not super-opposed to parametrizing if reviewers have strong opinions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54410 | 2023-08-04T17:53:53Z | 2023-08-04T23:29:41Z | 2023-08-04T23:29:41Z | 2023-08-05T01:02:46Z |
Fix #54391: Use memcpy() for unaligned loads | diff --git a/pandas/_libs/byteswap.pyx b/pandas/_libs/byteswap.pyx
index 511af5140b563..67cd7ad58d229 100644
--- a/pandas/_libs/byteswap.pyx
+++ b/pandas/_libs/byteswap.pyx
@@ -10,53 +10,57 @@ from libc.stdint cimport (
uint32_t,
uint64_t,
)
+from libc.string cimport memcpy
def read_float_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
- assert offset + 4 < len(data)
- cdef:
- const char *data_ptr = data
- float res = (<float*>(data_ptr + offset))[0]
+ cdef uint32_t value
+ assert offset + sizeof(value) < len(data)
+ cdef const void *ptr = <unsigned char*>(data) + offset
+ memcpy(&value, ptr, sizeof(value))
if byteswap:
- res = _byteswap_float(res)
+ value = _byteswap4(value)
+
+ cdef float res
+ memcpy(&res, &value, sizeof(res))
return res
def read_double_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
- assert offset + 8 < len(data)
- cdef:
- const char *data_ptr = data
- double res = (<double*>(data_ptr + offset))[0]
+ cdef uint64_t value
+ assert offset + sizeof(value) < len(data)
+ cdef const void *ptr = <unsigned char*>(data) + offset
+ memcpy(&value, ptr, sizeof(value))
if byteswap:
- res = _byteswap_double(res)
+ value = _byteswap8(value)
+
+ cdef double res
+ memcpy(&res, &value, sizeof(res))
return res
def read_uint16_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
- assert offset + 2 < len(data)
- cdef:
- const char *data_ptr = data
- uint16_t res = (<uint16_t *>(data_ptr + offset))[0]
+ cdef uint16_t res
+ assert offset + sizeof(res) < len(data)
+ memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
if byteswap:
res = _byteswap2(res)
return res
def read_uint32_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
- assert offset + 4 < len(data)
- cdef:
- const char *data_ptr = data
- uint32_t res = (<uint32_t *>(data_ptr + offset))[0]
+ cdef uint32_t res
+ assert offset + sizeof(res) < len(data)
+ memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
if byteswap:
res = _byteswap4(res)
return res
def read_uint64_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
- assert offset + 8 < len(data)
- cdef:
- const char *data_ptr = data
- uint64_t res = (<uint64_t *>(data_ptr + offset))[0]
+ cdef uint64_t res
+ assert offset + sizeof(res) < len(data)
+ memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
if byteswap:
res = _byteswap8(res)
return res
@@ -79,15 +83,3 @@ cdef extern from *:
uint16_t _byteswap2(uint16_t)
uint32_t _byteswap4(uint32_t)
uint64_t _byteswap8(uint64_t)
-
-
-cdef float _byteswap_float(float num):
- cdef uint32_t *intptr = <uint32_t *>&num
- intptr[0] = _byteswap4(intptr[0])
- return num
-
-
-cdef double _byteswap_double(double num):
- cdef uint64_t *intptr = <uint64_t *>&num
- intptr[0] = _byteswap8(intptr[0])
- return num
| - [x] closes #54391
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
@jonashaag | https://api.github.com/repos/pandas-dev/pandas/pulls/54407 | 2023-08-04T13:29:38Z | 2023-08-04T21:23:59Z | 2023-08-04T21:23:59Z | 2023-08-05T11:35:36Z |
DOC: Added list of functions to ignore EX03 test | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 07c5ad1868d9e..aba42f3733a3f 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -61,6 +61,118 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX02,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Partially validate docstrings (EX03)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX03 --ignore_functions \
+ pandas.Series.loc \
+ pandas.Series.iloc \
+ pandas.Series.pop \
+ pandas.Series.describe \
+ pandas.Series.skew \
+ pandas.Series.var \
+ pandas.Series.last \
+ pandas.Series.tz_convert \
+ pandas.Series.tz_localize \
+ pandas.Series.dt.month_name \
+ pandas.Series.dt.day_name \
+ pandas.Series.str.len \
+ pandas.Series.cat.set_categories \
+ pandas.Series.plot.bar \
+ pandas.Series.plot.hist \
+ pandas.Series.plot.line \
+ pandas.Series.to_sql \
+ pandas.Series.to_latex \
+ pandas.errors.CategoricalConversionWarning \
+ pandas.errors.ChainedAssignmentError \
+ pandas.errors.ClosedFileError \
+ pandas.errors.DatabaseError \
+ pandas.errors.IndexingError \
+ pandas.errors.InvalidColumnName \
+ pandas.errors.NumExprClobberingError \
+ pandas.errors.PossibleDataLossError \
+ pandas.errors.PossiblePrecisionLoss \
+ pandas.errors.SettingWithCopyError \
+ pandas.errors.SettingWithCopyWarning \
+ pandas.errors.SpecificationError \
+ pandas.errors.UndefinedVariableError \
+ pandas.errors.ValueLabelTypeMismatch \
+ pandas.Timestamp.ceil \
+ pandas.Timestamp.floor \
+ pandas.Timestamp.round \
+ pandas.read_pickle \
+ pandas.ExcelWriter \
+ pandas.read_json \
+ pandas.io.json.build_table_schema \
+ pandas.DataFrame.to_latex \
+ pandas.io.formats.style.Styler.to_latex \
+ pandas.read_parquet \
+ pandas.DataFrame.to_sql \
+ pandas.read_stata \
+ pandas.core.resample.Resampler.pipe \
+ pandas.core.resample.Resampler.fillna \
+ pandas.core.resample.Resampler.interpolate \
+ pandas.plotting.scatter_matrix \
+ pandas.pivot \
+ pandas.merge_asof \
+ pandas.wide_to_long \
+ pandas.Index.rename \
+ pandas.Index.droplevel \
+ pandas.Index.isin \
+ pandas.CategoricalIndex.set_categories \
+ pandas.MultiIndex.names \
+ pandas.MultiIndex.droplevel \
+ pandas.IndexSlice \
+ pandas.DatetimeIndex.month_name \
+ pandas.DatetimeIndex.day_name \
+ pandas.core.window.rolling.Rolling.corr \
+ pandas.Grouper \
+ pandas.core.groupby.SeriesGroupBy.apply \
+ pandas.core.groupby.DataFrameGroupBy.apply \
+ pandas.core.groupby.SeriesGroupBy.transform \
+ pandas.core.groupby.SeriesGroupBy.pipe \
+ pandas.core.groupby.DataFrameGroupBy.pipe \
+ pandas.core.groupby.DataFrameGroupBy.describe \
+ pandas.core.groupby.DataFrameGroupBy.idxmax \
+ pandas.core.groupby.DataFrameGroupBy.idxmin \
+ pandas.core.groupby.DataFrameGroupBy.value_counts \
+ pandas.core.groupby.SeriesGroupBy.describe \
+ pandas.core.groupby.DataFrameGroupBy.boxplot \
+ pandas.core.groupby.DataFrameGroupBy.hist \
+ pandas.io.formats.style.Styler.map \
+ pandas.io.formats.style.Styler.apply_index \
+ pandas.io.formats.style.Styler.map_index \
+ pandas.io.formats.style.Styler.format \
+ pandas.io.formats.style.Styler.format_index \
+ pandas.io.formats.style.Styler.relabel_index \
+ pandas.io.formats.style.Styler.hide \
+ pandas.io.formats.style.Styler.set_td_classes \
+ pandas.io.formats.style.Styler.set_tooltips \
+ pandas.io.formats.style.Styler.set_uuid \
+ pandas.io.formats.style.Styler.pipe \
+ pandas.io.formats.style.Styler.highlight_between \
+ pandas.io.formats.style.Styler.highlight_quantile \
+ pandas.io.formats.style.Styler.background_gradient \
+ pandas.io.formats.style.Styler.text_gradient \
+ pandas.DataFrame.values \
+ pandas.DataFrame.loc \
+ pandas.DataFrame.iloc \
+ pandas.DataFrame.groupby \
+ pandas.DataFrame.describe \
+ pandas.DataFrame.skew \
+ pandas.DataFrame.var \
+ pandas.DataFrame.idxmax \
+ pandas.DataFrame.idxmin \
+ pandas.DataFrame.last \
+ pandas.DataFrame.pivot \
+ pandas.DataFrame.sort_values \
+ pandas.DataFrame.tz_convert \
+ pandas.DataFrame.tz_localize \
+ pandas.DataFrame.plot.bar \
+ pandas.DataFrame.plot.hexbin \
+ pandas.DataFrame.plot.hist \
+ pandas.DataFrame.plot.line \
+ pandas.DataFrame.hist \
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
fi
### DOCUMENTATION NOTEBOOKS ###
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/54406 | 2023-08-04T13:20:50Z | 2023-08-04T16:51:03Z | 2023-08-04T16:51:03Z | 2023-08-04T17:12:32Z |
ENH: Update numpy exceptions imports | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 11c6d8ea1a821..6d419098bf279 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -32,6 +32,7 @@
import numpy as np
from pandas._libs import lib
+from pandas.compat.numpy import np_version_gte1p24
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
@@ -236,7 +237,8 @@ def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLi
try:
with warnings.catch_warnings():
# Can remove warning filter once NumPy 1.24 is min version
- warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
+ if not np_version_gte1p24:
+ warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
result = np.asarray(values, dtype=dtype)
except ValueError:
# Using try/except since it's more performant than checking is_list_like
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index e29fc12f118f4..05834b02a5ff3 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -13,6 +13,7 @@
import pytest
from pandas.compat import IS64
+from pandas.compat.numpy import np_version_gte1p25
from pandas.core.dtypes.common import (
is_integer_dtype,
@@ -389,7 +390,10 @@ def test_astype_preserves_name(self, index, dtype):
warn = None
if index.dtype.kind == "c" and dtype in ["float64", "int64", "uint64"]:
# imaginary components discarded
- warn = np.ComplexWarning
+ if np_version_gte1p25:
+ warn = np.exceptions.ComplexWarning
+ else:
+ warn = np.ComplexWarning
is_pyarrow_str = str(index.dtype) == "string[pyarrow]" and dtype == "category"
try:
| Due to NumPy's main namespace being changed in https://github.com/numpy/numpy/pull/24316, here I update warning imports.
- [ ] closes #xxxx
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54405 | 2023-08-04T11:38:39Z | 2023-08-07T17:49:48Z | 2023-08-07T17:49:48Z | 2023-08-07T17:50:24Z |
CI: remove test cases which would have globbed pandas.tests | diff --git a/scripts/tests/test_check_test_naming.py b/scripts/tests/test_check_test_naming.py
index 9ddaf2fe2a97d..dbd803ce4dd31 100644
--- a/scripts/tests/test_check_test_naming.py
+++ b/scripts/tests/test_check_test_naming.py
@@ -17,18 +17,7 @@
1,
),
("def test_foo(): pass\n", "", 0),
- (
- "class TestFoo:\n def foo(): pass\n",
- "t.py:2:4 found test function which does not start with 'test'\n",
- 1,
- ),
("class TestFoo:\n def test_foo(): pass\n", "", 0),
- (
- "class Foo:\n def foo(): pass\n",
- "t.py:1:0 found test class which does not start with 'Test'\n"
- "t.py:2:4 found test function which does not start with 'test'\n",
- 1,
- ),
(
"def foo():\n pass\ndef test_foo():\n foo()\n",
"",
| This came up here https://github.com/pandas-dev/pandas/pull/54365#issuecomment-1664639342
I've removed the test cases which would've ended up triggering globbing of pandas/tests, this should remove the noise observed in that PR | https://api.github.com/repos/pandas-dev/pandas/pulls/54404 | 2023-08-04T10:30:39Z | 2023-08-04T17:00:56Z | 2023-08-04T17:00:55Z | 2023-08-04T17:01:04Z |
TYP: Enable pyright's reportInconsistentConstructor | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 15bdc816ee65c..dc1ae4bd99612 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -138,7 +138,7 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: &pyright_dependencies
- - pyright@1.1.296
+ - pyright@1.1.305
- id: pyright
# note: assumes python env is setup and activated
name: pyright reportGeneralTypeIssues
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 6a61b37ff4a94..e36175fa5a0af 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -463,3 +463,6 @@ def closed(self) -> bool:
# to_stata
ToStataByteorder = Literal[">", "<", "little", "big"]
+
+# ExcelWriter
+ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"]
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index b14187b0cc3a5..9050fb6e76b9c 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -189,9 +189,6 @@ def ndim(self) -> int:
class Constant(Term):
- def __init__(self, value, env, side=None, encoding=None) -> None:
- super().__init__(value, env, side=side, encoding=encoding)
-
def _resolve_name(self):
return self._name
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 433421d35af55..77d8d79506258 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -93,9 +93,9 @@ def value(self):
class Constant(Term):
- def __init__(self, value, env: PyTablesScope, side=None, encoding=None) -> None:
+ def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:
assert isinstance(env, PyTablesScope), type(env)
- super().__init__(value, env, side=side, encoding=encoding)
+ super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index c6da7d847c363..d972983532e3c 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -573,7 +573,7 @@ class PeriodProperties(Properties):
class CombinedDatetimelikeProperties(
DatetimeProperties, TimedeltaProperties, PeriodProperties
):
- def __new__(cls, data: Series):
+ def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]
# CombinedDatetimelikeProperties isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 7c67f85ed3d1e..1024b52b0c416 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -74,6 +74,7 @@
from pandas._typing import (
DtypeArg,
DtypeBackend,
+ ExcelWriterIfSheetExists,
FilePath,
IntStrT,
ReadBuffer,
@@ -1129,7 +1130,7 @@ def __new__(
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
- if_sheet_exists: Literal["error", "new", "replace", "overlay"] | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict | None = None,
) -> ExcelWriter:
# only switch class if generic(ExcelWriter)
@@ -1218,7 +1219,7 @@ def __init__(
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
- if_sheet_exists: str | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
) -> None:
# validate that this engine can handle the extension
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index a4b4d965089dc..391103dd477f1 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -19,6 +19,7 @@
if TYPE_CHECKING:
from pandas._typing import (
+ ExcelWriterIfSheetExists,
FilePath,
StorageOptions,
WriteExcelBuffer,
@@ -39,7 +40,7 @@ def __init__(
datetime_format=None,
mode: str = "w",
storage_options: StorageOptions | None = None,
- if_sheet_exists: str | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 8ca2c098cd426..89f04be32b9bc 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -28,6 +28,7 @@
from openpyxl.workbook import Workbook
from pandas._typing import (
+ ExcelWriterIfSheetExists,
FilePath,
ReadBuffer,
Scalar,
@@ -48,7 +49,7 @@ def __init__(
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
- if_sheet_exists: str | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index d7c29a812c2b7..afa988a5eda51 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -15,6 +15,7 @@
if TYPE_CHECKING:
from pandas._typing import (
+ ExcelWriterIfSheetExists,
FilePath,
StorageOptions,
WriteExcelBuffer,
@@ -189,7 +190,7 @@ def __init__(
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
- if_sheet_exists: str | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
diff --git a/pyproject.toml b/pyproject.toml
index 75a33e2a5269c..e98030217987d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -732,6 +732,7 @@ include = ["pandas", "typings"]
exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
# enable subset of "strict"
reportDuplicateImport = true
+reportInconsistentConstructor = true
reportInvalidStubStatement = true
reportOverlappingOverload = true
reportPropertyTypeMismatch = true
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index 761018c3ce496..b5baab8c33471 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -1,4 +1,3 @@
-# this becomes obsolete when reportGeneralTypeIssues can be enabled in pyproject.toml
{
"typeCheckingMode": "off",
"reportGeneralTypeIssues": true,
@@ -9,12 +8,11 @@
],
"exclude":
[
- # exclude tests
"pandas/tests",
- # exclude vendored files
+
"pandas/io/clipboard",
"pandas/util/version",
- # and all files that currently don't pass
+
"pandas/_testing/__init__.py",
"pandas/_testing/_hypothesis.py",
"pandas/_testing/_io.py",
| reportInconsistentConstructor checks that `__new__` and `__init__` should be consistent (argument names and their types) | https://api.github.com/repos/pandas-dev/pandas/pulls/54398 | 2023-08-04T02:17:38Z | 2023-08-04T17:25:18Z | 2023-08-04T17:25:18Z | 2023-08-09T15:08:31Z |
DEPR: Positional arguments in to_sql except name | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 006ab5c49e24c..6e352c52cd60e 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -5651,7 +5651,7 @@ the database using :func:`~pandas.DataFrame.to_sql`.
data = pd.DataFrame(d, columns=c)
data
- data.to_sql("data", engine)
+ data.to_sql("data", con=engine)
With some databases, writing large DataFrames can result in errors due to
packet size limitations being exceeded. This can be avoided by setting the
@@ -5660,7 +5660,7 @@ writes ``data`` to the database in batches of 1000 rows at a time:
.. ipython:: python
- data.to_sql("data_chunked", engine, chunksize=1000)
+ data.to_sql("data_chunked", con=engine, chunksize=1000)
SQL data types
++++++++++++++
@@ -5680,7 +5680,7 @@ default ``Text`` type for string columns:
from sqlalchemy.types import String
- data.to_sql("data_dtype", engine, dtype={"Col_1": String})
+ data.to_sql("data_dtype", con=engine, dtype={"Col_1": String})
.. note::
@@ -5849,7 +5849,7 @@ have schema's). For example:
.. code-block:: python
- df.to_sql("table", engine, schema="other_schema")
+ df.to_sql(name="table", con=engine, schema="other_schema")
pd.read_sql_table("table", engine, schema="other_schema")
Querying
@@ -5876,7 +5876,7 @@ Specifying this will return an iterator through chunks of the query result:
.. ipython:: python
df = pd.DataFrame(np.random.randn(20, 3), columns=list("abc"))
- df.to_sql("data_chunks", engine, index=False)
+ df.to_sql(name="data_chunks", con=engine, index=False)
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst
index f33ab3911f231..92c37243b7e81 100644
--- a/doc/source/whatsnew/v0.14.0.rst
+++ b/doc/source/whatsnew/v0.14.0.rst
@@ -437,7 +437,7 @@ This ``engine`` can then be used to write or read data to/from this database:
.. ipython:: python
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c']})
- df.to_sql('db_table', engine, index=False)
+ df.to_sql(name='db_table', con=engine, index=False)
You can read data from a database by specifying the table name:
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 5cafaa5759a5b..db58d7ca67619 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -220,6 +220,7 @@ Other enhancements
- :meth:`DataFrame.to_parquet` and :func:`read_parquet` will now write and read ``attrs`` respectively (:issue:`54346`)
- Added support for the DataFrame Consortium Standard (:issue:`54383`)
- Performance improvement in :meth:`GroupBy.quantile` (:issue:`51722`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_210.notable_bug_fixes:
@@ -560,6 +561,7 @@ Other Deprecations
- Deprecated the use of non-supported datetime64 and timedelta64 resolutions with :func:`pandas.array`. Supported resolutions are: "s", "ms", "us", "ns" resolutions (:issue:`53058`)
- Deprecated values "pad", "ffill", "bfill", "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate`, use ``obj.ffill()`` or ``obj.bfill()`` instead (:issue:`53581`)
- Deprecated the behavior of :meth:`Index.argmax`, :meth:`Index.argmin`, :meth:`Series.argmax`, :meth:`Series.argmin` with either all-NAs and skipna=True or any-NAs and skipna=False returning -1; in a future version this will raise ``ValueError`` (:issue:`33941`, :issue:`33942`)
+- Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_sql` except ``name``. (:issue:`54229`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8a3a105749800..106dfffa40e3b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -97,7 +97,10 @@
SettingWithCopyWarning,
_chained_assignment_method_msg,
)
-from pandas.util._decorators import doc
+from pandas.util._decorators import (
+ deprecate_nonkeyword_arguments,
+ doc,
+)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
check_dtype_backend,
@@ -2792,6 +2795,9 @@ def to_hdf(
)
@final
+ @deprecate_nonkeyword_arguments(
+ version="3.0", allowed_args=["self", "name"], name="to_sql"
+ )
def to_sql(
self,
name: str,
@@ -2911,7 +2917,7 @@ def to_sql(
1 User 2
2 User 3
- >>> df.to_sql('users', con=engine)
+ >>> df.to_sql(name='users', con=engine)
3
>>> from sqlalchemy import text
>>> with engine.connect() as conn:
@@ -2922,14 +2928,14 @@ def to_sql(
>>> with engine.begin() as connection:
... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
- ... df1.to_sql('users', con=connection, if_exists='append')
+ ... df1.to_sql(name='users', con=connection, if_exists='append')
2
This is allowed to support operations that require that the same
DBAPI connection is used for the entire operation.
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
- >>> df2.to_sql('users', con=engine, if_exists='append')
+ >>> df2.to_sql(name='users', con=engine, if_exists='append')
2
>>> with engine.connect() as conn:
... conn.execute(text("SELECT * FROM users")).fetchall()
@@ -2939,7 +2945,7 @@ def to_sql(
Overwrite the table with just ``df2``.
- >>> df2.to_sql('users', con=engine, if_exists='replace',
+ >>> df2.to_sql(name='users', con=engine, if_exists='replace',
... index_label='id')
2
>>> with engine.connect() as conn:
@@ -2956,7 +2962,7 @@ def to_sql(
... stmt = insert(table.table).values(data).on_conflict_do_nothing(index_elements=["a"])
... result = conn.execute(stmt)
... return result.rowcount
- >>> df_conflict.to_sql("conflict_table", conn, if_exists="append", method=insert_on_conflict_nothing) # doctest: +SKIP
+ >>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_nothing) # doctest: +SKIP
0
For MySQL, a callable to update columns ``b`` and ``c`` if there's a conflict
@@ -2973,7 +2979,7 @@ def to_sql(
... stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c)
... result = conn.execute(stmt)
... return result.rowcount
- >>> df_conflict.to_sql("conflict_table", conn, if_exists="append", method=insert_on_conflict_update) # doctest: +SKIP
+ >>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_update) # doctest: +SKIP
2
Specify the dtype (especially useful for integers with missing values).
@@ -2989,7 +2995,7 @@ def to_sql(
2 2.0
>>> from sqlalchemy.types import Integer
- >>> df.to_sql('integers', con=engine, index=False,
+ >>> df.to_sql(name='integers', con=engine, index=False,
... dtype={"A": Integer()})
3
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 2cf9d144eb91c..7669d5aa4cea5 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -621,7 +621,7 @@ def read_sql(
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
- >>> df.to_sql('test_data', conn)
+ >>> df.to_sql(name='test_data', con=conn)
2
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 8160249764272..0b98bcc4d4bec 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -548,7 +548,7 @@ def sqlite_buildin_iris(sqlite_buildin, iris_path):
def test_dataframe_to_sql(conn, test_frame1, request):
# GH 51086 if conn is sqlite_engine
conn = request.getfixturevalue(conn)
- test_frame1.to_sql("test", conn, if_exists="append", index=False)
+ test_frame1.to_sql(name="test", con=conn, if_exists="append", index=False)
@pytest.mark.db
@@ -569,7 +569,7 @@ def test_dataframe_to_sql_arrow_dtypes(conn, request):
)
conn = request.getfixturevalue(conn)
with tm.assert_produces_warning(UserWarning, match="the 'timedelta'"):
- df.to_sql("test_arrow", conn, if_exists="replace", index=False)
+ df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False)
@pytest.mark.db
@@ -585,7 +585,7 @@ def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture):
}
)
conn = request.getfixturevalue(conn)
- df.to_sql("test_arrow", conn, if_exists="replace", index=False)
+ df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False)
@pytest.mark.db
@@ -756,7 +756,7 @@ def test_read_procedure(conn, request):
from sqlalchemy.engine import Engine
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
- df.to_sql("test_frame", conn, index=False)
+ df.to_sql(name="test_frame", con=conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
@@ -811,7 +811,7 @@ def psql_insert_copy(table, conn, keys, data_iter):
conn = request.getfixturevalue(conn)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
result_count = expected.to_sql(
- "test_frame", conn, index=False, method=psql_insert_copy
+ name="test_frame", con=conn, index=False, method=psql_insert_copy
)
# GH 46891
if expected_count is None:
@@ -860,12 +860,14 @@ def insert_on_conflict(table, conn, keys, data_iter):
conn.execute(create_sql)
expected = DataFrame([[1, 2.1, "a"]], columns=list("abc"))
- expected.to_sql("test_insert_conflict", conn, if_exists="append", index=False)
+ expected.to_sql(
+ name="test_insert_conflict", con=conn, if_exists="append", index=False
+ )
df_insert = DataFrame([[1, 3.2, "b"]], columns=list("abc"))
inserted = df_insert.to_sql(
- "test_insert_conflict",
- conn,
+ name="test_insert_conflict",
+ con=conn,
index=False,
if_exists="append",
method=insert_on_conflict,
@@ -914,12 +916,12 @@ def insert_on_conflict(table, conn, keys, data_iter):
conn.execute(create_sql)
df = DataFrame([[1, 2.1, "a"]], columns=list("abc"))
- df.to_sql("test_insert_conflict", conn, if_exists="append", index=False)
+ df.to_sql(name="test_insert_conflict", con=conn, if_exists="append", index=False)
expected = DataFrame([[1, 3.2, "b"]], columns=list("abc"))
inserted = expected.to_sql(
- "test_insert_conflict",
- conn,
+ name="test_insert_conflict",
+ con=conn,
index=False,
if_exists="append",
method=insert_on_conflict,
@@ -1439,7 +1441,7 @@ def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
- result_count = df.to_sql("test_timedelta", self.conn)
+ result_count = df.to_sql(name="test_timedelta", con=self.conn)
assert result_count == 2
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].view("int64"))
@@ -1448,7 +1450,7 @@ def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
- assert df.to_sql("test_complex", self.conn) is None
+ assert df.to_sql("test_complex", con=self.conn) is None
@pytest.mark.parametrize(
"index_name,index_label,expected",
@@ -1539,7 +1541,7 @@ def test_multiindex_roundtrip(self):
index=["A", "B"],
)
- df.to_sql("test_multiindex_roundtrip", self.conn)
+ df.to_sql(name="test_multiindex_roundtrip", con=self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
@@ -1557,7 +1559,7 @@ def test_multiindex_roundtrip(self):
def test_dtype_argument(self, dtype):
# GH10285 Add dtype argument to read_sql_query
df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"])
- assert df.to_sql("test_dtype_argument", self.conn) == 2
+ assert df.to_sql(name="test_dtype_argument", con=self.conn) == 2
expected = df.astype(dtype)
result = sql.read_sql_query(
@@ -1609,7 +1611,7 @@ def test_chunksize_read(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((22, 5)), columns=list("abcde")
)
- df.to_sql("test_chunksize", self.conn, index=False)
+ df.to_sql(name="test_chunksize", con=self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
@@ -1653,7 +1655,7 @@ def test_categorical(self):
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
- df2.to_sql("test_categorical", self.conn, index=False)
+ df2.to_sql(name="test_categorical", con=self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
@@ -1661,12 +1663,12 @@ def test_categorical(self):
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
- df.to_sql("test_unicode", self.conn, index=False)
+ df.to_sql(name="test_unicode", con=self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
- df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
+ df.to_sql(name="d1187b08-4943-4c8d-a7f6", con=self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
@@ -1675,7 +1677,7 @@ def test_escaped_table_name(self):
def test_read_sql_duplicate_columns(self):
# GH#53117
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": 1})
- df.to_sql("test_table", self.conn, index=False)
+ df.to_sql(name="test_table", con=self.conn, index=False)
result = pd.read_sql("SELECT a, b, a +1 as a, c FROM test_table;", self.conn)
expected = DataFrame(
@@ -1771,7 +1773,7 @@ def test_warning_case_insensitive_table_name(self, test_frame1):
# Test that the warning is certainly NOT triggered in a normal case.
with tm.assert_produces_warning(None):
- test_frame1.to_sql("CaseSensitive", self.conn)
+ test_frame1.to_sql(name="CaseSensitive", con=self.conn)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
@@ -1840,7 +1842,7 @@ def test_database_uri_string(self, test_frame1):
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
- test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
+ test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
@@ -1882,7 +1884,7 @@ def test_query_by_select_obj(self):
def test_column_with_percentage(self):
# GH 37157
df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]})
- df.to_sql("test_column_percentage", self.conn, index=False)
+ df.to_sql(name="test_column_percentage", con=self.conn, index=False)
res = sql.read_sql_table("test_column_percentage", self.conn)
@@ -2094,7 +2096,7 @@ def test_default_type_conversion(self):
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2**62]})
- assert df.to_sql("test_bigint", self.conn, index=False) == 1
+ assert df.to_sql(name="test_bigint", con=self.conn, index=False) == 1
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
@@ -2193,7 +2195,7 @@ def test_datetime_with_timezone_roundtrip(self):
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
- assert expected.to_sql("test_datetime_tz", self.conn, index=False) == 3
+ assert expected.to_sql(name="test_datetime_tz", con=self.conn, index=False) == 3
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
@@ -2215,7 +2217,7 @@ def test_datetime_with_timezone_roundtrip(self):
def test_out_of_bounds_datetime(self):
# GH 26761
data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
- assert data.to_sql("test_datetime_obb", self.conn, index=False) == 1
+ assert data.to_sql(name="test_datetime_obb", con=self.conn, index=False) == 1
result = sql.read_sql_table("test_datetime_obb", self.conn)
expected = DataFrame([pd.NaT], columns=["date"])
tm.assert_frame_equal(result, expected)
@@ -2225,7 +2227,10 @@ def test_naive_datetimeindex_roundtrip(self):
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")._with_freq(None)
expected = DataFrame({"nums": range(5)}, index=dates)
- assert expected.to_sql("foo_table", self.conn, index_label="info_date") == 5
+ assert (
+ expected.to_sql(name="foo_table", con=self.conn, index_label="info_date")
+ == 5
+ )
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index with gain a name from a set_index operation; expected
tm.assert_frame_equal(result, expected, check_names=False)
@@ -2266,7 +2271,7 @@ def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
- assert df.to_sql("test_datetime", self.conn) == 3
+ assert df.to_sql(name="test_datetime", con=self.conn) == 3
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
@@ -2288,7 +2293,7 @@ def test_datetime_NaT(self):
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
- assert df.to_sql("test_datetime", self.conn, index=False) == 3
+ assert df.to_sql(name="test_datetime", con=self.conn, index=False) == 3
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
@@ -2306,7 +2311,7 @@ def test_datetime_NaT(self):
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
- assert df.to_sql("test_date", self.conn, index=False) == 2
+ assert df.to_sql(name="test_date", con=self.conn, index=False) == 2
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
@@ -2316,7 +2321,7 @@ def test_datetime_date(self):
def test_datetime_time(self, sqlite_buildin):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
- assert df.to_sql("test_time", self.conn, index=False) == 2
+ assert df.to_sql(name="test_time", con=self.conn, index=False) == 2
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
@@ -2343,7 +2348,7 @@ def test_mixed_dtype_insert(self):
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
- assert df.to_sql("test_read_write", self.conn, index=False) == 1
+ assert df.to_sql(name="test_read_write", con=self.conn, index=False) == 1
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
@@ -2351,7 +2356,7 @@ def test_mixed_dtype_insert(self):
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
- assert df.to_sql("test_nan", self.conn, index=False) == 3
+ assert df.to_sql(name="test_nan", con=self.conn, index=False) == 3
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
@@ -2364,7 +2369,7 @@ def test_nan_numeric(self):
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
- assert df.to_sql("test_nan", self.conn, index=False) == 3
+ assert df.to_sql(name="test_nan", con=self.conn, index=False) == 3
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
@@ -2379,7 +2384,7 @@ def test_nan_fullcolumn(self):
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
- assert df.to_sql("test_nan", self.conn, index=False) == 3
+ assert df.to_sql(name="test_nan", con=self.conn, index=False) == 3
# NaNs are coming back as None
df.loc[2, "B"] = None
@@ -2441,25 +2446,27 @@ def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
- assert df.to_sql("dtype_test", self.conn) == 2
- assert df.to_sql("dtype_test2", self.conn, dtype={"B": TEXT}) == 2
+ assert df.to_sql(name="dtype_test", con=self.conn) == 2
+ assert df.to_sql(name="dtype_test2", con=self.conn, dtype={"B": TEXT}) == 2
meta = MetaData()
meta.reflect(bind=self.conn)
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
- df.to_sql("error", self.conn, dtype={"B": str})
+ df.to_sql(name="error", con=self.conn, dtype={"B": str})
# GH9083
- assert df.to_sql("dtype_test3", self.conn, dtype={"B": String(10)}) == 2
+ assert (
+ df.to_sql(name="dtype_test3", con=self.conn, dtype={"B": String(10)}) == 2
+ )
meta.reflect(bind=self.conn)
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, String)
assert sqltype.length == 10
# single dtype
- assert df.to_sql("single_dtype_test", self.conn, dtype=TEXT) == 2
+ assert df.to_sql(name="single_dtype_test", con=self.conn, dtype=TEXT) == 2
meta.reflect(bind=self.conn)
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
@@ -2484,7 +2491,7 @@ def test_notna_dtype(self):
df = DataFrame(cols)
tbl = "notna_dtype_test"
- assert df.to_sql(tbl, self.conn) == 2
+ assert df.to_sql(name=tbl, con=self.conn) == 2
_ = sql.read_sql_table(tbl, self.conn)
meta = MetaData()
meta.reflect(bind=self.conn)
@@ -2517,8 +2524,8 @@ def test_double_precision(self):
assert (
df.to_sql(
- "test_dtypes",
- self.conn,
+ name="test_dtypes",
+ con=self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": Float(precision=23)},
@@ -2567,7 +2574,9 @@ def main(connectable):
test_connectable(connectable)
assert (
- DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
+ DataFrame({"test_foo_data": [0, 1, 2]}).to_sql(
+ name="test_foo_data", con=self.conn
+ )
== 3
)
main(self.conn)
@@ -2597,9 +2606,9 @@ def test_to_sql_with_negative_npinf(self, input, request):
msg = "inf cannot be used with MySQL"
with pytest.raises(ValueError, match=msg):
- df.to_sql("foobar", self.conn, index=False)
+ df.to_sql(name="foobar", con=self.conn, index=False)
else:
- assert df.to_sql("foobar", self.conn, index=False) == 1
+ assert df.to_sql(name="foobar", con=self.conn, index=False) == 1
res = sql.read_sql_table("foobar", self.conn)
tm.assert_equal(df, res)
@@ -2672,7 +2681,7 @@ def test_read_sql_dtype_backend(self, string_storage, func, dtype_backend):
# GH#50048
table = "test"
df = self.dtype_backend_data()
- df.to_sql(table, self.conn, index=False, if_exists="replace")
+ df.to_sql(name=table, con=self.conn, index=False, if_exists="replace")
with pd.option_context("mode.string_storage", string_storage):
result = getattr(pd, func)(
@@ -2684,7 +2693,7 @@ def test_read_sql_dtype_backend(self, string_storage, func, dtype_backend):
with pd.option_context("mode.string_storage", string_storage):
iterator = getattr(pd, func)(
f"Select * from {table}",
- self.conn,
+ con=self.conn,
dtype_backend=dtype_backend,
chunksize=3,
)
@@ -2697,7 +2706,7 @@ def test_read_sql_dtype_backend_table(self, string_storage, func, dtype_backend)
# GH#50048
table = "test"
df = self.dtype_backend_data()
- df.to_sql(table, self.conn, index=False, if_exists="replace")
+ df.to_sql(name=table, con=self.conn, index=False, if_exists="replace")
with pd.option_context("mode.string_storage", string_storage):
result = getattr(pd, func)(table, self.conn, dtype_backend=dtype_backend)
@@ -2719,7 +2728,7 @@ def test_read_sql_dtype_backend_table(self, string_storage, func, dtype_backend)
def test_read_sql_invalid_dtype_backend_table(self, func):
table = "test"
df = self.dtype_backend_data()
- df.to_sql(table, self.conn, index=False, if_exists="replace")
+ df.to_sql(name=table, con=self.conn, index=False, if_exists="replace")
msg = (
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
@@ -2784,7 +2793,7 @@ def test_chunksize_empty_dtypes(self):
dtypes = {"a": "int64", "b": "object"}
df = DataFrame(columns=["a", "b"]).astype(dtypes)
expected = df.copy()
- df.to_sql("test", self.conn, index=False, if_exists="replace")
+ df.to_sql(name="test", con=self.conn, index=False, if_exists="replace")
for result in read_sql_query(
"SELECT * FROM test",
@@ -2800,7 +2809,7 @@ def test_read_sql_dtype(self, func, dtype_backend):
# GH#50797
table = "test"
df = DataFrame({"a": [1, 2, 3], "b": 5})
- df.to_sql(table, self.conn, index=False, if_exists="replace")
+ df.to_sql(name=table, con=self.conn, index=False, if_exists="replace")
result = getattr(pd, func)(
f"Select * from {table}",
@@ -2837,6 +2846,17 @@ def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
+ def test_keyword_deprecation(self):
+ # GH 54397
+ msg = (
+ "tarting with pandas version 3.0 all arguments of to_sql except for the "
+ "argument 'name' will be keyword-only."
+ )
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}])
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_sql("example", self.conn)
+
def test_default_type_conversion(self):
df = sql.read_sql_table("types", self.conn)
@@ -2861,7 +2881,7 @@ def test_default_date_load(self):
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
- assert df.to_sql("test_bigintwarning", self.conn, index=False) == 2
+ assert df.to_sql(name="test_bigintwarning", con=self.conn, index=False) == 2
with tm.assert_produces_warning(None):
sql.read_sql_table("test_bigintwarning", self.conn)
@@ -2899,7 +2919,9 @@ class Test(BaseModel):
with Session() as session:
df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]})
assert (
- df.to_sql("test_frame", con=self.conn, index=False, if_exists="replace")
+ df.to_sql(
+ name="test_frame", con=self.conn, index=False, if_exists="replace"
+ )
== 2
)
session.commit()
@@ -2927,7 +2949,7 @@ def test_read_sql_string_inference(self):
pa = pytest.importorskip("pyarrow")
table = "test"
df = DataFrame({"a": ["x", "y"]})
- df.to_sql(table, self.conn, index=False, if_exists="replace")
+ df.to_sql(table, con=self.conn, index=False, if_exists="replace")
with pd.option_context("future.infer_string", True):
result = read_sql_table(table, self.conn)
@@ -3010,15 +3032,21 @@ def test_schema_support(self):
self.conn.exec_driver_sql("CREATE SCHEMA other;")
# write dataframe to different schema's
- assert df.to_sql("test_schema_public", self.conn, index=False) == 2
+ assert df.to_sql(name="test_schema_public", con=self.conn, index=False) == 2
assert (
df.to_sql(
- "test_schema_public_explicit", self.conn, index=False, schema="public"
+ name="test_schema_public_explicit",
+ con=self.conn,
+ index=False,
+ schema="public",
)
== 2
)
assert (
- df.to_sql("test_schema_other", self.conn, index=False, schema="other") == 2
+ df.to_sql(
+ name="test_schema_other", con=self.conn, index=False, schema="other"
+ )
+ == 2
)
# read dataframes back in
@@ -3045,19 +3073,22 @@ def test_schema_support(self):
# write dataframe with different if_exists options
assert (
- df.to_sql("test_schema_other", self.conn, schema="other", index=False) == 2
+ df.to_sql(
+ name="test_schema_other", con=self.conn, schema="other", index=False
+ )
+ == 2
)
df.to_sql(
- "test_schema_other",
- self.conn,
+ name="test_schema_other",
+ con=self.conn,
schema="other",
index=False,
if_exists="replace",
)
assert (
df.to_sql(
- "test_schema_other",
- self.conn,
+ name="test_schema_other",
+ con=self.conn,
schema="other",
index=False,
if_exists="append",
@@ -3176,7 +3207,7 @@ def test_execute_sql(self):
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
- assert df.to_sql("test_date", self.conn, index=False) == 2
+ assert df.to_sql(name="test_date", con=self.conn, index=False) == 2
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
@@ -3195,7 +3226,7 @@ def test_datetime_time(self, tz_aware):
df = DataFrame(tz_times, columns=["a"])
- assert df.to_sql("test_time", self.conn, index=False) == 2
+ assert df.to_sql(name="test_time", con=self.conn, index=False) == 2
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
@@ -3233,8 +3264,8 @@ def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
- assert df.to_sql("dtype_test", self.conn) == 2
- assert df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"}) == 2
+ assert df.to_sql(name="dtype_test", con=self.conn) == 2
+ assert df.to_sql(name="dtype_test2", con=self.conn, dtype={"B": "STRING"}) == 2
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
@@ -3242,10 +3273,10 @@ def test_dtype(self):
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
- df.to_sql("error", self.conn, dtype={"B": bool})
+ df.to_sql(name="error", con=self.conn, dtype={"B": bool})
# single dtype
- assert df.to_sql("single_dtype_test", self.conn, dtype="STRING") == 2
+ assert df.to_sql(name="single_dtype_test", con=self.conn, dtype="STRING") == 2
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
@@ -3262,7 +3293,7 @@ def test_notna_dtype(self):
df = DataFrame(cols)
tbl = "notna_dtype_test"
- assert df.to_sql(tbl, self.conn) == 2
+ assert df.to_sql(name=tbl, con=self.conn) == 2
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
@@ -3275,7 +3306,7 @@ def test_illegal_names(self):
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
- df.to_sql("", self.conn)
+ df.to_sql(name="", con=self.conn)
for ndx, weird_name in enumerate(
[
@@ -3291,12 +3322,12 @@ def test_illegal_names(self):
"\xe9",
]
):
- assert df.to_sql(weird_name, self.conn) == 2
+ assert df.to_sql(name=weird_name, con=self.conn) == 2
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
- assert df2.to_sql(c_tbl, self.conn) == 2
+ assert df2.to_sql(name=c_tbl, con=self.conn) == 2
sql.table_exists(c_tbl, self.conn)
| - [X] xref #54229
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54397 | 2023-08-04T01:22:12Z | 2023-08-10T00:44:42Z | 2023-08-10T00:44:42Z | 2024-02-17T17:20:08Z |
CLN/DOC: DataFrame.count remove references to "level" | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 3adb0597cf563..96fe90ce08c5f 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -512,19 +512,10 @@ def setup(self, axis):
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
- self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
- self.df.columns = MultiIndex.from_arrays([self.df.columns, self.df.columns])
- self.df_mixed.index = MultiIndex.from_arrays(
- [self.df_mixed.index, self.df_mixed.index]
- )
- self.df_mixed.columns = MultiIndex.from_arrays(
- [self.df_mixed.columns, self.df_mixed.columns]
- )
-
- def time_count_level_multi(self, axis):
+ def time_count(self, axis):
self.df.count(axis=axis)
- def time_count_level_mixed_dtypes_multi(self, axis):
+ def time_count_mixed_dtypes(self, axis):
self.df_mixed.count(axis=axis)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3b2fe1699e996..e3f56fc6421f8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10991,9 +10991,8 @@ def count(self, axis: Axis = 0, numeric_only: bool = False):
Returns
-------
- Series or DataFrame
+ Series
For each column/row the number of non-NA/null entries.
- If `level` is specified returns a `DataFrame`.
See Also
--------
@@ -11051,7 +11050,7 @@ def count(self, axis: Axis = 0, numeric_only: bool = False):
else:
result = notna(frame).sum(axis=axis)
- return result.astype("int64").__finalize__(self, method="count")
+ return result.astype("int64", copy=False).__finalize__(self, method="count")
def _reduce(
self,
| 1. Removed mention of `level` in `DataFrame.count` docstring as it has been deprecated and removed.
2. Removed "level" from `frame_methods.Count` benchmark name and the `MultiIndex` since it was only relevant when using `level`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54395 | 2023-08-04T00:34:35Z | 2023-08-04T17:07:46Z | 2023-08-04T17:07:46Z | 2023-09-06T00:54:35Z |
REF: simplify extension reduction tests | diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 0e9a35b9f07e8..7d76838998540 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -60,6 +60,7 @@ class TestMyDtype(BaseDtypeTests):
BaseBooleanReduceTests,
BaseNoReduceTests,
BaseNumericReduceTests,
+ BaseReduceTests,
)
from pandas.tests.extension.base.reshaping import BaseReshapingTests # noqa: F401
from pandas.tests.extension.base.setitem import BaseSetitemTests # noqa: F401
diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index b7edfb860549c..00919b16a2600 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -15,6 +15,10 @@ class BaseReduceTests(BaseExtensionTests):
make sense for numeric/boolean operations.
"""
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ # Specify if we expect this reduction to succeed.
+ return False
+
def check_reduce(self, s, op_name, skipna):
# We perform the same operation on the np.float64 data and check
# that the results match. Override if you need to cast to something
@@ -66,47 +70,42 @@ def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
tm.assert_extension_array_equal(result1, expected)
-
-class BaseNoReduceTests(BaseReduceTests):
- """we don't define any reductions"""
-
- @pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
- op_name = all_numeric_reductions
- s = pd.Series(data)
-
- msg = (
- "[Cc]annot perform|Categorical is not ordered for operation|"
- "does not support reduction|"
- )
-
- with pytest.raises(TypeError, match=msg):
- getattr(s, op_name)(skipna=skipna)
-
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
op_name = all_boolean_reductions
s = pd.Series(data)
- msg = (
- "[Cc]annot perform|Categorical is not ordered for operation|"
- "does not support reduction|"
- )
+ if not self._supports_reduction(s, op_name):
+ msg = (
+ "[Cc]annot perform|Categorical is not ordered for operation|"
+ "does not support reduction|"
+ )
- with pytest.raises(TypeError, match=msg):
- getattr(s, op_name)(skipna=skipna)
+ with pytest.raises(TypeError, match=msg):
+ getattr(s, op_name)(skipna=skipna)
+ else:
+ self.check_reduce(s, op_name, skipna)
-class BaseNumericReduceTests(BaseReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series(self, data, all_numeric_reductions, skipna):
+ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
s = pd.Series(data)
- # min/max with empty produce numpy warnings
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- self.check_reduce(s, op_name, skipna)
+ if not self._supports_reduction(s, op_name):
+ msg = (
+ "[Cc]annot perform|Categorical is not ordered for operation|"
+ "does not support reduction|"
+ )
+
+ with pytest.raises(TypeError, match=msg):
+ getattr(s, op_name)(skipna=skipna)
+
+ else:
+ # min/max with empty produce numpy warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", RuntimeWarning)
+ self.check_reduce(s, op_name, skipna)
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
@@ -118,12 +117,28 @@ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
if op_name in ["count", "kurt", "sem"]:
pytest.skip(f"{op_name} not an array method")
+ if not self._supports_reduction(s, op_name):
+ pytest.skip(f"Reduction {op_name} not supported for this dtype")
+
self.check_reduce_frame(s, op_name, skipna)
+# TODO: deprecate BaseNoReduceTests, BaseNumericReduceTests, BaseBooleanReduceTests
+class BaseNoReduceTests(BaseReduceTests):
+ """we don't define any reductions"""
+
+
+class BaseNumericReduceTests(BaseReduceTests):
+ # For backward compatibility only, this only runs the numeric reductions
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ if op_name in ["any", "all"]:
+ pytest.skip("These are tested in BaseBooleanReduceTests")
+ return True
+
+
class BaseBooleanReduceTests(BaseReduceTests):
- @pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series(self, data, all_boolean_reductions, skipna):
- op_name = all_boolean_reductions
- s = pd.Series(data)
- self.check_reduce(s, op_name, skipna)
+ # For backward compatibility only, this only runs the numeric reductions
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ if op_name not in ["any", "all"]:
+ pytest.skip("These are tested in BaseNumericReduceTests")
+ return True
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 944ed0dbff66e..b2dd910fd0d2d 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -146,6 +146,9 @@ def test_fillna_series_method(self, data_missing, fillna_method):
class Reduce:
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ return True
+
def check_reduce(self, s, op_name, skipna):
if op_name in ["median", "skew", "kurt", "sem"]:
msg = r"decimal does not support the .* operation"
@@ -183,7 +186,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
tm.assert_series_equal(result, expected)
-class TestNumericReduce(Reduce, base.BaseNumericReduceTests):
+class TestReduce(Reduce, base.BaseReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
@@ -194,10 +197,6 @@ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
return super().test_reduce_frame(data, all_numeric_reductions, skipna)
-class TestBooleanReduce(Reduce, base.BaseBooleanReduceTests):
- pass
-
-
class TestMethods(base.BaseMethodsTests):
def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
warn = FutureWarning if not using_copy_on_write else None
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 8a571d9295e1f..fa3314e36c974 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -175,7 +175,7 @@ def test_fillna_frame(self):
unhashable = pytest.mark.xfail(reason="Unhashable")
-class TestReduce(base.BaseNoReduceTests):
+class TestReduce(base.BaseReduceTests):
pass
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 655ca9cc39c58..eae97fceab2f7 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -408,7 +408,10 @@ def test_accumulate_series(self, data, all_numeric_accumulations, skipna, reques
self.check_accumulate(ser, op_name, skipna)
-class TestBaseNumericReduce(base.BaseNumericReduceTests):
+class TestReduce(base.BaseReduceTests):
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ return True
+
def check_reduce(self, ser, op_name, skipna):
pa_dtype = ser.dtype.pyarrow_dtype
if op_name == "count":
@@ -429,7 +432,7 @@ def check_reduce(self, ser, op_name, skipna):
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
+ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
pa_dtype = data.dtype.pyarrow_dtype
opname = all_numeric_reductions
@@ -497,44 +500,10 @@ def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
"median",
}:
request.node.add_marker(xfail_mark)
- super().test_reduce_series(data, all_numeric_reductions, skipna)
-
- def _get_expected_reduction_dtype(self, arr, op_name: str):
- if op_name in ["max", "min"]:
- cmp_dtype = arr.dtype
- elif arr.dtype.name == "decimal128(7, 3)[pyarrow]":
- if op_name not in ["median", "var", "std"]:
- cmp_dtype = arr.dtype
- else:
- cmp_dtype = "float64[pyarrow]"
- elif op_name in ["median", "var", "std", "mean", "skew"]:
- cmp_dtype = "float64[pyarrow]"
- else:
- cmp_dtype = {
- "i": "int64[pyarrow]",
- "u": "uint64[pyarrow]",
- "f": "float64[pyarrow]",
- }[arr.dtype.kind]
- return cmp_dtype
+ super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
@pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_frame(self, data, all_numeric_reductions, skipna):
- op_name = all_numeric_reductions
- if op_name == "skew":
- assert not hasattr(data, op_name)
- return
- return super().test_reduce_frame(data, all_numeric_reductions, skipna)
-
- @pytest.mark.parametrize("typ", ["int64", "uint64", "float64"])
- def test_median_not_approximate(self, typ):
- # GH 52679
- result = pd.Series([1, 2], dtype=f"{typ}[pyarrow]").median()
- assert result == 1.5
-
-
-class TestBaseBooleanReduce(base.BaseBooleanReduceTests):
- @pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series(
+ def test_reduce_series_boolean(
self, data, all_boolean_reductions, skipna, na_value, request
):
pa_dtype = data.dtype.pyarrow_dtype
@@ -566,6 +535,38 @@ def test_reduce_series(
result = getattr(ser, op_name)(skipna=skipna)
assert result is (op_name == "any")
+ def _get_expected_reduction_dtype(self, arr, op_name: str):
+ if op_name in ["max", "min"]:
+ cmp_dtype = arr.dtype
+ elif arr.dtype.name == "decimal128(7, 3)[pyarrow]":
+ if op_name not in ["median", "var", "std"]:
+ cmp_dtype = arr.dtype
+ else:
+ cmp_dtype = "float64[pyarrow]"
+ elif op_name in ["median", "var", "std", "mean", "skew"]:
+ cmp_dtype = "float64[pyarrow]"
+ else:
+ cmp_dtype = {
+ "i": "int64[pyarrow]",
+ "u": "uint64[pyarrow]",
+ "f": "float64[pyarrow]",
+ }[arr.dtype.kind]
+ return cmp_dtype
+
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
+ op_name = all_numeric_reductions
+ if op_name == "skew":
+ assert not hasattr(data, op_name)
+ return
+ return super().test_reduce_frame(data, all_numeric_reductions, skipna)
+
+ @pytest.mark.parametrize("typ", ["int64", "uint64", "float64"])
+ def test_median_not_approximate(self, typ):
+ # GH 52679
+ result = pd.Series([1, 2], dtype=f"{typ}[pyarrow]").median()
+ assert result == 1.5
+
class TestBaseGroupby(base.BaseGroupbyTests):
def test_in_numeric_groupby(self, data_for_grouping):
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index e5f6da5371742..229f1f4ec028c 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -220,7 +220,10 @@ def test_groupby_sum_mincount(self, data_for_grouping, min_count):
tm.assert_frame_equal(result, expected)
-class TestNumericReduce(base.BaseNumericReduceTests):
+class TestReduce(base.BaseReduceTests):
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ return True
+
def check_reduce(self, s, op_name, skipna):
if op_name == "count":
result = getattr(s, op_name)()
@@ -248,10 +251,6 @@ def _get_expected_reduction_dtype(self, arr, op_name: str):
return cmp_dtype
-class TestBooleanReduce(base.BaseBooleanReduceTests):
- pass
-
-
class TestPrinting(base.BasePrintingTests):
pass
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index fc4dfe3af3bca..e24d29ea53908 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -152,7 +152,7 @@ class TestMissing(base.BaseMissingTests):
pass
-class TestReduce(base.BaseNoReduceTests):
+class TestReduce(base.BaseReduceTests):
pass
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 4ef303289ee5c..b4870d2f1fe2f 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -105,7 +105,7 @@ class TestInterface(BaseInterval, base.BaseInterfaceTests):
pass
-class TestReduce(base.BaseNoReduceTests):
+class TestReduce(base.BaseReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
diff --git a/pandas/tests/extension/test_masked_numeric.py b/pandas/tests/extension/test_masked_numeric.py
index b171797dd6359..cb3792d37831d 100644
--- a/pandas/tests/extension/test_masked_numeric.py
+++ b/pandas/tests/extension/test_masked_numeric.py
@@ -227,7 +227,12 @@ class TestGroupby(base.BaseGroupbyTests):
pass
-class TestNumericReduce(base.BaseNumericReduceTests):
+class TestReduce(base.BaseReduceTests):
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ if op_name in ["any", "all"]:
+ pytest.skip(reason="Tested in tests/reductions/test_reductions.py")
+ return True
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
# overwrite to ensure pd.NA is tested instead of np.nan
# https://github.com/pandas-dev/pandas/issues/30958
@@ -266,11 +271,6 @@ def _get_expected_reduction_dtype(self, arr, op_name: str):
return cmp_dtype
-@pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py")
-class TestBooleanReduce(base.BaseBooleanReduceTests):
- pass
-
-
class TestAccumulation(base.BaseAccumulateTests):
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
return True
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index db191954c8d59..14f98e4115e4e 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -303,11 +303,22 @@ class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
-class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
+class TestReduce(BaseNumPyTests, base.BaseReduceTests):
+ def _supports_reduction(self, obj, op_name: str) -> bool:
+ if tm.get_dtype(obj).kind == "O":
+ return op_name in ["sum", "min", "max", "any", "all"]
+ return True
+
def check_reduce(self, s, op_name, skipna):
- result = getattr(s, op_name)(skipna=skipna)
+ res_op = getattr(s, op_name)
# avoid coercing int -> float. Just cast to the actual numpy type.
- expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
+ exp_op = getattr(s.astype(s.dtype._dtype), op_name)
+ if op_name == "count":
+ result = res_op()
+ expected = exp_op()
+ else:
+ result = res_op(skipna=skipna)
+ expected = exp_op(skipna=skipna)
tm.assert_almost_equal(result, expected)
@pytest.mark.skip("tests not written yet")
@@ -315,15 +326,6 @@ def check_reduce(self, s, op_name, skipna):
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
pass
- @pytest.mark.parametrize("skipna", [True, False])
- def test_reduce_series(self, data, all_boolean_reductions, skipna):
- super().test_reduce_series(data, all_boolean_reductions, skipna)
-
-
-@skip_nested
-class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
- pass
-
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index d42d79da17f4e..7256ea5837bbf 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -168,7 +168,7 @@ def test_fillna_no_op_returns_copy(self, data):
tm.assert_extension_array_equal(result, data)
-class TestNoReduce(base.BaseNoReduceTests):
+class TestReduce(base.BaseReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
| - [x] closes #44742 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Will clean up the pyarrow tests usage in a follow-up.
Didn't remove the no-longer-used test classes since downstream projects might be using them. Are we OK just ripping those out or does that require a deprecation cycle? | https://api.github.com/repos/pandas-dev/pandas/pulls/54394 | 2023-08-03T23:15:01Z | 2023-08-04T23:31:58Z | 2023-08-04T23:31:58Z | 2023-08-05T01:03:12Z |
REF: de-duplicate check_reduce_frame | diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index 8f3c919cb0957..b7edfb860549c 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -1,3 +1,4 @@
+from typing import final
import warnings
import pytest
@@ -15,6 +16,9 @@ class BaseReduceTests(BaseExtensionTests):
"""
def check_reduce(self, s, op_name, skipna):
+ # We perform the same operation on the np.float64 data and check
+ # that the results match. Override if you need to cast to something
+ # other than float64.
res_op = getattr(s, op_name)
exp_op = getattr(s.astype("float64"), op_name)
if op_name == "count":
@@ -25,6 +29,43 @@ def check_reduce(self, s, op_name, skipna):
expected = exp_op(skipna=skipna)
tm.assert_almost_equal(result, expected)
+ def _get_expected_reduction_dtype(self, arr, op_name: str):
+ # Find the expected dtype when the given reduction is done on a DataFrame
+ # column with this array. The default assumes float64-like behavior,
+ # i.e. retains the dtype.
+ return arr.dtype
+
+ # We anticipate that authors should not need to override check_reduce_frame,
+ # but should be able to do any necessary overriding in
+ # _get_expected_reduction_dtype. If you have a use case where this
+ # does not hold, please let us know at github.com/pandas-dev/pandas/issues.
+ @final
+ def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
+ # Check that the 2D reduction done in a DataFrame reduction "looks like"
+ # a wrapped version of the 1D reduction done by Series.
+ arr = ser.array
+ df = pd.DataFrame({"a": arr})
+
+ kwargs = {"ddof": 1} if op_name in ["var", "std"] else {}
+
+ cmp_dtype = self._get_expected_reduction_dtype(arr, op_name)
+
+ # The DataFrame method just calls arr._reduce with keepdims=True,
+ # so this first check is perfunctory.
+ result1 = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)
+ result2 = getattr(df, op_name)(skipna=skipna, **kwargs).array
+ tm.assert_extension_array_equal(result1, result2)
+
+ # Check that the 2D reduction looks like a wrapped version of the
+ # 1D reduction
+ if not skipna and ser.isna().any():
+ expected = pd.array([pd.NA], dtype=cmp_dtype)
+ else:
+ exp_value = getattr(ser.dropna(), op_name)()
+ expected = pd.array([exp_value], dtype=cmp_dtype)
+
+ tm.assert_extension_array_equal(result1, expected)
+
class BaseNoReduceTests(BaseReduceTests):
"""we don't define any reductions"""
@@ -71,9 +112,12 @@ def test_reduce_series(self, data, all_numeric_reductions, skipna):
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
s = pd.Series(data)
- if not is_numeric_dtype(s):
+ if not is_numeric_dtype(s.dtype):
pytest.skip("not numeric dtype")
+ if op_name in ["count", "kurt", "sem"]:
+ pytest.skip(f"{op_name} not an array method")
+
self.check_reduce_frame(s, op_name, skipna)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index b2d47ec7d8f32..944ed0dbff66e 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -160,27 +160,6 @@ def check_reduce(self, s, op_name, skipna):
expected = getattr(np.asarray(s), op_name)()
tm.assert_almost_equal(result, expected)
- def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
- arr = ser.array
- df = pd.DataFrame({"a": arr})
-
- if op_name in ["count", "kurt", "sem", "skew", "median"]:
- assert not hasattr(arr, op_name)
- pytest.skip(f"{op_name} not an array method")
-
- result1 = arr._reduce(op_name, skipna=skipna, keepdims=True)
- result2 = getattr(df, op_name)(skipna=skipna).array
-
- tm.assert_extension_array_equal(result1, result2)
-
- if not skipna and ser.isna().any():
- expected = DecimalArray([pd.NA])
- else:
- exp_value = getattr(ser.dropna(), op_name)()
- expected = DecimalArray([exp_value])
-
- tm.assert_extension_array_equal(result1, expected)
-
def test_reduction_without_keepdims(self):
# GH52788
# test _reduce without keepdims
@@ -205,7 +184,14 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
class TestNumericReduce(Reduce, base.BaseNumericReduceTests):
- pass
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
+ op_name = all_numeric_reductions
+ if op_name in ["skew", "median"]:
+ assert not hasattr(data, op_name)
+ pytest.skip(f"{op_name} not an array method")
+
+ return super().test_reduce_frame(data, all_numeric_reductions, skipna)
class TestBooleanReduce(Reduce, base.BaseBooleanReduceTests):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 7c4ea2d4d7b88..655ca9cc39c58 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -499,15 +499,7 @@ def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
request.node.add_marker(xfail_mark)
super().test_reduce_series(data, all_numeric_reductions, skipna)
- def check_reduce_frame(self, ser, op_name, skipna):
- arr = ser.array
-
- if op_name in ["count", "kurt", "sem", "skew"]:
- assert not hasattr(arr, op_name)
- return
-
- kwargs = {"ddof": 1} if op_name in ["var", "std"] else {}
-
+ def _get_expected_reduction_dtype(self, arr, op_name: str):
if op_name in ["max", "min"]:
cmp_dtype = arr.dtype
elif arr.dtype.name == "decimal128(7, 3)[pyarrow]":
@@ -523,15 +515,15 @@ def check_reduce_frame(self, ser, op_name, skipna):
"u": "uint64[pyarrow]",
"f": "float64[pyarrow]",
}[arr.dtype.kind]
- result = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)
+ return cmp_dtype
- if not skipna and ser.isna().any():
- expected = pd.array([pd.NA], dtype=cmp_dtype)
- else:
- exp_value = getattr(ser.dropna().astype(cmp_dtype), op_name)(**kwargs)
- expected = pd.array([exp_value], dtype=cmp_dtype)
-
- tm.assert_extension_array_equal(result, expected)
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
+ op_name = all_numeric_reductions
+ if op_name == "skew":
+ assert not hasattr(data, op_name)
+ return
+ return super().test_reduce_frame(data, all_numeric_reductions, skipna)
@pytest.mark.parametrize("typ", ["int64", "uint64", "float64"])
def test_median_not_approximate(self, typ):
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 3d9798169c736..e5f6da5371742 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -235,13 +235,7 @@ def check_reduce(self, s, op_name, skipna):
expected = bool(expected)
tm.assert_almost_equal(result, expected)
- def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
- arr = ser.array
-
- if op_name in ["count", "kurt", "sem"]:
- assert not hasattr(arr, op_name)
- pytest.skip(f"{op_name} not an array method")
-
+ def _get_expected_reduction_dtype(self, arr, op_name: str):
if op_name in ["mean", "median", "var", "std", "skew"]:
cmp_dtype = "Float64"
elif op_name in ["min", "max"]:
@@ -251,14 +245,7 @@ def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
cmp_dtype = "Int32" if is_windows_or_32bit else "Int64"
else:
raise TypeError("not supposed to reach this")
-
- result = arr._reduce(op_name, skipna=skipna, keepdims=True)
- if not skipna and ser.isna().any():
- expected = pd.array([pd.NA], dtype=cmp_dtype)
- else:
- exp_value = getattr(ser.dropna().astype(cmp_dtype), op_name)()
- expected = pd.array([exp_value], dtype=cmp_dtype)
- tm.assert_extension_array_equal(result, expected)
+ return cmp_dtype
class TestBooleanReduce(base.BaseBooleanReduceTests):
diff --git a/pandas/tests/extension/test_masked_numeric.py b/pandas/tests/extension/test_masked_numeric.py
index fc22ccabd7104..b171797dd6359 100644
--- a/pandas/tests/extension/test_masked_numeric.py
+++ b/pandas/tests/extension/test_masked_numeric.py
@@ -39,6 +39,8 @@
)
from pandas.tests.extension import base
+is_windows_or_32bit = is_platform_windows() or not IS64
+
pytestmark = [
pytest.mark.filterwarnings(
"ignore:invalid value encountered in divide:RuntimeWarning"
@@ -246,16 +248,7 @@ def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
expected = pd.NA
tm.assert_almost_equal(result, expected)
- def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
- if op_name in ["count", "kurt", "sem"]:
- assert not hasattr(ser.array, op_name)
- pytest.skip(f"{op_name} not an array method")
-
- arr = ser.array
- df = pd.DataFrame({"a": arr})
-
- is_windows_or_32bit = is_platform_windows() or not IS64
-
+ def _get_expected_reduction_dtype(self, arr, op_name: str):
if tm.is_float_dtype(arr.dtype):
cmp_dtype = arr.dtype.name
elif op_name in ["mean", "median", "var", "std", "skew"]:
@@ -270,18 +263,7 @@ def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
cmp_dtype = "UInt32" if is_windows_or_32bit else "UInt64"
else:
raise TypeError("not supposed to reach this")
-
- if not skipna and ser.isna().any():
- expected = pd.array([pd.NA], dtype=cmp_dtype)
- else:
- exp_value = getattr(ser.dropna().astype(cmp_dtype), op_name)()
- expected = pd.array([exp_value], dtype=cmp_dtype)
-
- result1 = arr._reduce(op_name, skipna=skipna, keepdims=True)
- result2 = getattr(df, op_name)(skipna=skipna).array
-
- tm.assert_extension_array_equal(result1, result2)
- tm.assert_extension_array_equal(result2, expected)
+ return cmp_dtype
@pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py")
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index f4ff423ad485b..db191954c8d59 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -311,7 +311,8 @@ def check_reduce(self, s, op_name, skipna):
tm.assert_almost_equal(result, expected)
@pytest.mark.skip("tests not written yet")
- def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_frame(self, data, all_numeric_reductions, skipna):
pass
@pytest.mark.parametrize("skipna", [True, False])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54393 | 2023-08-03T22:09:02Z | 2023-08-04T00:24:06Z | 2023-08-04T00:24:06Z | 2023-08-04T00:40:13Z |
TST: Use pytest or assert_produces_warnings instead of catch_warnings | diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index df5b69c471b09..c06c0f8703d11 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -576,16 +576,6 @@ ignore the error.
def test_thing(self):
pass
-If you need finer-grained control, you can use Python's
-`warnings module <https://docs.python.org/3/library/warnings.html>`__
-to control whether a warning is ignored or raised at different places within
-a single test.
-
-.. code-block:: python
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", FutureWarning)
-
Testing an exception
^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 120cd1cabdc45..ba052c6936dd9 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -293,6 +293,7 @@ def test_apply_mixed_dtype_corner_indexing():
tm.assert_series_equal(result, expected)
+@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("ax", ["index", "columns"])
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"]
@@ -303,9 +304,7 @@ def test_apply_empty_infer_type(ax, func, raw, axis):
df = DataFrame(**{ax: ["a", "b", "c"]})
with np.errstate(all="ignore"):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- test_res = func(np.array([], dtype="f8"))
+ test_res = func(np.array([], dtype="f8"))
is_reduction = not isinstance(test_res, np.ndarray)
result = df.apply(func, axis=axis, raw=raw)
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 5e6026a9348c4..a3d9de5e78afb 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -8,7 +8,6 @@
from itertools import chain
import re
-import warnings
import numpy as np
import pytest
@@ -298,6 +297,7 @@ def test_transform_and_agg_err_agg(axis, float_frame):
float_frame.agg(["max", "sqrt"], axis=axis)
+@pytest.mark.filterwarnings("ignore::FutureWarning") # GH53325
@pytest.mark.parametrize(
"func, msg",
[
@@ -312,10 +312,7 @@ def test_transform_and_agg_err_series(string_series, func, msg):
# we are trying to transform with an aggregator
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
- # GH53325
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", FutureWarning)
- string_series.agg(func)
+ string_series.agg(func)
@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index e6c743c76a2c1..01217f87b6359 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -11,7 +11,6 @@
starmap,
)
import operator
-import warnings
import numpy as np
import pytest
@@ -1166,6 +1165,7 @@ def test_dt64arr_add_sub_parr(
)
assert_invalid_addsub_type(dtarr, parr, msg)
+ @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
@@ -1183,14 +1183,10 @@ def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixtu
"cannot subtract DatetimeArray from ndarray",
]
)
-
- with warnings.catch_warnings(record=True):
- # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
- # applied to Series or DatetimeIndex
- # we aren't testing that here, so ignore.
- warnings.simplefilter("ignore", PerformanceWarning)
-
- assert_invalid_addsub_type(obj1, obj2, msg=msg)
+ # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
+ # applied to Series or DatetimeIndex
+ # we aren't testing that here, so ignore.
+ assert_invalid_addsub_type(obj1, obj2, msg=msg)
# -------------------------------------------------------------
# Other invalid operations
@@ -1370,6 +1366,7 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
+ @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
@pytest.mark.parametrize(
"cls_and_kwargs",
[
@@ -1458,28 +1455,26 @@ def test_dt64arr_add_sub_DateOffsets(
offset_cls = getattr(pd.offsets, cls_name)
- with warnings.catch_warnings(record=True):
- # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
- # applied to Series or DatetimeIndex
- # we aren't testing that here, so ignore.
- warnings.simplefilter("ignore", PerformanceWarning)
+ # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
+ # applied to Series or DatetimeIndex
+ # we aren't testing that here, so ignore.
- offset = offset_cls(n, normalize=normalize, **kwargs)
+ offset = offset_cls(n, normalize=normalize, **kwargs)
- expected = DatetimeIndex([x + offset for x in vec_items])
- expected = tm.box_expected(expected, box_with_array)
- tm.assert_equal(expected, vec + offset)
+ expected = DatetimeIndex([x + offset for x in vec_items])
+ expected = tm.box_expected(expected, box_with_array)
+ tm.assert_equal(expected, vec + offset)
- expected = DatetimeIndex([x - offset for x in vec_items])
- expected = tm.box_expected(expected, box_with_array)
- tm.assert_equal(expected, vec - offset)
+ expected = DatetimeIndex([x - offset for x in vec_items])
+ expected = tm.box_expected(expected, box_with_array)
+ tm.assert_equal(expected, vec - offset)
- expected = DatetimeIndex([offset + x for x in vec_items])
- expected = tm.box_expected(expected, box_with_array)
- tm.assert_equal(expected, offset + vec)
- msg = "(bad|unsupported) operand type for unary"
- with pytest.raises(TypeError, match=msg):
- offset - vec
+ expected = DatetimeIndex([offset + x for x in vec_items])
+ expected = tm.box_expected(expected, box_with_array)
+ tm.assert_equal(expected, offset + vec)
+ msg = "(bad|unsupported) operand type for unary"
+ with pytest.raises(TypeError, match=msg):
+ offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 508ef6cb9c025..2e49d6d845a2d 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -196,6 +194,7 @@ def test_comparison_with_tuple(self):
result = cat != (0, 1)
tm.assert_numpy_array_equal(result, ~expected)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne
):
@@ -206,12 +205,11 @@ def test_comparison_of_ordered_categorical_with_nan_to_scalar(
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
+ expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne
):
@@ -221,9 +219,7 @@ def test_comparison_of_ordered_categorical_with_nan_to_listlike(
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
+ expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index a4fbc8df4a8fa..9eee2e0bea687 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -49,7 +49,9 @@ def period_index(freqstr):
# TODO: non-monotone indexes; NaTs, different start dates
with warnings.catch_warnings():
# suppress deprecation of Period[B]
- warnings.simplefilter("ignore")
+ warnings.filterwarnings(
+ "ignore", message="Period with BDay freq", category=FutureWarning
+ )
pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
return pi
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index e87e0beda9251..f958d25e51103 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -3,7 +3,6 @@
from functools import reduce
from itertools import product
import operator
-import warnings
import numpy as np
import pytest
@@ -761,28 +760,26 @@ def test_align_nested_unary_op(self, engine, parser):
res = pd.eval(s, engine=engine, parser=parser)
tm.assert_frame_equal(res, df * ~2)
+ @pytest.mark.filterwarnings("always::RuntimeWarning")
@pytest.mark.parametrize("lr_idx_type", lhs_index_types)
@pytest.mark.parametrize("rr_idx_type", index_types)
@pytest.mark.parametrize("c_idx_type", index_types)
def test_basic_frame_alignment(
self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type
):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always", RuntimeWarning)
-
- df = tm.makeCustomDataframe(
- 10, 10, data_gen_f=f, r_idx_type=lr_idx_type, c_idx_type=c_idx_type
- )
- df2 = tm.makeCustomDataframe(
- 20, 10, data_gen_f=f, r_idx_type=rr_idx_type, c_idx_type=c_idx_type
- )
- # only warns if not monotonic and not sortable
- if should_warn(df.index, df2.index):
- with tm.assert_produces_warning(RuntimeWarning):
- res = pd.eval("df + df2", engine=engine, parser=parser)
- else:
+ df = tm.makeCustomDataframe(
+ 10, 10, data_gen_f=f, r_idx_type=lr_idx_type, c_idx_type=c_idx_type
+ )
+ df2 = tm.makeCustomDataframe(
+ 20, 10, data_gen_f=f, r_idx_type=rr_idx_type, c_idx_type=c_idx_type
+ )
+ # only warns if not monotonic and not sortable
+ if should_warn(df.index, df2.index):
+ with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + df2", engine=engine, parser=parser)
- tm.assert_frame_equal(res, df + df2)
+ else:
+ res = pd.eval("df + df2", engine=engine, parser=parser)
+ tm.assert_frame_equal(res, df + df2)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
@pytest.mark.parametrize("c_idx_type", lhs_index_types)
@@ -801,55 +798,46 @@ def test_frame_comparison(self, engine, parser, r_idx_type, c_idx_type):
res = pd.eval("df < df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < df3)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@pytest.mark.parametrize("c2", index_types)
def test_medium_complex_frame_alignment(self, engine, parser, r1, c1, r2, c2):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always", RuntimeWarning)
-
- df = tm.makeCustomDataframe(
- 3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
- )
- df2 = tm.makeCustomDataframe(
- 4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
- )
- df3 = tm.makeCustomDataframe(
- 5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
- )
- if should_warn(df.index, df2.index, df3.index):
- with tm.assert_produces_warning(RuntimeWarning):
- res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
- else:
+ df = tm.makeCustomDataframe(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
+ df2 = tm.makeCustomDataframe(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
+ df3 = tm.makeCustomDataframe(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
+ if should_warn(df.index, df2.index, df3.index):
+ with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
- tm.assert_frame_equal(res, df + df2 + df3)
+ else:
+ res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
+ tm.assert_frame_equal(res, df + df2 + df3)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
def test_basic_frame_series_alignment(
self, engine, parser, index_name, r_idx_type, c_idx_type
):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always", RuntimeWarning)
- df = tm.makeCustomDataframe(
- 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
- )
- index = getattr(df, index_name)
- s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
+ df = tm.makeCustomDataframe(
+ 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
+ )
+ index = getattr(df, index_name)
+ s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
- if should_warn(df.index, s.index):
- with tm.assert_produces_warning(RuntimeWarning):
- res = pd.eval("df + s", engine=engine, parser=parser)
- else:
+ if should_warn(df.index, s.index):
+ with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + s", engine=engine, parser=parser)
+ else:
+ res = pd.eval("df + s", engine=engine, parser=parser)
- if r_idx_type == "dt" or c_idx_type == "dt":
- expected = df.add(s) if engine == "numexpr" else df + s
- else:
- expected = df + s
- tm.assert_frame_equal(res, expected)
+ if r_idx_type == "dt" or c_idx_type == "dt":
+ expected = df.add(s) if engine == "numexpr" else df + s
+ else:
+ expected = df + s
+ tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize(
@@ -890,6 +878,7 @@ def test_basic_series_frame_alignment(
expected = s + df
tm.assert_frame_equal(res, expected)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@@ -897,30 +886,28 @@ def test_basic_series_frame_alignment(
def test_series_frame_commutativity(
self, engine, parser, index_name, op, r_idx_type, c_idx_type
):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always", RuntimeWarning)
+ df = tm.makeCustomDataframe(
+ 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
+ )
+ index = getattr(df, index_name)
+ s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
- df = tm.makeCustomDataframe(
- 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
- )
- index = getattr(df, index_name)
- s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
-
- lhs = f"s {op} df"
- rhs = f"df {op} s"
- if should_warn(df.index, s.index):
- with tm.assert_produces_warning(RuntimeWarning):
- a = pd.eval(lhs, engine=engine, parser=parser)
- with tm.assert_produces_warning(RuntimeWarning):
- b = pd.eval(rhs, engine=engine, parser=parser)
- else:
+ lhs = f"s {op} df"
+ rhs = f"df {op} s"
+ if should_warn(df.index, s.index):
+ with tm.assert_produces_warning(RuntimeWarning):
a = pd.eval(lhs, engine=engine, parser=parser)
+ with tm.assert_produces_warning(RuntimeWarning):
b = pd.eval(rhs, engine=engine, parser=parser)
+ else:
+ a = pd.eval(lhs, engine=engine, parser=parser)
+ b = pd.eval(rhs, engine=engine, parser=parser)
- if r_idx_type != "dt" and c_idx_type != "dt":
- if engine == "numexpr":
- tm.assert_frame_equal(a, b)
+ if r_idx_type != "dt" and c_idx_type != "dt":
+ if engine == "numexpr":
+ tm.assert_frame_equal(a, b)
+ @pytest.mark.filterwarnings("always::RuntimeWarning")
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@@ -930,44 +917,37 @@ def test_complex_series_frame_alignment(self, engine, parser, r1, c1, r2, c2):
m1 = 5
m2 = 2 * m1
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always", RuntimeWarning)
+ index_name = np.random.default_rng(2).choice(["index", "columns"])
+ obj_name = np.random.default_rng(2).choice(["df", "df2"])
- index_name = np.random.default_rng(2).choice(["index", "columns"])
- obj_name = np.random.default_rng(2).choice(["df", "df2"])
+ df = tm.makeCustomDataframe(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
+ df2 = tm.makeCustomDataframe(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
+ index = getattr(locals().get(obj_name), index_name)
+ ser = Series(np.random.default_rng(2).standard_normal(n), index[:n])
- df = tm.makeCustomDataframe(
- m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
- )
- df2 = tm.makeCustomDataframe(
- m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
- )
- index = getattr(locals().get(obj_name), index_name)
- ser = Series(np.random.default_rng(2).standard_normal(n), index[:n])
-
- if r2 == "dt" or c2 == "dt":
- if engine == "numexpr":
- expected2 = df2.add(ser)
- else:
- expected2 = df2 + ser
+ if r2 == "dt" or c2 == "dt":
+ if engine == "numexpr":
+ expected2 = df2.add(ser)
else:
expected2 = df2 + ser
+ else:
+ expected2 = df2 + ser
- if r1 == "dt" or c1 == "dt":
- if engine == "numexpr":
- expected = expected2.add(df)
- else:
- expected = expected2 + df
+ if r1 == "dt" or c1 == "dt":
+ if engine == "numexpr":
+ expected = expected2.add(df)
else:
expected = expected2 + df
+ else:
+ expected = expected2 + df
- if should_warn(df2.index, ser.index, df.index):
- with tm.assert_produces_warning(RuntimeWarning):
- res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
- else:
+ if should_warn(df2.index, ser.index, df.index):
+ with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
- assert res.shape == expected.shape
- tm.assert_frame_equal(res, expected)
+ else:
+ res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
+ assert res.shape == expected.shape
+ tm.assert_frame_equal(res, expected)
def test_performance_warning_for_poor_alignment(self, engine, parser):
df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10)))
diff --git a/pandas/tests/config/test_config.py b/pandas/tests/config/test_config.py
index aad42b27cb80b..f49ae94242399 100644
--- a/pandas/tests/config/test_config.py
+++ b/pandas/tests/config/test_config.py
@@ -1,11 +1,10 @@
-import warnings
-
import pytest
from pandas._config import config as cf
from pandas._config.config import OptionError
import pandas as pd
+import pandas._testing as tm
class TestConfig:
@@ -270,38 +269,26 @@ def test_deprecate_option(self):
cf.deprecate_option("foo")
assert cf._is_deprecated("foo")
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="deprecated"):
with pytest.raises(KeyError, match="No such keys.s.: 'foo'"):
cf.get_option("foo")
- assert len(w) == 1 # should have raised one warning
- assert "deprecated" in str(w[-1]) # we get the default message
cf.register_option("a", 1, "doc", validator=cf.is_int)
cf.register_option("b.c", "hullo", "doc2")
cf.register_option("foo", "hullo", "doc2")
cf.deprecate_option("a", removal_ver="nifty_ver")
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="eprecated.*nifty_ver"):
cf.get_option("a")
- assert len(w) == 1 # should have raised one warning
- assert "eprecated" in str(w[-1]) # we get the default message
- assert "nifty_ver" in str(w[-1]) # with the removal_ver quoted
-
msg = "Option 'a' has already been defined as deprecated"
with pytest.raises(OptionError, match=msg):
cf.deprecate_option("a")
cf.deprecate_option("b.c", "zounds!")
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="zounds!"):
cf.get_option("b.c")
- assert len(w) == 1 # should have raised one warning
- assert "zounds!" in str(w[-1]) # we get the custom message
-
# test rerouting keys
cf.register_option("d.a", "foo", "doc2")
cf.register_option("d.dep", "bar", "doc2")
@@ -309,27 +296,15 @@ def test_deprecate_option(self):
assert cf.get_option("d.dep") == "bar"
cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="eprecated"):
assert cf.get_option("d.dep") == "foo"
- assert len(w) == 1 # should have raised one warning
- assert "eprecated" in str(w[-1]) # we get the custom message
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="eprecated"):
cf.set_option("d.dep", "baz") # should overwrite "d.a"
- assert len(w) == 1 # should have raised one warning
- assert "eprecated" in str(w[-1]) # we get the custom message
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(FutureWarning, match="eprecated"):
assert cf.get_option("d.dep") == "baz"
- assert len(w) == 1 # should have raised one warning
- assert "eprecated" in str(w[-1]) # we get the custom message
-
def test_config_prefix(self):
with cf.config_prefix("base"):
cf.register_option("a", 1, "doc1")
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index 25abad0004e7e..3da3237370e60 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -1,5 +1,4 @@
import re
-from warnings import catch_warnings
import numpy as np
import pytest
@@ -107,25 +106,22 @@ def test_setattr_warnings():
}
df = pd.DataFrame(d)
- with catch_warnings(record=True) as w:
+ with tm.assert_produces_warning(None):
# successfully add new column
# this should not raise a warning
df["three"] = df.two + 1
- assert len(w) == 0
assert df.three.sum() > df.two.sum()
- with catch_warnings(record=True) as w:
+ with tm.assert_produces_warning(None):
# successfully modify column in place
# this should not raise a warning
df.one += 1
- assert len(w) == 0
assert df.one.iloc[0] == 2
- with catch_warnings(record=True) as w:
+ with tm.assert_produces_warning(None):
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
- assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index 00919b16a2600..3d2870191ff6b 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -1,5 +1,4 @@
from typing import final
-import warnings
import pytest
@@ -87,6 +86,7 @@ def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
else:
self.check_reduce(s, op_name, skipna)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
@@ -103,9 +103,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
else:
# min/max with empty produce numpy warnings
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- self.check_reduce(s, op_name, skipna)
+ self.check_reduce(s, op_name, skipna)
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index f61cc2d76845c..23a9656193d2c 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -153,6 +151,7 @@ def test_corr_constant(self, meth):
rs = df.corr(meth)
assert isna(rs.values).all()
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_int_and_boolean(self, meth):
# when dtypes of pandas series are different
@@ -162,10 +161,7 @@ def test_corr_int_and_boolean(self, meth):
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
-
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- result = df.corr(meth)
+ result = df.corr(meth)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["cov", "corr"])
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 8244b62dd9e66..c8b67675b7798 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -14,7 +14,6 @@
)
import functools
import re
-import warnings
import numpy as np
from numpy import ma
@@ -1054,6 +1053,9 @@ def test_constructor_maskedarray(self):
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
+ @pytest.mark.filterwarnings(
+ "ignore:elementwise comparison failed:DeprecationWarning"
+ )
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
@@ -1088,13 +1090,7 @@ def test_constructor_maskedarray_nonfloat(self):
# cast type
msg = r"datetime64\[ns\] values and dtype=int64 is not supported"
with pytest.raises(TypeError, match=msg):
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- category=DeprecationWarning,
- message="elementwise comparison failed",
- )
- DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64)
+ DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64)
# Check non-masked values
mat2 = ma.copy(mat)
diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py
index fe0b9464c7f45..14de6c5907d03 100644
--- a/pandas/tests/indexes/datetimes/methods/test_to_period.py
+++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py
@@ -1,5 +1,3 @@
-import warnings
-
import dateutil.tz
from dateutil.tz import tzlocal
import pytest
@@ -93,16 +91,10 @@ def test_to_period_infer(self):
freq="5min",
)
- with tm.assert_produces_warning(None):
- # Using simple filter because we are not checking for the warning here
- warnings.simplefilter("ignore", UserWarning)
-
+ with tm.assert_produces_warning(UserWarning):
pi1 = rng.to_period("5min")
- with tm.assert_produces_warning(None):
- # Using simple filter because we are not checking for the warning here
- warnings.simplefilter("ignore", UserWarning)
-
+ with tm.assert_produces_warning(UserWarning):
pi2 = rng.to_period()
tm.assert_index_equal(pi1, pi2)
diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py
index f069cdbedabf0..99c8ebb1e57b2 100644
--- a/pandas/tests/indexes/multi/test_drop.py
+++ b/pandas/tests/indexes/multi/test_drop.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -154,12 +152,11 @@ def test_drop_with_nan_in_index(nulls_fixture):
mi.drop(pd.Timestamp("2001"), level="date")
+@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_drop_with_non_monotonic_duplicates():
# GH#33494
mi = MultiIndex.from_tuples([(1, 2), (2, 3), (1, 2)])
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", PerformanceWarning)
- result = mi.drop((1, 2))
+ result = mi.drop((1, 2))
expected = MultiIndex.from_tuples([(2, 3)])
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 62172ec9a83ad..bc7604330695f 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -2,10 +2,6 @@
from datetime import datetime
import re
-from warnings import (
- catch_warnings,
- simplefilter,
-)
import numpy as np
import pytest
@@ -742,6 +738,7 @@ def test_iloc_setitem_with_scalar_index(self, indexer, value):
assert is_scalar(result) and result == "Z"
+ @pytest.mark.filterwarnings("ignore::UserWarning")
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
@@ -786,32 +783,30 @@ def test_iloc_mask(self):
}
# UserWarnings from reindex of a boolean mask
- with catch_warnings(record=True):
- simplefilter("ignore", UserWarning)
- for idx in [None, "index", "locs"]:
- mask = (df.nums > 2).values
- if idx:
- mask_index = getattr(df, idx)[::-1]
- mask = Series(mask, list(mask_index))
- for method in ["", ".loc", ".iloc"]:
- try:
- if method:
- accessor = getattr(df, method[1:])
- else:
- accessor = df
- answer = str(bin(accessor[mask]["nums"].sum()))
- except (ValueError, IndexingError, NotImplementedError) as e:
- answer = str(e)
-
- key = (
- idx,
- method,
+ for idx in [None, "index", "locs"]:
+ mask = (df.nums > 2).values
+ if idx:
+ mask_index = getattr(df, idx)[::-1]
+ mask = Series(mask, list(mask_index))
+ for method in ["", ".loc", ".iloc"]:
+ try:
+ if method:
+ accessor = getattr(df, method[1:])
+ else:
+ accessor = df
+ answer = str(bin(accessor[mask]["nums"].sum()))
+ except (ValueError, IndexingError, NotImplementedError) as e:
+ answer = str(e)
+
+ key = (
+ idx,
+ method,
+ )
+ r = expected.get(key)
+ if r != answer:
+ raise AssertionError(
+ f"[{key}] does not match [{answer}], received [{r}]"
)
- r = expected.get(key)
- if r != answer:
- raise AssertionError(
- f"[{key}] does not match [{answer}], received [{r}]"
- )
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index 817daad9849c0..492b4d5ec058e 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -7,7 +7,6 @@
from io import StringIO
import os
from pathlib import Path
-import warnings
import numpy as np
import pytest
@@ -203,6 +202,7 @@ def test_null_byte_char(request, all_parsers):
parser.read_csv(StringIO(data), names=names)
+@pytest.mark.filterwarnings("always::ResourceWarning")
def test_open_file(request, all_parsers):
# GH 39024
parser = all_parsers
@@ -218,12 +218,10 @@ def test_open_file(request, all_parsers):
file = Path(path)
file.write_bytes(b"\xe4\na\n1")
- with warnings.catch_warnings(record=True) as record:
+ with tm.assert_produces_warning(None):
# should not trigger a ResourceWarning
- warnings.simplefilter("always", category=ResourceWarning)
with pytest.raises(csv.Error, match="Could not determine delimiter"):
parser.read_csv(file, sep=None, encoding_errors="replace")
- assert len(record) == 0, record[0].message
def test_invalid_on_bad_line(all_parsers):
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index a447601f3d8c4..c5a053a7500fe 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -1,7 +1,6 @@
import datetime
from datetime import timedelta
import re
-from warnings import catch_warnings
import numpy as np
import pytest
@@ -25,74 +24,76 @@
pytestmark = pytest.mark.single_cpu
+tables = pytest.importorskip("tables")
+
+@pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
def test_append(setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning):
- with catch_warnings(record=True):
- df = tm.makeTimeDataFrame()
- _maybe_remove(store, "df1")
- store.append("df1", df[:10])
- store.append("df1", df[10:])
- tm.assert_frame_equal(store["df1"], df)
+ df = tm.makeTimeDataFrame()
+ _maybe_remove(store, "df1")
+ store.append("df1", df[:10])
+ store.append("df1", df[10:])
+ tm.assert_frame_equal(store["df1"], df)
- _maybe_remove(store, "df2")
- store.put("df2", df[:10], format="table")
- store.append("df2", df[10:])
- tm.assert_frame_equal(store["df2"], df)
+ _maybe_remove(store, "df2")
+ store.put("df2", df[:10], format="table")
+ store.append("df2", df[10:])
+ tm.assert_frame_equal(store["df2"], df)
- _maybe_remove(store, "df3")
- store.append("/df3", df[:10])
- store.append("/df3", df[10:])
- tm.assert_frame_equal(store["df3"], df)
+ _maybe_remove(store, "df3")
+ store.append("/df3", df[:10])
+ store.append("/df3", df[10:])
+ tm.assert_frame_equal(store["df3"], df)
- # this is allowed by almost always don't want to do it
- # tables.NaturalNameWarning
- _maybe_remove(store, "/df3 foo")
- store.append("/df3 foo", df[:10])
- store.append("/df3 foo", df[10:])
- tm.assert_frame_equal(store["df3 foo"], df)
-
- # dtype issues - mizxed type in a single object column
- df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
- df["mixed_column"] = "testing"
- df.loc[2, "mixed_column"] = np.nan
- _maybe_remove(store, "df")
- store.append("df", df)
- tm.assert_frame_equal(store["df"], df)
+        # this is allowed but you almost always don't want to do it
+ # tables.NaturalNameWarning
+ _maybe_remove(store, "/df3 foo")
+ store.append("/df3 foo", df[:10])
+ store.append("/df3 foo", df[10:])
+ tm.assert_frame_equal(store["df3 foo"], df)
+
+        # dtype issues - mixed type in a single object column
+ df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
+ df["mixed_column"] = "testing"
+ df.loc[2, "mixed_column"] = np.nan
+ _maybe_remove(store, "df")
+ store.append("df", df)
+ tm.assert_frame_equal(store["df"], df)
- # uints - test storage of uints
- uint_data = DataFrame(
- {
- "u08": Series(
- np.random.default_rng(2).integers(0, high=255, size=5),
- dtype=np.uint8,
- ),
- "u16": Series(
- np.random.default_rng(2).integers(0, high=65535, size=5),
- dtype=np.uint16,
- ),
- "u32": Series(
- np.random.default_rng(2).integers(0, high=2**30, size=5),
- dtype=np.uint32,
- ),
- "u64": Series(
- [2**58, 2**59, 2**60, 2**61, 2**62],
- dtype=np.uint64,
- ),
- },
- index=np.arange(5),
- )
- _maybe_remove(store, "uints")
- store.append("uints", uint_data)
- tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
+ # uints - test storage of uints
+ uint_data = DataFrame(
+ {
+ "u08": Series(
+ np.random.default_rng(2).integers(0, high=255, size=5),
+ dtype=np.uint8,
+ ),
+ "u16": Series(
+ np.random.default_rng(2).integers(0, high=65535, size=5),
+ dtype=np.uint16,
+ ),
+ "u32": Series(
+ np.random.default_rng(2).integers(0, high=2**30, size=5),
+ dtype=np.uint32,
+ ),
+ "u64": Series(
+ [2**58, 2**59, 2**60, 2**61, 2**62],
+ dtype=np.uint64,
+ ),
+ },
+ index=np.arange(5),
+ )
+ _maybe_remove(store, "uints")
+ store.append("uints", uint_data)
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
- # uints - test storage of uints in indexable columns
- _maybe_remove(store, "uints")
- # 64-bit indices not yet supported
- store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
- tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
+ # uints - test storage of uints in indexable columns
+ _maybe_remove(store, "uints")
+ # 64-bit indices not yet supported
+ store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
def test_append_series(setup_path):
@@ -357,81 +358,77 @@ def test_append_with_different_block_ordering(setup_path):
def test_append_with_strings(setup_path):
with ensure_clean_store(setup_path) as store:
- with catch_warnings(record=True):
-
- def check_col(key, name, size):
- assert (
- getattr(store.get_storer(key).table.description, name).itemsize
- == size
- )
-
- # avoid truncation on elements
- df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
- store.append("df_big", df)
- tm.assert_frame_equal(store.select("df_big"), df)
- check_col("df_big", "values_block_1", 15)
-
- # appending smaller string ok
- df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
- store.append("df_big", df2)
- expected = concat([df, df2])
- tm.assert_frame_equal(store.select("df_big"), expected)
- check_col("df_big", "values_block_1", 15)
-
- # avoid truncation on elements
- df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
- store.append("df_big2", df, min_itemsize={"values": 50})
- tm.assert_frame_equal(store.select("df_big2"), df)
- check_col("df_big2", "values_block_1", 50)
-
- # bigger string on next append
- store.append("df_new", df)
- df_new = DataFrame(
- [[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
- )
- msg = (
- r"Trying to store a string with len \[26\] in "
- r"\[values_block_1\] column but\n"
- r"this column has a limit of \[15\]!\n"
- "Consider using min_itemsize to preset the sizes on these "
- "columns"
+
+ def check_col(key, name, size):
+ assert (
+ getattr(store.get_storer(key).table.description, name).itemsize == size
)
- with pytest.raises(ValueError, match=msg):
- store.append("df_new", df_new)
-
- # min_itemsize on Series index (GH 11412)
- df = tm.makeMixedDataFrame().set_index("C")
- store.append("ss", df["B"], min_itemsize={"index": 4})
- tm.assert_series_equal(store.select("ss"), df["B"])
-
- # same as above, with data_columns=True
- store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4})
- tm.assert_series_equal(store.select("ss2"), df["B"])
-
- # min_itemsize in index without appending (GH 10381)
- store.put("ss3", df, format="table", min_itemsize={"index": 6})
- # just make sure there is a longer string:
- df2 = df.copy().reset_index().assign(C="longer").set_index("C")
- store.append("ss3", df2)
- tm.assert_frame_equal(store.select("ss3"), concat([df, df2]))
-
- # same as above, with a Series
- store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
- store.append("ss4", df2["B"])
- tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]]))
-
- # with nans
- _maybe_remove(store, "df")
- df = tm.makeTimeDataFrame()
- df["string"] = "foo"
- df.loc[df.index[1:4], "string"] = np.nan
- df["string2"] = "bar"
- df.loc[df.index[4:8], "string2"] = np.nan
- df["string3"] = "bah"
- df.loc[df.index[1:], "string3"] = np.nan
- store.append("df", df)
- result = store.select("df")
- tm.assert_frame_equal(result, df)
+
+ # avoid truncation on elements
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
+ store.append("df_big", df)
+ tm.assert_frame_equal(store.select("df_big"), df)
+ check_col("df_big", "values_block_1", 15)
+
+ # appending smaller string ok
+ df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
+ store.append("df_big", df2)
+ expected = concat([df, df2])
+ tm.assert_frame_equal(store.select("df_big"), expected)
+ check_col("df_big", "values_block_1", 15)
+
+ # avoid truncation on elements
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
+ store.append("df_big2", df, min_itemsize={"values": 50})
+ tm.assert_frame_equal(store.select("df_big2"), df)
+ check_col("df_big2", "values_block_1", 50)
+
+ # bigger string on next append
+ store.append("df_new", df)
+ df_new = DataFrame([[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]])
+ msg = (
+ r"Trying to store a string with len \[26\] in "
+ r"\[values_block_1\] column but\n"
+ r"this column has a limit of \[15\]!\n"
+ "Consider using min_itemsize to preset the sizes on these "
+ "columns"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df_new", df_new)
+
+ # min_itemsize on Series index (GH 11412)
+ df = tm.makeMixedDataFrame().set_index("C")
+ store.append("ss", df["B"], min_itemsize={"index": 4})
+ tm.assert_series_equal(store.select("ss"), df["B"])
+
+ # same as above, with data_columns=True
+ store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4})
+ tm.assert_series_equal(store.select("ss2"), df["B"])
+
+ # min_itemsize in index without appending (GH 10381)
+ store.put("ss3", df, format="table", min_itemsize={"index": 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C="longer").set_index("C")
+ store.append("ss3", df2)
+ tm.assert_frame_equal(store.select("ss3"), concat([df, df2]))
+
+ # same as above, with a Series
+ store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
+ store.append("ss4", df2["B"])
+ tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]]))
+
+ # with nans
+ _maybe_remove(store, "df")
+ df = tm.makeTimeDataFrame()
+ df["string"] = "foo"
+ df.loc[df.index[1:4], "string"] = np.nan
+ df["string2"] = "bar"
+ df.loc[df.index[4:8], "string2"] = np.nan
+ df["string3"] = "bah"
+ df.loc[df.index[1:], "string3"] = np.nan
+ store.append("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
df = DataFrame({"A": "foo", "B": "bar"}, index=range(10))
diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py
index 7df6223df70e7..dd2f650a2070d 100644
--- a/pandas/tests/io/pytables/test_complex.py
+++ b/pandas/tests/io/pytables/test_complex.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings
-
import numpy as np
import pytest
@@ -114,18 +112,17 @@ def test_complex_mixed_table(tmp_path, setup_path):
def test_complex_across_dimensions_fixed(tmp_path, setup_path):
- with catch_warnings(record=True):
- complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
- s = Series(complex128, index=list("abcd"))
- df = DataFrame({"A": s, "B": s})
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list("abcd"))
+ df = DataFrame({"A": s, "B": s})
- objs = [s, df]
- comps = [tm.assert_series_equal, tm.assert_frame_equal]
- for obj, comp in zip(objs, comps):
- path = tmp_path / setup_path
- obj.to_hdf(path, "obj", format="fixed")
- reread = read_hdf(path, "obj")
- comp(obj, reread)
+ objs = [s, df]
+ comps = [tm.assert_series_equal, tm.assert_frame_equal]
+ for obj, comp in zip(objs, comps):
+ path = tmp_path / setup_path
+ obj.to_hdf(path, "obj", format="fixed")
+ reread = read_hdf(path, "obj")
+ comp(obj, reread)
def test_complex_across_dimensions(tmp_path, setup_path):
@@ -133,14 +130,10 @@ def test_complex_across_dimensions(tmp_path, setup_path):
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
- with catch_warnings(record=True):
- objs = [df]
- comps = [tm.assert_frame_equal]
- for obj, comp in zip(objs, comps):
- path = tmp_path / setup_path
- obj.to_hdf(path, "obj", format="table")
- reread = read_hdf(path, "obj")
- comp(obj, reread)
+ path = tmp_path / setup_path
+ df.to_hdf(path, "obj", format="table")
+ reread = read_hdf(path, "obj")
+ tm.assert_frame_equal(df, reread)
def test_complex_indexing_error(setup_path):
diff --git a/pandas/tests/io/pytables/test_errors.py b/pandas/tests/io/pytables/test_errors.py
index 44bdbfc3fdd7e..72fdb0f78d8e6 100644
--- a/pandas/tests/io/pytables/test_errors.py
+++ b/pandas/tests/io/pytables/test_errors.py
@@ -1,7 +1,6 @@
import datetime
from io import BytesIO
import re
-from warnings import catch_warnings
import numpy as np
import pytest
@@ -86,31 +85,28 @@ def test_unimplemented_dtypes_table_columns(setup_path):
def test_invalid_terms(tmp_path, setup_path):
with ensure_clean_store(setup_path) as store:
- with catch_warnings(record=True):
- df = tm.makeTimeDataFrame()
- df["string"] = "foo"
- df.loc[df.index[0:4], "string"] = "bar"
+ df = tm.makeTimeDataFrame()
+ df["string"] = "foo"
+ df.loc[df.index[0:4], "string"] = "bar"
- store.put("df", df, format="table")
+ store.put("df", df, format="table")
- # some invalid terms
- msg = re.escape(
- "__init__() missing 1 required positional argument: 'where'"
- )
- with pytest.raises(TypeError, match=msg):
- Term()
+ # some invalid terms
+ msg = re.escape("__init__() missing 1 required positional argument: 'where'")
+ with pytest.raises(TypeError, match=msg):
+ Term()
- # more invalid
- msg = re.escape(
- "cannot process expression [df.index[3]], "
- "[2000-01-06 00:00:00] is not a valid condition"
- )
- with pytest.raises(ValueError, match=msg):
- store.select("df", "df.index[3]")
+ # more invalid
+ msg = re.escape(
+ "cannot process expression [df.index[3]], "
+ "[2000-01-06 00:00:00] is not a valid condition"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.select("df", "df.index[3]")
- msg = "invalid syntax"
- with pytest.raises(SyntaxError, match=msg):
- store.select("df", "index>")
+ msg = "invalid syntax"
+ with pytest.raises(SyntaxError, match=msg):
+ store.select("df", "index>")
# from the docs
path = tmp_path / setup_path
diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py
index e63e3f71c93b5..5bf94340f4d3f 100644
--- a/pandas/tests/io/pytables/test_put.py
+++ b/pandas/tests/io/pytables/test_put.py
@@ -1,9 +1,5 @@
import datetime
import re
-from warnings import (
- catch_warnings,
- simplefilter,
-)
import numpy as np
import pytest
@@ -193,9 +189,7 @@ def test_put_mixed_type(setup_path):
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
- # PerformanceWarning
- with catch_warnings(record=True):
- simplefilter("ignore", pd.errors.PerformanceWarning)
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
store.put("df", df)
expected = store.get("df")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 8ffdc421492a5..085db5f521a9f 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -1,9 +1,5 @@
import datetime
import re
-from warnings import (
- catch_warnings,
- simplefilter,
-)
import numpy as np
import pytest
@@ -281,60 +277,51 @@ def test_tuple_index(setup_path):
data = np.random.default_rng(2).standard_normal(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
- with catch_warnings(record=True):
- simplefilter("ignore", pd.errors.PerformanceWarning)
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
_check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(setup_path):
- with catch_warnings(record=True):
- values = np.random.default_rng(2).standard_normal(2)
+ values = np.random.default_rng(2).standard_normal(2)
- func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True)
+ func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True)
- with catch_warnings(record=True):
- ser = Series(values, [0, "y"])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [0, "y"])
+ _check_roundtrip(ser, func, path=setup_path)
- with catch_warnings(record=True):
- ser = Series(values, [datetime.datetime.today(), 0])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [datetime.datetime.today(), 0])
+ _check_roundtrip(ser, func, path=setup_path)
- with catch_warnings(record=True):
- ser = Series(values, ["y", 0])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, ["y", 0])
+ _check_roundtrip(ser, func, path=setup_path)
- with catch_warnings(record=True):
- ser = Series(values, [datetime.date.today(), "a"])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [datetime.date.today(), "a"])
+ _check_roundtrip(ser, func, path=setup_path)
- with catch_warnings(record=True):
- ser = Series(values, [0, "y"])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [0, "y"])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, [datetime.datetime.today(), 0])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [datetime.datetime.today(), 0])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, ["y", 0])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, ["y", 0])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, [datetime.date.today(), "a"])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [datetime.date.today(), "a"])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, [1.23, "b"])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [1.23, "b"])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, [1, 1.53])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [1, 1.53])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(values, [1, 5])
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [1, 5])
+ _check_roundtrip(ser, func, path=setup_path)
- ser = Series(
- values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
- )
- _check_roundtrip(ser, func, path=setup_path)
+ ser = Series(values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)])
+ _check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(setup_path, request):
@@ -499,14 +486,11 @@ def _check_roundtrip_table(obj, comparator, path, compression=False):
def test_unicode_index(setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
- # PerformanceWarning
- with catch_warnings(record=True):
- simplefilter("ignore", pd.errors.PerformanceWarning)
- s = Series(
- np.random.default_rng(2).standard_normal(len(unicode_values)),
- unicode_values,
- )
- _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
+ s = Series(
+ np.random.default_rng(2).standard_normal(len(unicode_values)),
+ unicode_values,
+ )
+ _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(setup_path):
diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py
index 77e69f1264f1b..dc592beae45ba 100644
--- a/pandas/tests/io/pytables/test_select.py
+++ b/pandas/tests/io/pytables/test_select.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings
-
import numpy as np
import pytest
@@ -131,40 +129,39 @@ def test_select_with_dups(setup_path):
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
- with catch_warnings(record=True):
- # select with columns=
- df = tm.makeTimeDataFrame()
- _maybe_remove(store, "df")
- store.append("df", df)
- result = store.select("df", columns=["A", "B"])
- expected = df.reindex(columns=["A", "B"])
- tm.assert_frame_equal(expected, result)
+ # select with columns=
+ df = tm.makeTimeDataFrame()
+ _maybe_remove(store, "df")
+ store.append("df", df)
+ result = store.select("df", columns=["A", "B"])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
- # equivalently
- result = store.select("df", [("columns=['A', 'B']")])
- expected = df.reindex(columns=["A", "B"])
- tm.assert_frame_equal(expected, result)
+ # equivalently
+ result = store.select("df", [("columns=['A', 'B']")])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
- # with a data column
- _maybe_remove(store, "df")
- store.append("df", df, data_columns=["A"])
- result = store.select("df", ["A > 0"], columns=["A", "B"])
- expected = df[df.A > 0].reindex(columns=["A", "B"])
- tm.assert_frame_equal(expected, result)
+ # with a data column
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["A"])
+ result = store.select("df", ["A > 0"], columns=["A", "B"])
+ expected = df[df.A > 0].reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
- # all a data columns
- _maybe_remove(store, "df")
- store.append("df", df, data_columns=True)
- result = store.select("df", ["A > 0"], columns=["A", "B"])
- expected = df[df.A > 0].reindex(columns=["A", "B"])
- tm.assert_frame_equal(expected, result)
+ # all a data columns
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=True)
+ result = store.select("df", ["A > 0"], columns=["A", "B"])
+ expected = df[df.A > 0].reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
- # with a data column, but different columns
- _maybe_remove(store, "df")
- store.append("df", df, data_columns=["A"])
- result = store.select("df", ["A > 0"], columns=["C", "D"])
- expected = df[df.A > 0].reindex(columns=["C", "D"])
- tm.assert_frame_equal(expected, result)
+ # with a data column, but different columns
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["A"])
+ result = store.select("df", ["A > 0"], columns=["C", "D"])
+ expected = df[df.A > 0].reindex(columns=["C", "D"])
+ tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 5d45c92ece0dc..9d7cb52e3817d 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -3,10 +3,6 @@
import hashlib
import tempfile
import time
-from warnings import (
- catch_warnings,
- simplefilter,
-)
import numpy as np
import pytest
@@ -36,6 +32,8 @@
pytestmark = pytest.mark.single_cpu
+tables = pytest.importorskip("tables")
+
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
@@ -123,8 +121,7 @@ def test_repr(setup_path):
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()
- with catch_warnings(record=True):
- simplefilter("ignore", pd.errors.PerformanceWarning)
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
store["df"] = df
# make a random group in hdf space
@@ -158,7 +155,9 @@ def test_contains(setup_path):
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
- with catch_warnings(record=True):
+ with tm.assert_produces_warning(
+ tables.NaturalNameWarning, check_stacklevel=False
+ ):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
@@ -335,63 +334,61 @@ def test_to_hdf_errors(tmp_path, format, setup_path):
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
- with catch_warnings(record=True):
-
- def col(t, column):
- return getattr(store.get_storer(t).table.cols, column)
-
- # data columns
- df = tm.makeTimeDataFrame()
- df["string"] = "foo"
- df["string2"] = "bar"
- store.append("f", df, data_columns=["string", "string2"])
- assert col("f", "index").is_indexed is True
- assert col("f", "string").is_indexed is True
- assert col("f", "string2").is_indexed is True
-
- # specify index=columns
- store.append("f2", df, index=["string"], data_columns=["string", "string2"])
- assert col("f2", "index").is_indexed is False
- assert col("f2", "string").is_indexed is True
- assert col("f2", "string2").is_indexed is False
-
- # try to index a non-table
- _maybe_remove(store, "f2")
- store.put("f2", df)
- msg = "cannot create table index on a Fixed format store"
- with pytest.raises(TypeError, match=msg):
- store.create_table_index("f2")
+
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
+
+ # data columns
+ df = tm.makeTimeDataFrame()
+ df["string"] = "foo"
+ df["string2"] = "bar"
+ store.append("f", df, data_columns=["string", "string2"])
+ assert col("f", "index").is_indexed is True
+ assert col("f", "string").is_indexed is True
+ assert col("f", "string2").is_indexed is True
+
+ # specify index=columns
+ store.append("f2", df, index=["string"], data_columns=["string", "string2"])
+ assert col("f2", "index").is_indexed is False
+ assert col("f2", "string").is_indexed is True
+ assert col("f2", "string2").is_indexed is False
+
+ # try to index a non-table
+ _maybe_remove(store, "f2")
+ store.put("f2", df)
+ msg = "cannot create table index on a Fixed format store"
+ with pytest.raises(TypeError, match=msg):
+ store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
- with catch_warnings(record=True):
- def col(t, column):
- return getattr(store.get_storer(t).table.cols, column)
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
- # data columns
- df = tm.makeTimeDataFrame()
- df["string"] = "foo"
- df["string2"] = "bar"
- store.append("f", df, data_columns=["string"])
- assert col("f", "index").is_indexed is True
- assert col("f", "string").is_indexed is True
+ # data columns
+ df = tm.makeTimeDataFrame()
+ df["string"] = "foo"
+ df["string2"] = "bar"
+ store.append("f", df, data_columns=["string"])
+ assert col("f", "index").is_indexed is True
+ assert col("f", "string").is_indexed is True
- msg = "'Cols' object has no attribute 'string2'"
- with pytest.raises(AttributeError, match=msg):
- col("f", "string2").is_indexed
+ msg = "'Cols' object has no attribute 'string2'"
+ with pytest.raises(AttributeError, match=msg):
+ col("f", "string2").is_indexed
- # try to index a col which isn't a data_column
- msg = (
- "column string2 is not a data_column.\n"
- "In order to read column string2 you must reload the dataframe \n"
- "into HDFStore and include string2 with the data_columns argument."
- )
- with pytest.raises(AttributeError, match=msg):
- store.create_table_index("f", columns=["string2"])
+ # try to index a col which isn't a data_column
+ msg = (
+ "column string2 is not a data_column.\n"
+ "In order to read column string2 you must reload the dataframe \n"
+ "into HDFStore and include string2 with the data_columns argument."
+ )
+ with pytest.raises(AttributeError, match=msg):
+ store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
@@ -948,20 +945,18 @@ def test_to_hdf_with_object_column_names(tmp_path, setup_path):
np.random.default_rng(2).standard_normal((10, 2)), columns=index(2)
)
path = tmp_path / setup_path
- with catch_warnings(record=True):
- msg = "cannot have non-object label DataIndexableCol"
- with pytest.raises(ValueError, match=msg):
- df.to_hdf(path, "df", format="table", data_columns=True)
+ msg = "cannot have non-object label DataIndexableCol"
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, "df", format="table", data_columns=True)
for index in types_should_run:
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=index(2)
)
path = tmp_path / setup_path
- with catch_warnings(record=True):
- df.to_hdf(path, "df", format="table", data_columns=True)
- result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
- assert len(result)
+ df.to_hdf(path, "df", format="table", data_columns=True)
+ result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
+ assert len(result)
def test_hdfstore_strides(setup_path):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 501e471695a8a..f3b1ac8062f19 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -3,7 +3,6 @@
from io import BytesIO
import os
import pathlib
-from warnings import catch_warnings
import numpy as np
import pytest
@@ -201,8 +200,7 @@ def check_round_trip(
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
- with catch_warnings(record=True):
- actual = read_parquet(path, **read_kwargs)
+ actual = read_parquet(path, **read_kwargs)
if "string_with_nan" in expected:
expected.loc[1, "string_with_nan"] = None
@@ -354,12 +352,11 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
- with catch_warnings(record=True):
- result = read_parquet(path, engine=pa)
- tm.assert_frame_equal(result, df)
+ result = read_parquet(path, engine=pa)
+ tm.assert_frame_equal(result, df)
- result = read_parquet(path, engine=pa, columns=["a", "d"])
- tm.assert_frame_equal(result, df[["a", "d"]])
+ result = read_parquet(path, engine=pa, columns=["a", "d"])
+ tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index ac24dc21bab38..75e4de7074e63 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -23,7 +23,6 @@
import shutil
import tarfile
import uuid
-from warnings import catch_warnings
import zipfile
import numpy as np
@@ -44,6 +43,7 @@
period_range,
)
import pandas._testing as tm
+from pandas.tests.io.generate_legacy_storage_files import create_pickle_data
import pandas.io.common as icom
from pandas.tseries.offsets import (
@@ -55,10 +55,7 @@
@pytest.fixture
def current_pickle_data():
# our current version pickle data
- from pandas.tests.io.generate_legacy_storage_files import create_pickle_data
-
- with catch_warnings():
- return create_pickle_data()
+ return create_pickle_data()
# ---------------------
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 6130271325bee..3a7762a6a6060 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -6,7 +6,6 @@
import os
import struct
import tarfile
-import warnings
import zipfile
import numpy as np
@@ -136,6 +135,7 @@ def test_read_dta1(self, file, datapath):
tm.assert_frame_equal(parsed, expected)
+ @pytest.mark.filterwarnings("always")
def test_read_dta2(self, datapath):
expected = DataFrame.from_records(
[
@@ -174,14 +174,15 @@ def test_read_dta2(self, datapath):
)
expected["yearly_date"] = expected["yearly_date"].astype("O")
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ with tm.assert_produces_warning(UserWarning):
parsed_114 = self.read_dta(
datapath("io", "data", "stata", "stata2_114.dta")
)
+ with tm.assert_produces_warning(UserWarning):
parsed_115 = self.read_dta(
datapath("io", "data", "stata", "stata2_115.dta")
)
+ with tm.assert_produces_warning(UserWarning):
parsed_117 = self.read_dta(
datapath("io", "data", "stata", "stata2_117.dta")
)
@@ -190,12 +191,6 @@ def test_read_dta2(self, datapath):
# datapath("io", "data", "stata", "stata2_113.dta")
# )
- # Remove resource warnings
- w = [x for x in w if x.category is UserWarning]
-
- # should get warning for each call to read_dta
- assert len(w) == 3
-
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
@@ -460,11 +455,9 @@ def test_read_write_dta12(self, version):
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always", InvalidColumnName)
+ with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, convert_dates=None, version=version)
# should get a warning for that format.
- assert len(w) == 1
written_and_read_again = self.read_dta(path)
@@ -1156,6 +1149,7 @@ def test_categorical_ordering(self, file, datapath):
assert parsed[col].cat.ordered
assert not parsed_unordered[col].cat.ordered
+ @pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"file",
[
@@ -1180,13 +1174,11 @@ def test_read_chunks_117(
):
fname = datapath("io", "data", "stata", f"{file}.dta")
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- parsed = read_stata(
- fname,
- convert_categoricals=convert_categoricals,
- convert_dates=convert_dates,
- )
+ parsed = read_stata(
+ fname,
+ convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates,
+ )
with read_stata(
fname,
iterator=True,
@@ -1195,12 +1187,10 @@ def test_read_chunks_117(
) as itr:
pos = 0
for j in range(5):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- try:
- chunk = itr.read(chunksize)
- except StopIteration:
- break
+ try:
+ chunk = itr.read(chunksize)
+ except StopIteration:
+ break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
@@ -1249,6 +1239,7 @@ def test_iterator(self, datapath):
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
+ @pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"file",
[
@@ -1273,13 +1264,11 @@ def test_read_chunks_115(
fname = datapath("io", "data", "stata", f"{file}.dta")
# Read the whole file
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- parsed = read_stata(
- fname,
- convert_categoricals=convert_categoricals,
- convert_dates=convert_dates,
- )
+ parsed = read_stata(
+ fname,
+ convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates,
+ )
# Compare to what we get when reading by chunk
with read_stata(
@@ -1290,12 +1279,10 @@ def test_read_chunks_115(
) as itr:
pos = 0
for j in range(5):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- try:
- chunk = itr.read(chunksize)
- except StopIteration:
- break
+ try:
+ chunk = itr.read(chunksize)
+ except StopIteration:
+ break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index c18e995d26d78..a7994f0bdf733 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -7,7 +7,6 @@
import itertools
import re
import string
-import warnings
import weakref
import numpy as np
@@ -1728,13 +1727,12 @@ def test_errorbar_plot_different_yerr_xerr_subplots(self, kind):
@pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
def test_errorbar_plot_iterator(self):
- with warnings.catch_warnings():
- d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
- df = DataFrame(d)
+ d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
+ df = DataFrame(d)
- # yerr is iterator
- ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
- _check_has_errorbars(ax, xerr=0, yerr=2)
+ # yerr is iterator
+ ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
+ _check_has_errorbars(ax, xerr=0, yerr=2)
def test_errorbar_with_integer_column_names(self):
# test with integer column names
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index a2b54c91693b8..bce00600f6615 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -1,7 +1,6 @@
""" Test cases for DataFrame.plot """
import string
-import warnings
import numpy as np
import pytest
@@ -336,9 +335,7 @@ def test_subplots_multiple_axes_2_dim(self, layout, exp_layout):
np.random.default_rng(2).random((10, 4)),
index=list(string.ascii_letters[:10]),
)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", UserWarning)
-
+ with tm.assert_produces_warning(UserWarning):
returned = df.plot(
subplots=True, ax=axes, layout=layout, sharex=False, sharey=False
)
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index 4491023125fb2..869bf3ace9492 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -5,18 +5,11 @@
from collections.abc import Iterator
from datetime import datetime
from decimal import Decimal
-from warnings import (
- catch_warnings,
- simplefilter,
-)
import numpy as np
import pytest
-from pandas.errors import (
- InvalidIndexError,
- PerformanceWarning,
-)
+from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
import pandas as pd
@@ -531,15 +524,14 @@ def test_concat_no_unnecessary_upcast(dt, frame_or_series):
@pytest.mark.parametrize("pdt", [Series, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
- with catch_warnings(record=True):
- dims = pdt().ndim
- dfs = [
- pdt(np.array([1], dtype=dt, ndmin=dims)),
- pdt(np.array([np.nan], ndmin=dims)),
- pdt(np.array([5], dtype=dt, ndmin=dims)),
- ]
- x = concat(dfs)
- assert x.values.dtype == "float64"
+ dims = pdt().ndim
+ dfs = [
+ pdt(np.array([1], dtype=dt, ndmin=dims)),
+ pdt(np.array([np.nan], ndmin=dims)),
+ pdt(np.array([5], dtype=dt, ndmin=dims)),
+ ]
+ x = concat(dfs)
+ assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
@@ -596,10 +588,7 @@ def test_duplicate_keys_same_frame():
[(keys[0], "a"), (keys[0], "b"), (keys[1], "a"), (keys[1], "b")]
)
expected = DataFrame(expected_values, columns=expected_columns)
- with catch_warnings():
- # result.columns not sorted, resulting in performance warning
- simplefilter("ignore", PerformanceWarning)
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
index 4cb3624309916..2d50b0f36904a 100644
--- a/pandas/tests/series/accessors/test_cat_accessor.py
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -198,13 +196,18 @@ def test_dt_accessor_api_for_categorical(self, idx):
)
for func, args, kwargs in func_defs:
- with warnings.catch_warnings():
- if func == "to_period":
- # dropping TZ
- warnings.simplefilter("ignore", UserWarning)
- if func == "to_pydatetime":
- # deprecated to return Index[object]
- warnings.simplefilter("ignore", FutureWarning)
+ warn_cls = []
+ if func == "to_period" and getattr(idx, "tz", None) is not None:
+ # dropping TZ
+ warn_cls.append(UserWarning)
+ if func == "to_pydatetime":
+ # deprecated to return Index[object]
+ warn_cls.append(FutureWarning)
+ if warn_cls:
+ warn_cls = tuple(warn_cls)
+ else:
+ warn_cls = None
+ with tm.assert_produces_warning(warn_cls):
res = getattr(cat.dt, func)(*args, **kwargs)
exp = getattr(ser.dt, func)(*args, **kwargs)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index e61539a9e97a7..17dfa95b66201 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -1,6 +1,5 @@
import operator
import re
-import warnings
import numpy as np
import pytest
@@ -211,9 +210,7 @@ def test_invalid(self):
result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate")
assert result
- @pytest.mark.filterwarnings(
- "ignore:invalid value encountered in true_divide:RuntimeWarning"
- )
+ @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning")
@pytest.mark.parametrize(
"opname,op_str",
[("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
@@ -232,12 +229,9 @@ def testit():
op = getattr(operator, opname)
- with warnings.catch_warnings():
- # array has 0s
- msg = "invalid value encountered in divide|true_divide"
- warnings.filterwarnings("ignore", msg, RuntimeWarning)
- result = expr.evaluate(op, left, left, use_numexpr=True)
- expected = expr.evaluate(op, left, left, use_numexpr=False)
+ # array has 0s
+ result = expr.evaluate(op, left, left, use_numexpr=True)
+ expected = expr.evaluate(op, left, left, use_numexpr=False)
tm.assert_numpy_array_equal(result, expected)
result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 7e1a23022135e..76784ec726afe 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1,5 +1,4 @@
from functools import partial
-import warnings
import numpy as np
import pytest
@@ -465,17 +464,16 @@ def test_nanmean(self, skipna):
nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False
)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanmedian(self, skipna):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- self.check_funs(
- nanops.nanmedian,
- np.median,
- skipna,
- allow_complex=False,
- allow_date=False,
- allow_obj="convert",
- )
+ self.check_funs(
+ nanops.nanmedian,
+ np.median,
+ skipna,
+ allow_complex=False,
+ allow_date=False,
+ allow_obj="convert",
+ )
@pytest.mark.parametrize("ddof", range(3))
def test_nanvar(self, ddof, skipna):
@@ -517,13 +515,12 @@ def test_nansem(self, ddof, skipna):
ddof=ddof,
)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize(
"nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
)
def test_nanops_with_warnings(self, nan_op, np_op, skipna):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- self.check_funs(nan_op, np_op, skipna, allow_obj=False)
+ self.check_funs(nan_op, np_op, skipna, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -540,17 +537,15 @@ def _argminmax_wrap(self, value, axis=None, func=None):
res = -1
return res
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanargmax(self, skipna):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._argminmax_wrap, func=np.argmax)
- self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False)
+ func = partial(self._argminmax_wrap, func=np.argmax)
+ self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False)
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanargmin(self, skipna):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._argminmax_wrap, func=np.argmin)
- self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False)
+ func = partial(self._argminmax_wrap, func=np.argmin)
+ self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
diff --git a/pandas/tests/util/test_deprecate_nonkeyword_arguments.py b/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
index 2ea3dae19a3e4..e74ff89b11581 100644
--- a/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
+++ b/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
@@ -3,7 +3,6 @@
"""
import inspect
-import warnings
from pandas.util._decorators import deprecate_nonkeyword_arguments
@@ -52,16 +51,12 @@ def test_four_arguments():
def test_three_arguments_with_name_in_warning():
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ msg = (
+ "Starting with pandas version 1.1 all arguments of f_add_inputs "
+ "except for the arguments 'a' and 'b' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
assert f(6, 3, 3) == 12
- assert len(w) == 1
- for actual_warning in w:
- assert actual_warning.category == FutureWarning
- assert str(actual_warning.message) == (
- "Starting with pandas version 1.1 all arguments of f_add_inputs "
- "except for the arguments 'a' and 'b' will be keyword-only."
- )
@deprecate_nonkeyword_arguments(version="1.1")
@@ -85,16 +80,12 @@ def test_three_arguments_default_allowed_args():
def test_three_positional_argument_with_warning_message_analysis():
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ msg = (
+ "Starting with pandas version 1.1 all arguments of g "
+ "except for the argument 'a' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
assert g(6, 3, 3) == 12
- assert len(w) == 1
- for actual_warning in w:
- assert actual_warning.category == FutureWarning
- assert str(actual_warning.message) == (
- "Starting with pandas version 1.1 all arguments of g "
- "except for the argument 'a' will be keyword-only."
- )
@deprecate_nonkeyword_arguments(version="1.1")
@@ -117,16 +108,9 @@ def test_one_positional_argument():
def test_one_positional_argument_with_warning_message_analysis():
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
+ msg = "Starting with pandas version 1.1 all arguments of h will be keyword-only."
+ with tm.assert_produces_warning(FutureWarning, match=msg):
assert h(19) == 19
- assert len(w) == 1
- for actual_warning in w:
- assert actual_warning.category == FutureWarning
- assert str(actual_warning.message) == (
- "Starting with pandas version 1.1 all arguments "
- "of h will be keyword-only."
- )
@deprecate_nonkeyword_arguments(version="1.1")
diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py
index d2114c425218c..6af5a41e96e0a 100644
--- a/pandas/tests/window/test_apply.py
+++ b/pandas/tests/window/test_apply.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -18,17 +16,15 @@
from pandas.tseries import offsets
+# suppress warnings about empty slices, as we are deliberately testing
+# with a 0-length Series
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:.*(empty slice|0 for slice).*:RuntimeWarning"
+)
+
def f(x):
- # suppress warnings about empty slices, as we are deliberately testing
- # with a 0-length Series
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- message=".*(empty slice|0 for slice).*",
- category=RuntimeWarning,
- )
- return x[np.isfinite(x)].mean()
+ return x[np.isfinite(x)].mean()
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index ec794e818edf1..8dac6d271510a 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
import pytest
@@ -325,6 +323,7 @@ def test_pairwise_with_other(
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+ @pytest.mark.filterwarnings("ignore:RuntimeWarning")
@pytest.mark.parametrize(
"f",
[
@@ -344,13 +343,11 @@ def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f):
else None
)
if result is not None:
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- # we can have int and str columns
- expected_index = pairwise_frames.index.union(pairwise_other_frame.index)
- expected_columns = pairwise_frames.columns.union(
- pairwise_other_frame.columns
- )
+ # we can have int and str columns
+ expected_index = pairwise_frames.index.union(pairwise_other_frame.index)
+ expected_columns = pairwise_frames.columns.union(
+ pairwise_other_frame.columns
+ )
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/54390 | 2023-08-03T19:30:01Z | 2023-08-05T18:38:26Z | 2023-08-05T18:38:26Z | 2023-08-05T18:38:29Z |
bug fix 54251 | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index bc187361493c0..914f6d4d9b566 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -487,6 +487,7 @@ Conversion
- Bug in :meth:`DataFrame.info` raising ``ValueError`` when ``use_numba`` is set (:issue:`51922`)
- Bug in :meth:`DataFrame.insert` raising ``TypeError`` if ``loc`` is ``np.int64`` (:issue:`53193`)
- Bug in :meth:`HDFStore.select` loses precision of large int when stored and retrieved (:issue:`54186`)
+- Bug in :meth:`Series.astype` not supporting ``object_`` (:issue:`54251`)
Strings
^^^^^^^
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index a39dafc64c42b..a0feb49f47c4e 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1634,7 +1634,13 @@ def pandas_dtype(dtype) -> DtypeObj:
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
- if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]:
+ if is_hashable(dtype) and dtype in [
+ object,
+ np.object_,
+ "object",
+ "O",
+ "object_",
+ ]:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index d0a34cedb7dbe..60047cd3cc8fc 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -53,6 +53,7 @@ def test_invalid_dtype_error(self, box):
np.float64,
float,
np.dtype("float64"),
+ "object_",
],
)
def test_pandas_dtype_valid(self, dtype):
| - [x] closes #54251
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.1.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/54388 | 2023-08-03T17:21:42Z | 2023-08-04T16:52:28Z | 2023-08-04T16:52:28Z | 2023-08-04T16:52:34Z |
TST: Use importorskip over skip_if_no_scipy | diff --git a/pandas/conftest.py b/pandas/conftest.py
index a55aea369e0ed..1dcf413f2edf6 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1868,7 +1868,7 @@ def spmatrix(request):
"""
Yields scipy sparse matrix classes.
"""
- from scipy import sparse
+ sparse = pytest.importorskip("scipy.sparse")
return getattr(sparse, request.param + "_matrix")
diff --git a/pandas/tests/arrays/sparse/test_accessor.py b/pandas/tests/arrays/sparse/test_accessor.py
index e782b148803d7..87eb7bcfa9cee 100644
--- a/pandas/tests/arrays/sparse/test_accessor.py
+++ b/pandas/tests/arrays/sparse/test_accessor.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import SparseDtype
import pandas._testing as tm
@@ -46,7 +44,6 @@ def test_from_coo(self):
expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]")
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"sort_labels, expected_rows, expected_cols, expected_values_pos",
[
@@ -67,7 +64,7 @@ def test_from_coo(self):
def test_to_coo(
self, sort_labels, expected_rows, expected_cols, expected_values_pos
):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
index = pd.MultiIndex.from_tuples(
@@ -89,7 +86,7 @@ def test_to_coo(
A, rows, cols = ss.sparse.to_coo(
row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels
)
- assert isinstance(A, scipy.sparse.coo_matrix)
+ assert isinstance(A, sp_sparse.coo_matrix)
tm.assert_numpy_array_equal(A.toarray(), expected_A)
assert rows == expected_rows
assert cols == expected_cols
@@ -109,13 +106,12 @@ def test_accessor_raises(self):
@pytest.mark.parametrize("format", ["csc", "csr", "coo"])
@pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
@pytest.mark.parametrize("dtype", ["float64", "int64"])
- @td.skip_if_no_scipy
def test_from_spmatrix(self, format, labels, dtype):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item())
- mat = scipy.sparse.eye(10, format=format, dtype=dtype)
+ mat = sp_sparse.eye(10, format=format, dtype=dtype)
result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels)
expected = pd.DataFrame(
np.eye(10, dtype=dtype), index=labels, columns=labels
@@ -123,11 +119,10 @@ def test_from_spmatrix(self, format, labels, dtype):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("format", ["csc", "csr", "coo"])
- @td.skip_if_no_scipy
def test_from_spmatrix_including_explicit_zero(self, format):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- mat = scipy.sparse.random(10, 2, density=0.5, format=format)
+ mat = sp_sparse.random(10, 2, density=0.5, format=format)
mat.data[0] = 0
result = pd.DataFrame.sparse.from_spmatrix(mat)
dtype = SparseDtype("float64", 0.0)
@@ -138,13 +133,12 @@ def test_from_spmatrix_including_explicit_zero(self, format):
"columns",
[["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
)
- @td.skip_if_no_scipy
def test_from_spmatrix_columns(self, columns):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
dtype = SparseDtype("float64", 0.0)
- mat = scipy.sparse.random(10, 2, density=0.5)
+ mat = sp_sparse.random(10, 2, density=0.5)
result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns)
expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype)
tm.assert_frame_equal(result, expected)
@@ -152,20 +146,19 @@ def test_from_spmatrix_columns(self, columns):
@pytest.mark.parametrize(
"colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)]
)
- @td.skip_if_no_scipy
def test_to_coo(self, colnames):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
df = pd.DataFrame(
{colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]"
)
result = df.sparse.to_coo()
- expected = scipy.sparse.coo_matrix(np.asarray(df))
+ expected = sp_sparse.coo_matrix(np.asarray(df))
assert (result != expected).nnz == 0
@pytest.mark.parametrize("fill_value", [1, np.nan])
- @td.skip_if_no_scipy
def test_to_coo_nonzero_fill_val_raises(self, fill_value):
+ pytest.importorskip("scipy")
df = pd.DataFrame(
{
"A": SparseArray(
@@ -179,10 +172,9 @@ def test_to_coo_nonzero_fill_val_raises(self, fill_value):
with pytest.raises(ValueError, match="fill value must be 0"):
df.sparse.to_coo()
- @td.skip_if_no_scipy
def test_to_coo_midx_categorical(self):
# GH#50996
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
midx = pd.MultiIndex.from_arrays(
[
@@ -193,7 +185,7 @@ def test_to_coo_midx_categorical(self):
ser = pd.Series(1, index=midx, dtype="Sparse[int]")
result = ser.sparse.to_coo(row_levels=["x"], column_levels=["y"])[0]
- expected = scipy.sparse.coo_matrix(
+ expected = sp_sparse.coo_matrix(
(np.array([1, 1]), (np.array([0, 1]), np.array([0, 1]))), shape=(2, 2)
)
assert (result != expected).nnz == 0
@@ -226,11 +218,10 @@ def test_density(self):
@pytest.mark.parametrize("dtype", ["int64", "float64"])
@pytest.mark.parametrize("dense_index", [True, False])
- @td.skip_if_no_scipy
def test_series_from_coo(self, dtype, dense_index):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- A = scipy.sparse.eye(3, format="coo", dtype=dtype)
+ A = sp_sparse.eye(3, format="coo", dtype=dtype)
result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
index = pd.MultiIndex.from_tuples(
@@ -246,12 +237,11 @@ def test_series_from_coo(self, dtype, dense_index):
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_series_from_coo_incorrect_format_raises(self):
# gh-26554
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- m = scipy.sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
+ m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
with pytest.raises(
TypeError, match="Expected coo_matrix. Got csr_matrix instead."
):
diff --git a/pandas/tests/arrays/sparse/test_constructors.py b/pandas/tests/arrays/sparse/test_constructors.py
index efe60fe3c7a62..2831c8abdaf13 100644
--- a/pandas/tests/arrays/sparse/test_constructors.py
+++ b/pandas/tests/arrays/sparse/test_constructors.py
@@ -2,7 +2,6 @@
import pytest
from pandas._libs.sparse import IntIndex
-import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -188,11 +187,10 @@ def test_constructor_inferred_fill_value(self, data, fill_value):
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
@pytest.mark.parametrize("size", [0, 10])
- @td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- mat = scipy.sparse.random(size, 1, density=0.5, format=format)
+ mat = sp_sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
@@ -200,11 +198,10 @@ def test_from_spmatrix(self, size, format):
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
- @td.skip_if_no_scipy
def test_from_spmatrix_including_explicit_zero(self, format):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- mat = scipy.sparse.random(10, 1, density=0.5, format=format)
+ mat = sp_sparse.random(10, 1, density=0.5, format=format)
mat.data[0] = 0
result = SparseArray.from_spmatrix(mat)
@@ -212,11 +209,10 @@ def test_from_spmatrix_including_explicit_zero(self, format):
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
- @td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- mat = scipy.sparse.eye(5, 4, format="csc")
+ mat = sp_sparse.eye(5, 4, format="csc")
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index d0a34cedb7dbe..498c3776bca20 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -210,11 +210,10 @@ def test_is_sparse(check_scipy):
assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3]))
-@td.skip_if_no_scipy
def test_is_scipy_sparse():
- from scipy.sparse import bsr_matrix
+ sp_sparse = pytest.importorskip("scipy.sparse")
- assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
+ assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3]))
assert not com.is_scipy_sparse(SparseArray([1, 2, 3]))
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 78f960f4d46d5..375003e58c21a 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -33,7 +33,6 @@
missing as libmissing,
ops as libops,
)
-import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
@@ -1970,9 +1969,9 @@ def test_nan_to_nat_conversions():
assert s[8] is pd.NaT
-@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix):
+ pytest.importorskip("scipy")
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 59c195a65f89e..f61cc2d76845c 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -104,8 +104,8 @@ class TestDataFrameCorr:
# DataFrame.corr(), as opposed to DataFrame.corrwith
@pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
- @td.skip_if_no_scipy
def test_corr_scipy_method(self, float_frame, method):
+ pytest.importorskip("scipy")
float_frame.loc[float_frame.index[:5], "A"] = np.nan
float_frame.loc[float_frame.index[5:10], "B"] = np.nan
float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20]
@@ -123,10 +123,10 @@ def test_corr_non_numeric(self, float_string_frame):
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
tm.assert_frame_equal(result, expected)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_nooverlap(self, meth):
# nothing in common
+ pytest.importorskip("scipy")
df = DataFrame(
{
"A": [1, 1.5, 1, np.nan, np.nan, np.nan],
@@ -153,12 +153,12 @@ def test_corr_constant(self, meth):
rs = df.corr(meth)
assert isna(rs.values).all()
- @td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_int_and_boolean(self, meth):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
+ pytest.importorskip("scipy")
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
@@ -193,7 +193,6 @@ def test_corr_int(self):
df.cov()
df.corr()
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"nullable_column", [pd.array([1, 2, 3]), pd.array([1, 2, None])]
)
@@ -204,6 +203,7 @@ def test_corr_int(self):
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_nullable_integer(self, nullable_column, other_column, method):
# https://github.com/pandas-dev/pandas/issues/33803
+ pytest.importorskip("scipy")
data = DataFrame({"a": nullable_column, "b": other_column})
result = data.corr(method=method)
expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
@@ -249,9 +249,9 @@ def test_calc_corr_small_numbers(self):
expected = DataFrame({"A": [1.0, 1.0], "B": [1.0, 1.0]}, index=["A", "B"])
tm.assert_frame_equal(result, expected)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_min_periods_greater_than_length(self, method):
+ pytest.importorskip("scipy")
df = DataFrame({"A": [1, 2], "B": [1, 2]})
result = df.corr(method=method, min_periods=3)
expected = DataFrame(
@@ -259,13 +259,13 @@ def test_corr_min_periods_greater_than_length(self, method):
)
tm.assert_frame_equal(result, expected)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_corr_numeric_only(self, meth, numeric_only):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
+ pytest.importorskip("scipy")
df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
if numeric_only:
@@ -422,25 +422,25 @@ def test_corr_numerical_instabilities(self):
expected = DataFrame({0: [1.0, -1.0], 1: [-1.0, 1.0]})
tm.assert_frame_equal(result - 1, expected - 1, atol=1e-17)
- @td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH#21925
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH#21925
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_corrwith_spearman_with_tied_data(self):
# GH#48826
+ pytest.importorskip("scipy")
df1 = DataFrame(
{
"A": [1, np.nan, 7, 8],
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 0a94f986c0d88..fa7a49c164913 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -202,8 +202,8 @@ def test_interp_nan_idx(self):
with pytest.raises(NotImplementedError, match=msg):
df.interpolate(method="values")
- @td.skip_if_no_scipy
def test_interp_various(self):
+ pytest.importorskip("scipy")
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
@@ -241,8 +241,8 @@ def test_interp_various(self):
expected.loc[13, "A"] = 5
tm.assert_frame_equal(result, expected, check_dtype=False)
- @td.skip_if_no_scipy
def test_interp_alt_scipy(self):
+ pytest.importorskip("scipy")
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py
index efe4b17f95ce8..8b451c84dc5da 100644
--- a/pandas/tests/frame/methods/test_rank.py
+++ b/pandas/tests/frame/methods/test_rank.py
@@ -10,7 +10,6 @@
Infinity,
NegInfinity,
)
-import pandas.util._test_decorators as td
from pandas import (
DataFrame,
@@ -38,10 +37,8 @@ def method(self, request):
"""
return request.param
- @td.skip_if_no_scipy
def test_rank(self, float_frame):
- import scipy.stats # noqa: F401
- from scipy.stats import rankdata
+ sp_stats = pytest.importorskip("scipy.stats")
float_frame.loc[::2, "A"] = np.nan
float_frame.loc[::3, "B"] = np.nan
@@ -54,10 +51,10 @@ def test_rank(self, float_frame):
fvals = float_frame.fillna(np.inf).values
- exp0 = np.apply_along_axis(rankdata, 0, fvals)
+ exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals)
exp0[mask] = np.nan
- exp1 = np.apply_along_axis(rankdata, 1, fvals)
+ exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
@@ -144,10 +141,8 @@ def test_rank_mixed_frame(self, float_string_frame):
with pytest.raises(TypeError, match="not supported between instances of"):
float_string_frame.rank(axis=1)
- @td.skip_if_no_scipy
def test_rank_na_option(self, float_frame):
- import scipy.stats # noqa: F401
- from scipy.stats import rankdata
+ sp_stats = pytest.importorskip("scipy.stats")
float_frame.loc[::2, "A"] = np.nan
float_frame.loc[::3, "B"] = np.nan
@@ -160,8 +155,8 @@ def test_rank_na_option(self, float_frame):
fvals = float_frame.fillna(np.inf).values
- exp0 = np.apply_along_axis(rankdata, 0, fvals)
- exp1 = np.apply_along_axis(rankdata, 1, fvals)
+ exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals)
+ exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
@@ -175,8 +170,8 @@ def test_rank_na_option(self, float_frame):
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
- exp0 = np.apply_along_axis(rankdata, 0, fval0)
- exp1 = np.apply_along_axis(rankdata, 1, fval1)
+ exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fval0)
+ exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
@@ -189,8 +184,8 @@ def test_rank_na_option(self, float_frame):
fvals = float_frame.fillna(np.inf).values
- exp0 = np.apply_along_axis(rankdata, 0, -fvals)
- exp1 = np.apply_along_axis(rankdata, 1, -fvals)
+ exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fvals)
+ exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
@@ -206,8 +201,8 @@ def test_rank_na_option(self, float_frame):
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
- exp0 = np.apply_along_axis(rankdata, 0, -fval0)
- exp1 = np.apply_along_axis(rankdata, 1, -fval1)
+ exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fval0)
+ exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
@@ -228,10 +223,10 @@ def test_rank_axis(self):
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))
- @td.skip_if_no_scipy
- def test_rank_methods_frame(self):
- import scipy.stats # noqa: F401
- from scipy.stats import rankdata
+ @pytest.mark.parametrize("ax", [0, 1])
+ @pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"])
+ def test_rank_methods_frame(self, ax, m):
+ sp_stats = pytest.importorskip("scipy.stats")
xs = np.random.default_rng(2).integers(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
@@ -240,15 +235,13 @@ def test_rank_methods_frame(self):
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
- for ax in [0, 1]:
- for m in ["average", "min", "max", "first", "dense"]:
- result = df.rank(axis=ax, method=m)
- sprank = np.apply_along_axis(
- rankdata, ax, vals, m if m != "first" else "ordinal"
- )
- sprank = sprank.astype(np.float64)
- expected = DataFrame(sprank, columns=cols).astype("float64")
- tm.assert_frame_equal(result, expected)
+ result = df.rank(axis=ax, method=m)
+ sprank = np.apply_along_axis(
+ sp_stats.rankdata, ax, vals, m if m != "first" else "ordinal"
+ )
+ sprank = sprank.astype(np.float64)
+ expected = DataFrame(sprank, columns=cols).astype("float64")
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
def test_rank_descending(self, method, dtype):
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 77ea6885792b4..3768298156550 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -280,21 +280,18 @@ def sem(x):
check_dates=True,
)
- @td.skip_if_no_scipy
def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
- def skewness(x):
- from scipy.stats import skew
+ sp_stats = pytest.importorskip("scipy.stats")
+ def skewness(x):
if len(x) < 3:
return np.nan
- return skew(x, bias=False)
+ return sp_stats.skew(x, bias=False)
def kurt(x):
- from scipy.stats import kurtosis
-
if len(x) < 4:
return np.nan
- return kurtosis(x, bias=False)
+ return sp_stats.kurtosis(x, bias=False)
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 3ef012183ef26..37ca52eba6451 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -1,8 +1,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
DataFrame,
@@ -655,8 +653,8 @@ def test_memory_usage(self):
result = df.memory_usage(index=False)
assert isinstance(result, tm.SubclassedSeries)
- @td.skip_if_no_scipy
def test_corrwith(self):
+ pytest.importorskip("scipy")
index = ["a", "b", "c", "d", "e"]
columns = ["one", "two", "three", "four"]
df1 = tm.SubclassedDataFrame(
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 28d235812de27..8524529ecc4b0 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1292,11 +1292,10 @@ def test_loc_getitem_time_object(self, frame_or_series):
@pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
- @td.skip_if_no_scipy
def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
- import scipy.sparse
+ sp_sparse = pytest.importorskip("scipy.sparse")
- spmatrix_t = getattr(scipy.sparse, spmatrix_t)
+ spmatrix_t = getattr(sp_sparse, spmatrix_t)
# The bug is triggered by a sparse matrix with purely sparse columns. So the
# recipe below generates a rectangular matrix of dimension (5, 7) where all the
@@ -1321,12 +1320,11 @@ def test_loc_getitem_listlike_all_retains_sparse(self):
result = df.loc[[0, 1]]
tm.assert_frame_equal(result, df)
- @td.skip_if_no_scipy
def test_loc_getitem_sparse_frame(self):
# GH34687
- from scipy.sparse import eye
+ sp_sparse = pytest.importorskip("scipy.sparse")
- df = DataFrame.sparse.from_spmatrix(eye(5))
+ df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5))
result = df.loc[range(2)]
expected = DataFrame(
[[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0]],
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 4b0705adc2d9e..c18e995d26d78 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -13,8 +13,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas.core.dtypes.api import is_list_like
import pandas as pd
@@ -1060,22 +1058,22 @@ def test_boxplot_return_type_invalid_type(self, return_type):
result = df.plot.box(return_type=return_type)
_check_box_return_type(result, return_type)
- @td.skip_if_no_scipy
def test_kde_df(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
ax = _check_plot_works(df.plot, kind="kde")
expected = [pprint_thing(c) for c in df.columns]
_check_legend_labels(ax, labels=expected)
_check_ticks_props(ax, xrot=0)
- @td.skip_if_no_scipy
def test_kde_df_rot(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
ax = df.plot(kind="kde", rot=20, fontsize=5)
_check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
- @td.skip_if_no_scipy
def test_kde_df_subplots(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
axes = _check_plot_works(
df.plot,
@@ -1085,14 +1083,14 @@ def test_kde_df_subplots(self):
)
_check_axes_shape(axes, axes_num=4, layout=(4, 1))
- @td.skip_if_no_scipy
def test_kde_df_logy(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
axes = df.plot(kind="kde", logy=True, subplots=True)
_check_ax_scales(axes, yaxis="log")
- @td.skip_if_no_scipy
def test_kde_missing_vals(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind="kde")
@@ -1376,16 +1374,16 @@ def test_unordered_ts(self):
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
- @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
def test_kind_both_ways(self, kind):
+ pytest.importorskip("scipy")
df = DataFrame({"x": [1, 2, 3]})
df.plot(kind=kind)
getattr(df.plot, kind)()
- @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", ["scatter", "hexbin"])
def test_kind_both_ways_x_y(self, kind):
+ pytest.importorskip("scipy")
df = DataFrame({"x": [1, 2, 3]})
df.plot("x", "x", kind=kind)
getattr(df.plot, kind)("x", "x")
@@ -2029,10 +2027,10 @@ def test_sharey_and_ax_tight(self):
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
def test_memory_leak(self, kind):
"""Check that every plot type gets properly collected."""
+ pytest.importorskip("scipy")
args = {}
if kind in ["hexbin", "scatter", "pie"]:
df = DataFrame(
@@ -2347,11 +2345,11 @@ def test_plot_no_numeric_data(self):
with pytest.raises(TypeError, match="no numeric data to plot"):
df.plot()
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie")
)
def test_group_subplot(self, kind):
+ pytest.importorskip("scipy")
d = {
"a": np.arange(10),
"b": np.arange(10) + 1,
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index 1cbebc11bb615..ff1edd323ef28 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -4,8 +4,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
@@ -447,24 +445,24 @@ def test_hist_colors_single_color(self):
ax = df.plot(kind="hist", color="green")
_check_colors(ax.patches[::10], facecolors=["green"] * 5)
- @td.skip_if_no_scipy
def test_kde_colors(self):
+ pytest.importorskip("scipy")
custom_colors = "rgcby"
df = DataFrame(np.random.default_rng(2).random((5, 5)))
ax = df.plot.kde(color=custom_colors)
_check_colors(ax.get_lines(), linecolors=custom_colors)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("colormap", ["jet", cm.jet])
def test_kde_colors_cmap(self, colormap):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
ax = df.plot.kde(colormap=colormap)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
_check_colors(ax.get_lines(), linecolors=rgba_colors)
- @td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
+ pytest.importorskip("scipy")
default_colors = _unpack_cycler(mpl.pyplot.rcParams)
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
@@ -473,41 +471,41 @@ def test_kde_colors_and_styles_subplots(self):
for ax, c in zip(axes, list(default_colors)):
_check_colors(ax.get_lines(), linecolors=[c])
- @td.skip_if_no_scipy
@pytest.mark.parametrize("colormap", ["k", "red"])
def test_kde_colors_and_styles_subplots_single_col_str(self, colormap):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
axes = df.plot(kind="kde", color=colormap, subplots=True)
for ax in axes:
_check_colors(ax.get_lines(), linecolors=[colormap])
- @td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots_custom_color(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
custom_colors = "rgcby"
axes = df.plot(kind="kde", color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
_check_colors(ax.get_lines(), linecolors=[c])
- @td.skip_if_no_scipy
@pytest.mark.parametrize("colormap", ["jet", cm.jet])
def test_kde_colors_and_styles_subplots_cmap(self, colormap):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
axes = df.plot(kind="kde", colormap=colormap, subplots=True)
for ax, c in zip(axes, rgba_colors):
_check_colors(ax.get_lines(), linecolors=[c])
- @td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots_single_col(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
_check_colors(axes[0].lines, linecolors=["DodgerBlue"])
- @td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots_single_char(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# list of styles
# single character style
@@ -515,8 +513,8 @@ def test_kde_colors_and_styles_subplots_single_char(self):
for ax in axes:
_check_colors(ax.get_lines(), linecolors=["r"])
- @td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots_list(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# list of styles
styles = list("rgcby")
diff --git a/pandas/tests/plotting/frame/test_frame_legend.py b/pandas/tests/plotting/frame/test_frame_legend.py
index 2590c41664c4b..d2924930667b6 100644
--- a/pandas/tests/plotting/frame/test_frame_legend.py
+++ b/pandas/tests/plotting/frame/test_frame_legend.py
@@ -59,9 +59,9 @@ def test_legend_false(self):
expected = ["blue", "green", "red"]
assert result == expected
- @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"])
def test_df_legend_labels(self, kind):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
df2 = DataFrame(
np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
@@ -86,8 +86,8 @@ def test_df_legend_labels(self, kind):
expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
_check_legend_labels(ax, labels=expected)
- @td.skip_if_no_scipy
def test_df_legend_labels_secondary_y(self):
+ pytest.importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
df2 = DataFrame(
np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
@@ -103,9 +103,9 @@ def test_df_legend_labels_secondary_y(self):
ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
_check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"])
- @td.skip_if_no_scipy
def test_df_legend_labels_time_series(self):
# Time Series
+ pytest.importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
@@ -129,9 +129,9 @@ def test_df_legend_labels_time_series(self):
ax = df3.plot(legend=True, ax=ax)
_check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
- @td.skip_if_no_scipy
def test_df_legend_labels_time_series_scatter(self):
# Time Series
+ pytest.importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
@@ -156,8 +156,8 @@ def test_df_legend_labels_time_series_scatter(self):
ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
_check_legend_labels(ax, labels=["data1", "data3"])
- @td.skip_if_no_scipy
def test_df_legend_labels_time_series_no_mutate(self):
+ pytest.importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 323b526028e32..765e587af75ea 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -14,7 +14,6 @@
BaseOffset,
to_offset,
)
-import pandas.util._test_decorators as td
from pandas import (
DataFrame,
@@ -695,8 +694,8 @@ def test_secondary_y_ts_visible(self):
ax = ser2.plot()
assert ax.get_yaxis().get_visible()
- @td.skip_if_no_scipy
def test_secondary_kde(self):
+ pytest.importorskip("scipy")
ser = Series(np.random.default_rng(2).standard_normal(10))
fig, ax = mpl.pyplot.subplots()
ax = ser.plot(secondary_y=True, kind="density", ax=ax)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 6bab3d910d879..e38cd696a2d90 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -4,8 +4,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Index,
@@ -202,8 +200,8 @@ def test_hist_kwargs_align(self, ts):
ax = ts.plot.hist(align="left", stacked=True, ax=ax)
@pytest.mark.xfail(reason="Api changed in 3.6.0")
- @td.skip_if_no_scipy
def test_hist_kde(self, ts):
+ pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
@@ -213,17 +211,17 @@ def test_hist_kde(self, ts):
ylabels = ax.get_yticklabels()
_check_text_labels(ylabels, [""] * len(ylabels))
- @td.skip_if_no_scipy
def test_hist_kde_plot_works(self, ts):
+ pytest.importorskip("scipy")
_check_plot_works(ts.plot.kde)
- @td.skip_if_no_scipy
def test_hist_kde_density_works(self, ts):
+ pytest.importorskip("scipy")
_check_plot_works(ts.plot.density)
@pytest.mark.xfail(reason="Api changed in 3.6.0")
- @td.skip_if_no_scipy
def test_hist_kde_logy(self, ts):
+ pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
@@ -232,16 +230,16 @@ def test_hist_kde_logy(self, ts):
ylabels = ax.get_yticklabels()
_check_text_labels(ylabels, [""] * len(ylabels))
- @td.skip_if_no_scipy
def test_hist_kde_color_bins(self, ts):
+ pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
_check_ax_scales(ax, yaxis="log")
assert len(ax.patches) == 10
_check_colors(ax.patches, facecolors=["b"] * 10)
- @td.skip_if_no_scipy
def test_hist_kde_color(self, ts):
+ pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, color="r", ax=ax)
_check_ax_scales(ax, yaxis="log")
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index ff2058a4c4f09..a5145472203a3 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -97,9 +97,9 @@ def test_bootstrap_plot(self):
class TestDataFramePlots:
- @td.skip_if_no_scipy
@pytest.mark.parametrize("pass_axis", [False, True])
def test_scatter_matrix_axis(self, pass_axis):
+ pytest.importorskip("scipy")
scatter_matrix = plotting.scatter_matrix
ax = None
@@ -122,9 +122,9 @@ def test_scatter_matrix_axis(self, pass_axis):
_check_text_labels(axes0_labels, expected)
_check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("pass_axis", [False, True])
def test_scatter_matrix_axis_smaller(self, pass_axis):
+ pytest.importorskip("scipy")
scatter_matrix = plotting.scatter_matrix
ax = None
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 8b8a69a88ca35..6f0afab53c267 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -556,7 +556,6 @@ def test_plot_fails_with_dupe_color_and_style(self):
with pytest.raises(ValueError, match=msg):
x.plot(style="k--", color="k", ax=ax)
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"bw_method, ind",
[
@@ -567,23 +566,24 @@ def test_plot_fails_with_dupe_color_and_style(self):
],
)
def test_kde_kwargs(self, ts, bw_method, ind):
+ pytest.importorskip("scipy")
_check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind)
- @td.skip_if_no_scipy
def test_density_kwargs(self, ts):
+ pytest.importorskip("scipy")
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
- @td.skip_if_no_scipy
def test_kde_kwargs_check_axes(self, ts):
+ pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
sample_points = np.linspace(-100, 100, 20)
ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
_check_ax_scales(ax, yaxis="log")
_check_text_labels(ax.yaxis.get_label(), "Density")
- @td.skip_if_no_scipy
def test_kde_missing_vals(self):
+ pytest.importorskip("scipy")
s = Series(np.random.default_rng(2).uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
@@ -601,23 +601,23 @@ def test_boxplot_series(self, ts):
ylabels = ax.get_yticklabels()
_check_text_labels(ylabels, [""] * len(ylabels))
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"kind",
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
)
def test_kind_kwarg(self, kind):
+ pytest.importorskip("scipy")
s = Series(range(3))
_, ax = mpl.pyplot.subplots()
s.plot(kind=kind, ax=ax)
mpl.pyplot.close()
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"kind",
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
)
def test_kind_attr(self, kind):
+ pytest.importorskip("scipy")
s = Series(range(3))
_, ax = mpl.pyplot.subplots()
getattr(s.plot, kind)()
@@ -631,9 +631,9 @@ def test_invalid_plot_data(self, kind):
with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
def test_valid_object_plot(self, kind):
+ pytest.importorskip("scipy")
s = Series(range(10), dtype=object)
_check_plot_works(s.plot, kind=kind)
@@ -745,9 +745,9 @@ def test_table_self(self, series):
_check_plot_works(series.plot, table=series)
@pytest.mark.slow
- @td.skip_if_no_scipy
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+ pytest.importorskip("scipy")
_check_grid_settings(
Series([1, 2, 3]),
plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py
index 5f6fcba50142c..58c5fc7269aee 100644
--- a/pandas/tests/reductions/test_stat_reductions.py
+++ b/pandas/tests/reductions/test_stat_reductions.py
@@ -6,8 +6,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
DataFrame,
@@ -227,13 +225,12 @@ def test_sem(self):
result = s.sem(ddof=1)
assert pd.isna(result)
- @td.skip_if_no_scipy
def test_skew(self):
- from scipy.stats import skew
+ sp_stats = pytest.importorskip("scipy.stats")
string_series = tm.makeStringSeries().rename("series")
- alt = lambda x: skew(x, bias=False)
+ alt = lambda x: sp_stats.skew(x, bias=False)
self._check_stat_op("skew", alt, string_series)
# test corner cases, skew() returns NaN unless there's at least 3
@@ -250,13 +247,12 @@ def test_skew(self):
assert isinstance(s.skew(), np.float64) # GH53482
assert (df.skew() == 0).all()
- @td.skip_if_no_scipy
def test_kurt(self):
- from scipy.stats import kurtosis
+ sp_stats = pytest.importorskip("scipy.stats")
string_series = tm.makeStringSeries().rename("series")
- alt = lambda x: kurtosis(x, bias=False)
+ alt = lambda x: sp_stats.kurtosis(x, bias=False)
self._check_stat_op("kurt", alt, string_series)
def test_kurt_corner(self):
diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py
index 51866800399a0..b2d5d1ee090ac 100644
--- a/pandas/tests/series/methods/test_cov_corr.py
+++ b/pandas/tests/series/methods/test_cov_corr.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
Series,
@@ -57,10 +55,9 @@ def test_cov_ddof(self, test_ddof, dtype):
class TestSeriesCorr:
- @td.skip_if_no_scipy
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_corr(self, datetime_series, dtype):
- from scipy import stats
+ stats = pytest.importorskip("scipy.stats")
datetime_series = datetime_series.astype(dtype)
@@ -90,9 +87,8 @@ def test_corr(self, datetime_series, dtype):
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
- @td.skip_if_no_scipy
def test_corr_rank(self):
- from scipy import stats
+ stats = pytest.importorskip("scipy.stats")
# kendall and spearman
A = tm.makeTimeSeries()
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 3b099720dcdf2..eb8fe047df8a6 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -117,8 +117,8 @@ def test_interpolate_time_raises_for_non_timeseries(self):
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
- @td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
+ pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
@@ -132,8 +132,8 @@ def test_interpolate_cubicspline(self):
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_interpolate_pchip(self):
+ pytest.importorskip("scipy")
ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
# interpolate at new_index
@@ -144,8 +144,8 @@ def test_interpolate_pchip(self):
# does not blow up, GH5977
interp_s[49:51]
- @td.skip_if_no_scipy
def test_interpolate_akima(self):
+ pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
@@ -170,8 +170,8 @@ def test_interpolate_akima(self):
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
- @td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
+ pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
@@ -185,8 +185,8 @@ def test_interpolate_piecewise_polynomial(self):
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
- @td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
+ pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
@@ -275,15 +275,15 @@ def test_nan_str_index(self):
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_interp_quad(self):
+ pytest.importorskip("scipy")
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_interp_scipy_basic(self):
+ pytest.importorskip("scipy")
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
@@ -617,8 +617,8 @@ def test_interp_limit_before_ends(self):
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_interp_all_good(self):
+ pytest.importorskip("scipy")
s = Series([1, 2, 3])
result = s.interpolate(method="polynomial", order=1)
tm.assert_series_equal(result, s)
@@ -644,16 +644,16 @@ def test_interp_multiIndex(self, check_scipy):
with pytest.raises(ValueError, match=msg):
s.interpolate(method="polynomial", order=1)
- @td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
+ pytest.importorskip("scipy")
s = Series([1, np.nan, 3], index=[0, 2, 1])
msg = "krogh interpolation requires that the index be monotonic"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="krogh")
- @td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["nearest", "pad"])
def test_interp_datetime64(self, method, tz_naive_fixture):
+ pytest.importorskip("scipy")
df = Series(
[1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
)
@@ -696,32 +696,32 @@ def test_interp_limit_no_nans(self):
expected = s
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["polynomial", "spline"])
def test_no_order(self, method):
# see GH-10633, GH-24014
+ pytest.importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "You must specify the order of the spline or polynomial"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
def test_interpolate_spline_invalid_order(self, order):
+ pytest.importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "order needs to be specified and greater than 0"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="spline", order=order)
- @td.skip_if_no_scipy
def test_spline(self):
+ pytest.importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method="spline", order=1)
expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result, expected)
- @td.skip_if_no_scipy
def test_spline_extrapolate(self):
+ pytest.importorskip("scipy")
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method="spline", order=1, ext=3)
expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
@@ -731,17 +731,17 @@ def test_spline_extrapolate(self):
expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result1, expected1)
- @td.skip_if_no_scipy
def test_spline_smooth(self):
+ pytest.importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (
s.interpolate(method="spline", order=3, s=0)[5]
!= s.interpolate(method="spline", order=3)[5]
)
- @td.skip_if_no_scipy
def test_spline_interpolation(self):
# Explicit cast to float to avoid implicit cast when setting np.nan
+ pytest.importorskip("scipy")
s = Series(np.arange(10) ** 2, dtype="float")
s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
result1 = s.interpolate(method="spline", order=1)
@@ -817,7 +817,6 @@ def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
with pytest.raises(ValueError, match=expected_error):
df[0].interpolate(method=method, **kwargs)
- @td.skip_if_no_scipy
def test_interpolate_timedelta_index(self, request, interp_methods_ind):
"""
Tests for non numerical index types - object, period, timedelta
@@ -825,6 +824,7 @@ def test_interpolate_timedelta_index(self, request, interp_methods_ind):
are tested here.
"""
# gh 21662
+ pytest.importorskip("scipy")
ind = pd.timedelta_range(start=1, periods=4)
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py
index da7c4612cbc49..766a2415d89fb 100644
--- a/pandas/tests/series/methods/test_rank.py
+++ b/pandas/tests/series/methods/test_rank.py
@@ -55,9 +55,8 @@ def dtype(request):
class TestSeriesRank:
- @td.skip_if_no_scipy
def test_rank(self, datetime_series):
- from scipy.stats import rankdata
+ sp_stats = pytest.importorskip("scipy.stats")
datetime_series[::2] = np.nan
datetime_series[:10:3] = 4.0
@@ -71,7 +70,7 @@ def test_rank(self, datetime_series):
filled = datetime_series.fillna(np.inf)
# rankdata returns a ndarray
- exp = Series(rankdata(filled), index=filled.index, name="ts")
+ exp = Series(sp_stats.rankdata(filled), index=filled.index, name="ts")
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
@@ -249,7 +248,6 @@ def test_rank_tie_methods(self, ser, results, dtype):
result = ser.rank(method=method)
tm.assert_series_equal(result, Series(exp))
- @td.skip_if_no_scipy
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("na_option", ["top", "bottom", "keep"])
@@ -271,6 +269,7 @@ def test_rank_tie_methods(self, ser, results, dtype):
def test_rank_tie_methods_on_infs_nans(
self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf
):
+ pytest.importorskip("scipy")
if dtype == "float64[pyarrow]":
if method == "average":
exp_dtype = "float64[pyarrow]"
@@ -309,7 +308,6 @@ def test_rank_desc_mix_nans_infs(self):
exp = Series([3, np.nan, 1, 4, 2], dtype="float64")
tm.assert_series_equal(result, exp)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize(
"op, value",
@@ -320,7 +318,7 @@ def test_rank_desc_mix_nans_infs(self):
],
)
def test_rank_methods_series(self, method, op, value):
- from scipy.stats import rankdata
+ sp_stats = pytest.importorskip("scipy.stats")
xs = np.random.default_rng(2).standard_normal(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
@@ -330,7 +328,7 @@ def test_rank_methods_series(self, method, op, value):
vals = op(xs, value)
ts = Series(vals, index=index)
result = ts.rank(method=method)
- sprank = rankdata(vals, method if method != "first" else "ordinal")
+ sprank = sp_stats.rankdata(vals, method if method != "first" else "ordinal")
expected = Series(sprank, index=index).astype("float64")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index fa5d12d5a722c..856c31b9ccb06 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -9,7 +9,6 @@
algos as libalgos,
hashtable as ht,
)
-import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -1731,7 +1730,6 @@ def test_hashtable_factorize(self, htable, tm_dtype, writable):
class TestRank:
- @td.skip_if_no_scipy
@pytest.mark.parametrize(
"arr",
[
@@ -1740,7 +1738,7 @@ class TestRank:
],
)
def test_scipy_compat(self, arr):
- from scipy.stats import rankdata
+ sp_stats = pytest.importorskip("scipy.stats")
arr = np.array(arr)
@@ -1748,7 +1746,7 @@ def test_scipy_compat(self, arr):
arr = arr.copy()
result = libalgos.rank_1d(arr)
arr[mask] = np.inf
- exp = rankdata(arr)
+ exp = sp_stats.rankdata(arr)
exp[mask] = np.nan
tm.assert_almost_equal(result, exp)
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index a1e6ac0448d3f..7e1a23022135e 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -501,15 +501,14 @@ def test_nanstd(self, ddof, skipna):
ddof=ddof,
)
- @td.skip_if_no_scipy
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof, skipna):
- from scipy.stats import sem
+ sp_stats = pytest.importorskip("scipy.stats")
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nansem,
- sem,
+ sp_stats.sem,
skipna,
allow_complex=False,
allow_date=False,
@@ -565,11 +564,10 @@ def _skew_kurt_wrap(self, values, axis=None, func=None):
return 0.0
return result
- @td.skip_if_no_scipy
def test_nanskew(self, skipna):
- from scipy.stats import skew
+ sp_stats = pytest.importorskip("scipy.stats")
- func = partial(self._skew_kurt_wrap, func=skew)
+ func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nanskew,
@@ -580,11 +578,10 @@ def test_nanskew(self, skipna):
allow_tdelta=False,
)
- @td.skip_if_no_scipy
def test_nankurt(self, skipna):
- from scipy.stats import kurtosis
+ sp_stats = pytest.importorskip("scipy.stats")
- func1 = partial(kurtosis, fisher=True)
+ func1 = partial(sp_stats.kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid="ignore"):
self.check_funs(
@@ -712,30 +709,28 @@ def test_nancorr_pearson(self):
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
- @td.skip_if_no_scipy
def test_nancorr_kendall(self):
- from scipy.stats import kendalltau
+ sp_stats = pytest.importorskip("scipy.stats")
- targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
- targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+ targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
+ targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")
- targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
- targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
+ targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
+ targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
- @td.skip_if_no_scipy
def test_nancorr_spearman(self):
- from scipy.stats import spearmanr
+ sp_stats = pytest.importorskip("scipy.stats")
- targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
- targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+ targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
+ targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")
- targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
- targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
+ targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
+ targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
- @td.skip_if_no_scipy
def test_invalid_method(self):
+ pytest.importorskip("scipy")
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
diff --git a/pandas/tests/window/test_rolling_skew_kurt.py b/pandas/tests/window/test_rolling_skew_kurt.py
index cd8e555c829fb..ada726401c4a0 100644
--- a/pandas/tests/window/test_rolling_skew_kurt.py
+++ b/pandas/tests/window/test_rolling_skew_kurt.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Series,
@@ -17,23 +15,21 @@
from pandas.tseries import offsets
-@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_series(series, sp_func, roll_func):
- import scipy.stats
+ sp_stats = pytest.importorskip("scipy.stats")
- compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
result = getattr(series.rolling(50), roll_func)()
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
-@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_frame(raw, frame, sp_func, roll_func):
- import scipy.stats
+ sp_stats = pytest.importorskip("scipy.stats")
- compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
result = getattr(frame.rolling(50), roll_func)()
assert isinstance(result, DataFrame)
tm.assert_series_equal(
@@ -43,12 +39,11 @@ def test_frame(raw, frame, sp_func, roll_func):
)
-@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_series(series, sp_func, roll_func):
- import scipy.stats
+ sp_stats = pytest.importorskip("scipy.stats")
- compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)()
@@ -59,12 +54,11 @@ def test_time_rule_series(series, sp_func, roll_func):
tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series))
-@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_frame(raw, frame, sp_func, roll_func):
- import scipy.stats
+ sp_stats = pytest.importorskip("scipy.stats")
- compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)()
@@ -79,12 +73,11 @@ def test_time_rule_frame(raw, frame, sp_func, roll_func):
)
-@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_nans(sp_func, roll_func):
- import scipy.stats
+ sp_stats = pytest.importorskip("scipy.stats")
- compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
obj = Series(np.random.default_rng(2).standard_normal(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index 53612f87a67d3..2ca02fef796ed 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -1,8 +1,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Series,
@@ -35,9 +33,9 @@ def win_types_special(request):
return request.param
-@td.skip_if_no_scipy
def test_constructor(frame_or_series):
# GH 12669
+ pytest.importorskip("scipy")
c = frame_or_series(range(5)).rolling
# valid
@@ -47,10 +45,9 @@ def test_constructor(frame_or_series):
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
-@td.skip_if_no_scipy
def test_invalid_constructor(frame_or_series, w):
# not valid
-
+ pytest.importorskip("scipy")
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="min_periods must be an integer"):
c(win_type="boxcar", window=2, min_periods=w)
@@ -59,23 +56,23 @@ def test_invalid_constructor(frame_or_series, w):
@pytest.mark.parametrize("wt", ["foobar", 1])
-@td.skip_if_no_scipy
def test_invalid_constructor_wintype(frame_or_series, wt):
+ pytest.importorskip("scipy")
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="Invalid win_type"):
c(win_type=wt, window=2)
-@td.skip_if_no_scipy
def test_constructor_with_win_type(frame_or_series, win_types):
# GH 12669
+ pytest.importorskip("scipy")
c = frame_or_series(range(5)).rolling
c(win_type=win_types, window=2)
-@td.skip_if_no_scipy
@pytest.mark.parametrize("arg", ["median", "kurt", "skew"])
def test_agg_function_support(arg):
+ pytest.importorskip("scipy")
df = DataFrame({"A": np.arange(5)})
roll = df.rolling(2, win_type="triang")
@@ -90,17 +87,17 @@ def test_agg_function_support(arg):
roll.agg({"A": arg})
-@td.skip_if_no_scipy
def test_invalid_scipy_arg():
# This error is raised by scipy
+ pytest.importorskip("scipy")
msg = r"boxcar\(\) got an unexpected"
with pytest.raises(TypeError, match=msg):
Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
-@td.skip_if_no_scipy
def test_constructor_with_win_type_invalid(frame_or_series):
# GH 13383
+ pytest.importorskip("scipy")
c = frame_or_series(range(5)).rolling
msg = "window must be an integer 0 or greater"
@@ -109,9 +106,9 @@ def test_constructor_with_win_type_invalid(frame_or_series):
c(-1, win_type="boxcar")
-@td.skip_if_no_scipy
def test_window_with_args(step):
# make sure that we are aggregating window functions correctly with arg
+ pytest.importorskip("scipy")
r = Series(np.random.default_rng(2).standard_normal(100)).rolling(
window=10, min_periods=1, win_type="gaussian", step=step
)
@@ -132,18 +129,18 @@ def b(x):
tm.assert_frame_equal(result, expected)
-@td.skip_if_no_scipy
def test_win_type_with_method_invalid():
+ pytest.importorskip("scipy")
with pytest.raises(
NotImplementedError, match="'single' is the only supported method type."
):
Series(range(1)).rolling(1, win_type="triang", method="table")
-@td.skip_if_no_scipy
@pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")])
def test_consistent_win_type_freq(arg):
# GH 15969
+ pytest.importorskip("scipy")
s = Series(range(1))
with pytest.raises(ValueError, match="Invalid win_type freq"):
s.rolling(arg, win_type="freq")
@@ -155,8 +152,9 @@ def test_win_type_freq_return_none():
assert freq_roll.win_type is None
-@td.skip_if_no_scipy
def test_win_type_not_implemented():
+ pytest.importorskip("scipy")
+
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed, step):
return np.array([0, 1]), np.array([1, 2])
@@ -167,9 +165,9 @@ def get_window_bounds(self, num_values, min_periods, center, closed, step):
df.rolling(indexer, win_type="boxcar")
-@td.skip_if_no_scipy
def test_cmov_mean(step):
# GH 8238
+ pytest.importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, center=True, step=step).mean()
expected_values = [
@@ -188,9 +186,9 @@ def test_cmov_mean(step):
tm.assert_series_equal(expected, result)
-@td.skip_if_no_scipy
def test_cmov_window(step):
# GH 8238
+ pytest.importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean()
expected_values = [
@@ -209,10 +207,10 @@ def test_cmov_window(step):
tm.assert_series_equal(expected, result)
-@td.skip_if_no_scipy
def test_cmov_window_corner(step):
# GH 8238
# all nan
+ pytest.importorskip("scipy")
vals = Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()
assert np.isnan(result).all()
@@ -229,7 +227,6 @@ def test_cmov_window_corner(step):
assert len(result) == len(range(0, 5, step or 1))
-@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
@@ -297,6 +294,7 @@ def test_cmov_window_corner(step):
)
def test_cmov_window_frame(f, xp, step):
# Gh 8238
+ pytest.importorskip("scipy")
df = DataFrame(
np.array(
[
@@ -322,8 +320,8 @@ def test_cmov_window_frame(f, xp, step):
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5])
-@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(step, min_periods):
+ pytest.importorskip("scipy")
vals = Series(np.random.default_rng(2).standard_normal(10))
vals[4] = np.nan
vals[8] = np.nan
@@ -335,9 +333,9 @@ def test_cmov_window_na_min_periods(step, min_periods):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_cmov_window_regular(win_types, step):
# GH 8238
+ pytest.importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"hamming": [
@@ -443,9 +441,9 @@ def test_cmov_window_regular(win_types, step):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(win_types, step):
# GH 8238
+ pytest.importorskip("scipy")
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
@@ -456,9 +454,9 @@ def test_cmov_window_regular_linear_range(win_types, step):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(win_types, step):
# GH 8238
+ pytest.importorskip("scipy")
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
@@ -566,9 +564,9 @@ def test_cmov_window_regular_missing_data(win_types, step):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_cmov_window_special(win_types_special, step):
# GH 8238
+ pytest.importorskip("scipy")
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
@@ -638,9 +636,9 @@ def test_cmov_window_special(win_types_special, step):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(win_types_special, step):
# GH 8238
+ pytest.importorskip("scipy")
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
@@ -663,9 +661,9 @@ def test_cmov_window_special_linear_range(win_types_special, step):
tm.assert_series_equal(xp, rs)
-@td.skip_if_no_scipy
def test_weighted_var_big_window_no_segfault(win_types, center):
# GitHub Issue #46772
+ pytest.importorskip("scipy")
x = Series(0)
result = x.rolling(window=16, center=center, win_type=win_types).var()
expected = Series(np.NaN)
@@ -673,8 +671,8 @@ def test_weighted_var_big_window_no_segfault(win_types, center):
tm.assert_series_equal(result, expected)
-@td.skip_if_no_scipy
def test_rolling_center_axis_1():
+ pytest.importorskip("scipy")
df = DataFrame(
{"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]}
)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/54387 | 2023-08-03T17:03:18Z | 2023-08-04T17:07:12Z | 2023-08-04T17:07:12Z | 2023-08-04T17:07:15Z |
DOC: Fixing EX01 - Added example | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index a793e03b8745f..07c5ad1868d9e 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -57,13 +57,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX02, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX02,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Partially validate docstrings (EX01)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01 --ignore_functions \
- pandas.api.extensions.ExtensionArray \
+ MSG='Validate docstrings (EX01, EX02, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX02,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 325edba670fce..7babce46a3977 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -239,6 +239,12 @@ class ExtensionArray:
By default, ExtensionArrays are not hashable. Immutable subclasses may
override this behavior.
+
+ Examples
+ --------
+ Please see the following:
+
+ https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
closes https://github.com/pandas-dev/pandas/issues/37875 | https://api.github.com/repos/pandas-dev/pandas/pulls/54384 | 2023-08-03T10:21:52Z | 2023-08-03T14:58:09Z | 2023-08-03T14:58:09Z | 2023-08-03T15:50:09Z |
ENH: add consortium standard entrypoint | diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml
index 6ff3d3b0a3b98..6c9ba89d82794 100644
--- a/.github/workflows/package-checks.yml
+++ b/.github/workflows/package-checks.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
- extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "all"]
+ extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "consortium-standard", "all"]
fail-fast: false
name: Install Extras - ${{ matrix.extra }}
concurrency:
diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml
index 7cf7f805cee31..a40640e99265a 100644
--- a/ci/deps/actions-311-downstream_compat.yaml
+++ b/ci/deps/actions-311-downstream_compat.yaml
@@ -73,5 +73,6 @@ dependencies:
- pyyaml
- py
- pip:
+ - dataframe-api-compat>=0.1.7
- pyqt5>=5.15.6
- tzdata>=2022.1
diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml
index 52274180ccb48..c1c7b986fe8a4 100644
--- a/ci/deps/actions-39-minimum_versions.yaml
+++ b/ci/deps/actions-39-minimum_versions.yaml
@@ -61,5 +61,6 @@ dependencies:
- zstandard=0.17.0
- pip:
+ - dataframe-api-compat==0.1.7
- pyqt5==5.15.6
- tzdata==2022.1
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 04dd6d78744ee..1896dffa9a105 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -415,3 +415,14 @@ brotli 0.7.0 compression Brotli compression
python-snappy 0.6.1 compression Snappy compression
Zstandard 0.17.0 compression Zstandard compression
========================= ================== =============== =============================================================
+
+Consortium Standard
+^^^^^^^^^^^^^^^^^^^
+
+Installable with ``pip install "pandas[consortium-standard]"``
+
+========================= ================== =================== =============================================================
+Dependency Minimum Version pip extra Notes
+========================= ================== =================== =============================================================
+dataframe-api-compat 0.1.7 consortium-standard Consortium Standard-compatible implementation based on pandas
+========================= ================== =================== =============================================================
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 8b258b40f8ac5..a0c5b4523f782 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -218,6 +218,7 @@ Other enhancements
- Many read/to_* functions, such as :meth:`DataFrame.to_pickle` and :func:`read_csv`, support forwarding compression arguments to lzma.LZMAFile (:issue:`52979`)
- Reductions :meth:`Series.argmax`, :meth:`Series.argmin`, :meth:`Series.idxmax`, :meth:`Series.idxmin`, :meth:`Index.argmax`, :meth:`Index.argmin`, :meth:`DataFrame.idxmax`, :meth:`DataFrame.idxmin` are now supported for object-dtype objects (:issue:`4279`, :issue:`18021`, :issue:`40685`, :issue:`43697`)
- :meth:`DataFrame.to_parquet` and :func:`read_parquet` will now write and read ``attrs`` respectively (:issue:`54346`)
+- Added support for the DataFrame Consortium Standard (:issue:`54383`)
- Performance improvement in :meth:`GroupBy.quantile` (:issue:`51722`)
.. ---------------------------------------------------------------------------
@@ -256,65 +257,67 @@ Increased minimum versions for dependencies
Some minimum supported versions of dependencies were updated.
If installed, we now require:
-+-----------------+-----------------+----------+---------+
-| Package | Minimum Version | Required | Changed |
-+=================+=================+==========+=========+
-| numpy | 1.22.4 | X | X |
-+-----------------+-----------------+----------+---------+
-| mypy (dev) | 1.4.1 | | X |
-+-----------------+-----------------+----------+---------+
-| beautifulsoup4 | 4.11.1 | | X |
-+-----------------+-----------------+----------+---------+
-| bottleneck | 1.3.4 | | X |
-+-----------------+-----------------+----------+---------+
-| fastparquet | 0.8.1 | | X |
-+-----------------+-----------------+----------+---------+
-| fsspec | 2022.05.0 | | X |
-+-----------------+-----------------+----------+---------+
-| hypothesis | 6.46.1 | | X |
-+-----------------+-----------------+----------+---------+
-| gcsfs | 2022.05.0 | | X |
-+-----------------+-----------------+----------+---------+
-| jinja2 | 3.1.2 | | X |
-+-----------------+-----------------+----------+---------+
-| lxml | 4.8.0 | | X |
-+-----------------+-----------------+----------+---------+
-| numba | 0.55.2 | | X |
-+-----------------+-----------------+----------+---------+
-| numexpr | 2.8.0 | | X |
-+-----------------+-----------------+----------+---------+
-| openpyxl | 3.0.10 | | X |
-+-----------------+-----------------+----------+---------+
-| pandas-gbq | 0.17.5 | | X |
-+-----------------+-----------------+----------+---------+
-| psycopg2 | 2.9.3 | | X |
-+-----------------+-----------------+----------+---------+
-| pyreadstat | 1.1.5 | | X |
-+-----------------+-----------------+----------+---------+
-| pyqt5 | 5.15.6 | | X |
-+-----------------+-----------------+----------+---------+
-| pytables | 3.7.0 | | X |
-+-----------------+-----------------+----------+---------+
-| pytest | 7.3.2 | | X |
-+-----------------+-----------------+----------+---------+
-| python-snappy | 0.6.1 | | X |
-+-----------------+-----------------+----------+---------+
-| pyxlsb | 1.0.9 | | X |
-+-----------------+-----------------+----------+---------+
-| s3fs | 2022.05.0 | | X |
-+-----------------+-----------------+----------+---------+
-| scipy | 1.8.1 | | X |
-+-----------------+-----------------+----------+---------+
-| sqlalchemy | 1.4.36 | | X |
-+-----------------+-----------------+----------+---------+
-| tabulate | 0.8.10 | | X |
-+-----------------+-----------------+----------+---------+
-| xarray | 2022.03.0 | | X |
-+-----------------+-----------------+----------+---------+
-| xlsxwriter | 3.0.3 | | X |
-+-----------------+-----------------+----------+---------+
-| zstandard | 0.17.0 | | X |
-+-----------------+-----------------+----------+---------+
++----------------------+-----------------+----------+---------+
+| Package | Minimum Version | Required | Changed |
++======================+=================+==========+=========+
+| numpy | 1.22.4 | X | X |
++----------------------+-----------------+----------+---------+
+| mypy (dev) | 1.4.1 | | X |
++----------------------+-----------------+----------+---------+
+| beautifulsoup4 | 4.11.1 | | X |
++----------------------+-----------------+----------+---------+
+| bottleneck | 1.3.4 | | X |
++----------------------+-----------------+----------+---------+
+| dataframe-api-compat | 0.1.7 | | X |
++----------------------+-----------------+----------+---------+
+| fastparquet | 0.8.1 | | X |
++----------------------+-----------------+----------+---------+
+| fsspec | 2022.05.0 | | X |
++----------------------+-----------------+----------+---------+
+| hypothesis | 6.46.1 | | X |
++----------------------+-----------------+----------+---------+
+| gcsfs | 2022.05.0 | | X |
++----------------------+-----------------+----------+---------+
+| jinja2 | 3.1.2 | | X |
++----------------------+-----------------+----------+---------+
+| lxml | 4.8.0 | | X |
++----------------------+-----------------+----------+---------+
+| numba | 0.55.2 | | X |
++----------------------+-----------------+----------+---------+
+| numexpr | 2.8.0 | | X |
++----------------------+-----------------+----------+---------+
+| openpyxl | 3.0.10 | | X |
++----------------------+-----------------+----------+---------+
+| pandas-gbq | 0.17.5 | | X |
++----------------------+-----------------+----------+---------+
+| psycopg2 | 2.9.3 | | X |
++----------------------+-----------------+----------+---------+
+| pyreadstat | 1.1.5 | | X |
++----------------------+-----------------+----------+---------+
+| pyqt5 | 5.15.6 | | X |
++----------------------+-----------------+----------+---------+
+| pytables | 3.7.0 | | X |
++----------------------+-----------------+----------+---------+
+| pytest | 7.3.2 | | X |
++----------------------+-----------------+----------+---------+
+| python-snappy | 0.6.1 | | X |
++----------------------+-----------------+----------+---------+
+| pyxlsb | 1.0.9 | | X |
++----------------------+-----------------+----------+---------+
+| s3fs | 2022.05.0 | | X |
++----------------------+-----------------+----------+---------+
+| scipy | 1.8.1 | | X |
++----------------------+-----------------+----------+---------+
+| sqlalchemy | 1.4.36 | | X |
++----------------------+-----------------+----------+---------+
+| tabulate | 0.8.10 | | X |
++----------------------+-----------------+----------+---------+
+| xarray | 2022.03.0 | | X |
++----------------------+-----------------+----------+---------+
+| xlsxwriter | 3.0.3 | | X |
++----------------------+-----------------+----------+---------+
+| zstandard | 0.17.0 | | X |
++----------------------+-----------------+----------+---------+
For `optional libraries <https://pandas.pydata.org/docs/getting_started/install.html>`_ the general recommendation is to use the latest version.
The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
diff --git a/environment.yml b/environment.yml
index d0536ea81c8cc..44c0ce37c2957 100644
--- a/environment.yml
+++ b/environment.yml
@@ -115,6 +115,7 @@ dependencies:
- pygments # Code highlighting
- pip:
+ - dataframe-api-compat>=0.1.7
- sphinx-toggleprompt # conda-forge version has stricter pins on jinja2
- typing_extensions; python_version<"3.11"
- tzdata>=2022.1
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index dcd49f65fc4cd..fe4e6457ff08c 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -19,6 +19,7 @@
"blosc": "1.21.0",
"bottleneck": "1.3.4",
"brotli": "0.7.0",
+ "dataframe-api-compat": "0.1.7",
"fastparquet": "0.8.1",
"fsspec": "2022.05.0",
"html5lib": "1.1",
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5ff0882477ca5..8704ee29bca1a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -932,6 +932,21 @@ def __dataframe__(
return PandasDataFrameXchg(self, nan_as_null, allow_copy)
+ def __dataframe_consortium_standard__(
+ self, *, api_version: str | None = None
+ ) -> Any:
+ """
+ Provide entry point to the Consortium DataFrame Standard API.
+
+ This is developed and maintained outside of pandas.
+ Please report any issues to https://github.com/data-apis/dataframe-api-compat.
+ """
+ dataframe_api_compat = import_optional_dependency("dataframe_api_compat")
+ convert_to_standard_compliant_dataframe = (
+ dataframe_api_compat.pandas_standard.convert_to_standard_compliant_dataframe
+ )
+ return convert_to_standard_compliant_dataframe(self, api_version=api_version)
+
# ----------------------------------------------------------------------
@property
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4677dc2274a52..814a770b192bf 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -39,6 +39,7 @@
from pandas._libs.lib import is_range_indexer
from pandas.compat import PYPY
from pandas.compat._constants import REF_COUNT
+from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import (
ChainedAssignmentError,
@@ -955,6 +956,22 @@ def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
arr.flags.writeable = False
return arr
+ # ----------------------------------------------------------------------
+
+ def __column_consortium_standard__(self, *, api_version: str | None = None) -> Any:
+ """
+ Provide entry point to the Consortium DataFrame Standard API.
+
+ This is developed and maintained outside of pandas.
+ Please report any issues to https://github.com/data-apis/dataframe-api-compat.
+ """
+ dataframe_api_compat = import_optional_dependency("dataframe_api_compat")
+ return (
+ dataframe_api_compat.pandas_standard.convert_to_standard_compliant_column(
+ self, api_version=api_version
+ )
+ )
+
# ----------------------------------------------------------------------
# Unary Methods
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index e81c2f9c086b0..c541c5792ec7c 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -334,6 +334,27 @@ def test_from_obscure_array(dtype, array_likes):
tm.assert_index_equal(result, expected)
+def test_dataframe_consortium() -> None:
+ """
+ Test some basic methods of the dataframe consortium standard.
+
+ Full testing is done at https://github.com/data-apis/dataframe-api-compat,
+ this is just to check that the entry point works as expected.
+ """
+ pytest.importorskip("dataframe_api_compat")
+ df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ df = df_pd.__dataframe_consortium_standard__()
+ result_1 = df.get_column_names()
+ expected_1 = ["a", "b"]
+ assert result_1 == expected_1
+
+ ser = Series([1, 2, 3])
+ col = ser.__column_consortium_standard__()
+ result_2 = col.get_value(1)
+ expected_2 = 2
+ assert result_2 == expected_2
+
+
def test_xarray_coerce_unit():
# GH44053
xr = pytest.importorskip("xarray")
diff --git a/pyproject.toml b/pyproject.toml
index b69ac04ae7ed1..fdb9d1c83cc79 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -82,11 +82,13 @@ plot = ['matplotlib>=3.6.1']
output_formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10']
clipboard = ['PyQt5>=5.15.6', 'qtpy>=2.2.0']
compression = ['brotlipy>=0.7.0', 'python-snappy>=0.6.1', 'zstandard>=0.17.0']
+consortium-standard = ['dataframe-api-compat>=0.1.7']
all = ['beautifulsoup4>=4.11.1',
# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297)
#'blosc>=1.21.0',
'bottleneck>=1.3.4',
'brotlipy>=0.7.0',
+ 'dataframe-api-compat>=0.1.7',
'fastparquet>=0.8.1',
'fsspec>=2022.05.0',
'gcsfs>=2022.05.0',
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5bd34130cc525..4af4351413a5b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -84,6 +84,7 @@ feedparser
pyyaml
requests
pygments
+dataframe-api-compat>=0.1.7
sphinx-toggleprompt
typing_extensions; python_version<"3.11"
tzdata>=2022.1
| Here's an entry point to the Consortium's DataFrame API Standard
It enables dataframe-consuming libraries to just check for a `__dataframe_consortium_standard__` attribute on a dataframe they receive - then, so long as they stick to the spec defined in https://data-apis.org/dataframe-api/draft/index.html, then their code should work the same way, regardless of what the original backing dataframe library was
Use-case: currently, scikit-learn is very keen on using this, as they're not keen on having to depend on pyarrow (which will become required in pandas), and the interchange protocol only goes so far (e.g. a way to convert to ndarray is out-of-scope for that). If we can get this to work there, then other use cases may emerge
The current spec should be enough for scikit-learn, and having this entry point makes it easier to move forwards with development (without monkey-patching / special-casing)
For reference, polars has already merged this: https://github.com/pola-rs/polars/pull/10244
## Maintenance burden on pandas
**None**
I want to be very clear about this: the compat package will be developed, maintained, and tested by the consortium and community of libraries which use it. It is up to consuming libraries to set a minimum version of the dataframe-api-compat package. **No responsibility will land on pandas maintainers**. If any bugs are reported to pandas, then they can (and should) be politely closed.
All this is just an entry point to the Consortium's Standard
Tagging @pandas-dev/pandas-core for visibility. Some people raised objections (maintenance burden, naming) when asked in private, and I think I've addressed the concerns. Anything else?
Thanks 🙌 | https://api.github.com/repos/pandas-dev/pandas/pulls/54383 | 2023-08-03T10:18:48Z | 2023-08-07T20:19:26Z | 2023-08-07T20:19:26Z | 2023-08-07T20:19:33Z |
CLN: VariableOffsetWindowIndexer | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 861d802d3ba62..d5bcbcd59b667 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -205,6 +205,7 @@ Other enhancements
- Improve error message when setting :class:`DataFrame` with wrong number of columns through :meth:`DataFrame.isetitem` (:issue:`51701`)
- Improved error handling when using :meth:`DataFrame.to_json` with incompatible ``index`` and ``orient`` arguments (:issue:`52143`)
- Improved error message when creating a DataFrame with empty data (0 rows), no index and an incorrect number of columns. (:issue:`52084`)
+- Improved error message when providing an invalid ``index`` or ``offset`` argument to :class:`pandas.api.indexers.VariableOffsetWindowIndexer` (:issue:`54379`)
- Let :meth:`DataFrame.to_feather` accept a non-default :class:`Index` and non-string column names (:issue:`51787`)
- Added a new parameter ``by_row`` to :meth:`Series.apply` and :meth:`DataFrame.apply`. When set to ``False`` the supplied callables will always operate on the whole Series or DataFrame (:issue:`53400`, :issue:`53601`).
- :meth:`DataFrame.shift` and :meth:`Series.shift` now allow shifting by multiple periods by supplying a list of periods (:issue:`44424`)
diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index e60187f6b542b..694a420ad2494 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -5,11 +5,14 @@
import numpy as np
+from pandas._libs.tslibs import BaseOffset
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
+from pandas.core.indexes.datetimes import DatetimeIndex
+
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
@@ -183,12 +186,16 @@ def __init__(
self,
index_array: np.ndarray | None = None,
window_size: int = 0,
- index=None,
- offset=None,
+ index: DatetimeIndex | None = None,
+ offset: BaseOffset | None = None,
**kwargs,
) -> None:
super().__init__(index_array, window_size, **kwargs)
+ if not isinstance(index, DatetimeIndex):
+ raise ValueError("index must be a DatetimeIndex.")
self.index = index
+ if not isinstance(offset, BaseOffset):
+ raise ValueError("offset must be a DateOffset-like object.")
self.offset = offset
@Appender(get_window_bounds_doc)
@@ -216,6 +223,7 @@ def get_window_bounds(
index_growth_sign = -1
else:
index_growth_sign = 1
+ offset_diff = index_growth_sign * self.offset
start = np.empty(num_values, dtype="int64")
start.fill(-1)
@@ -231,11 +239,12 @@ def get_window_bounds(
else:
end[0] = 0
+ zero = timedelta(0)
# start is start of slice interval (including)
# end is end of slice interval (not including)
for i in range(1, num_values):
end_bound = self.index[i]
- start_bound = self.index[i] - index_growth_sign * self.offset
+ start_bound = end_bound - offset_diff
# left endpoint is closed
if left_closed:
@@ -245,13 +254,15 @@ def get_window_bounds(
# within the constraint
start[i] = i
for j in range(start[i - 1], i):
- if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
+ start_diff = (self.index[j] - start_bound) * index_growth_sign
+ if start_diff > zero:
start[i] = j
break
# end bound is previous end
# or current index
- if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
+ end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign
+ if end_diff <= zero:
end[i] = i + 1
else:
end[i] = end[i - 1]
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index fc4a20e4bb568..104acc1d527cb 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -268,6 +268,19 @@ def test_non_fixed_variable_window_indexer(closed, expected_data):
tm.assert_frame_equal(result, expected)
+def test_variableoffsetwindowindexer_not_dti():
+ # GH 54379
+ with pytest.raises(ValueError, match="index must be a DatetimeIndex."):
+ VariableOffsetWindowIndexer(index="foo", offset=BusinessDay(1))
+
+
+def test_variableoffsetwindowindexer_not_offset():
+ # GH 54379
+ idx = date_range("2020", periods=10)
+ with pytest.raises(ValueError, match="offset must be a DateOffset-like object."):
+ VariableOffsetWindowIndexer(index=idx, offset="foo")
+
+
def test_fixed_forward_indexer_count(step):
# GH: 35579
df = DataFrame({"b": [None, None, None, 7]})
| * Improved typing
* Added validation to arguments
* Clarified steps in `get_window_bounds` | https://api.github.com/repos/pandas-dev/pandas/pulls/54379 | 2023-08-03T01:58:32Z | 2023-08-04T17:06:30Z | 2023-08-04T17:06:30Z | 2023-08-04T17:06:34Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.