title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: various 32bit compat issues | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e0d15c218ec85..09358f77c9c9e 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -824,6 +824,7 @@ Bug Fixes
- Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`)
- Compat with SciPy 0.19.0 for testing on ``.interpolate()`` (:issue:`15662`)
+- Compat for 32-bit platforms for ``.qcut/cut``; bins will now be ``in64`` dtype (:issue:`14866`)
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index f9d4c9107d7cd..00a3264e6c74a 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -169,33 +169,45 @@ def isin(comps, values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__))
- comps = np.asarray(comps)
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
- if not isinstance(values, np.ndarray):
- values = list(values)
+
+ from pandas import DatetimeIndex, PeriodIndex
+
+ if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
+ values = np.array(list(values), dtype='object')
+
+ if needs_i8_conversion(comps):
+ if is_period_dtype(values):
+ comps = PeriodIndex(comps)
+ values = PeriodIndex(values)
+ else:
+ comps = DatetimeIndex(comps)
+ values = DatetimeIndex(values)
+
+ values = values.asi8
+ comps = comps.asi8
+ elif is_bool_dtype(comps):
+
+ try:
+ comps = np.asarray(comps).view('uint8')
+ values = np.asarray(values).view('uint8')
+ except TypeError:
+ # object array conversion will fail
+ pass
+ else:
+ comps = np.asarray(comps)
+ values = np.asarray(values)
# GH11232
# work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
f = lambda x, y: np.in1d(x, np.asarray(list(y)))
- else:
- f = lambda x, y: lib.ismember_int64(x, set(y))
-
- # may need i8 conversion for proper membership testing
- if is_datetime64_dtype(comps):
- from pandas.tseries.tools import to_datetime
- values = to_datetime(values)._values.view('i8')
- comps = comps.view('i8')
- elif is_timedelta64_dtype(comps):
- from pandas.tseries.timedeltas import to_timedelta
- values = to_timedelta(values)._values.view('i8')
- comps = comps.view('i8')
elif is_int64_dtype(comps):
- pass
+ f = lambda x, y: lib.ismember_int64(x, set(y))
else:
f = lambda x, y: lib.ismember(x, set(values))
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index f67231e78983c..0c274b2f6c4ff 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -1359,7 +1359,7 @@ def test_hash_collisions(self):
names=['one', 'two'])
result = index.get_indexer(index.values)
self.assert_numpy_array_equal(result,
- np.arange(len(index), dtype='int64'))
+ np.arange(len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
diff --git a/pandas/tests/tools/test_tile.py b/pandas/tests/tools/test_tile.py
index 11b242bc06e15..cc80c1ff5db29 100644
--- a/pandas/tests/tools/test_tile.py
+++ b/pandas/tests/tools/test_tile.py
@@ -19,8 +19,8 @@ class TestCut(tm.TestCase):
def test_simple(self):
data = np.ones(5)
result = cut(data, 4, labels=False)
- desired = np.array([1, 1, 1, 1, 1])
- tm.assert_numpy_array_equal(result, desired,
+ expected = np.array([1, 1, 1, 1, 1])
+ tm.assert_numpy_array_equal(result, expected,
check_dtype=False)
def test_bins(self):
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index ccd8c2478e8a5..4a3d452228e01 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -4,7 +4,7 @@
from pandas.types.missing import isnull
from pandas.types.common import (is_float, is_integer,
- is_scalar)
+ is_scalar, _ensure_int64)
from pandas.core.api import Series
from pandas.core.categorical import Categorical
@@ -215,7 +215,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None,
bins = unique_bins
side = 'left' if right else 'right'
- ids = bins.searchsorted(x, side=side)
+ ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
| closes #14866
xref #14183 | https://api.github.com/repos/pandas-dev/pandas/pulls/15766 | 2017-03-21T14:43:53Z | 2017-03-21T17:41:03Z | null | 2017-03-21T17:41:58Z |
CLN: replace _interleave_dtype with _find_common_type | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index d036049e3ffdb..e0d15c218ec85 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -826,7 +826,7 @@ Bug Fixes
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
-- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`)
+- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 60684a929889b..6487c2108028e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -9,7 +9,8 @@
from pandas.core.base import PandasObject
-from pandas.types.dtypes import DatetimeTZDtype, CategoricalDtype
+from pandas.types.dtypes import (ExtensionDtype, DatetimeTZDtype,
+ CategoricalDtype)
from pandas.types.common import (_TD_DTYPE, _NS_DTYPE,
_ensure_int64, _ensure_platform_int,
is_integer,
@@ -4496,55 +4497,13 @@ def _interleaved_dtype(blocks):
if not len(blocks):
return None
- counts = defaultdict(list)
- for x in blocks:
- counts[type(x)].append(x)
-
- have_int = len(counts[IntBlock]) > 0
- have_bool = len(counts[BoolBlock]) > 0
- have_object = len(counts[ObjectBlock]) > 0
- have_float = len(counts[FloatBlock]) > 0
- have_complex = len(counts[ComplexBlock]) > 0
- have_dt64 = len(counts[DatetimeBlock]) > 0
- have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
- have_td64 = len(counts[TimeDeltaBlock]) > 0
- have_cat = len(counts[CategoricalBlock]) > 0
- # TODO: have_sparse is not used
- have_sparse = len(counts[SparseBlock]) > 0 # noqa
- have_numeric = have_float or have_complex or have_int
- has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
-
- if (have_object or
- (have_bool and
- (have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
- (have_numeric and has_non_numeric) or have_cat or have_dt64 or
- have_dt64_tz or have_td64):
- return np.dtype(object)
- elif have_bool:
- return np.dtype(bool)
- elif have_int and not have_float and not have_complex:
- # if we are mixing unsigned and signed, then return
- # the next biggest int type (if we can)
- lcd = _find_common_type([b.dtype for b in counts[IntBlock]])
- kinds = set([i.dtype.kind for i in counts[IntBlock]])
- if len(kinds) == 1:
- return lcd
-
- if lcd == 'uint64' or lcd == 'int64':
- return np.dtype('int64')
-
- # return 1 bigger on the itemsize if unsinged
- if lcd.kind == 'u':
- return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
- return lcd
-
- elif have_int and have_float and not have_complex:
- return np.dtype('float64')
- elif have_complex:
- return np.dtype('c16')
- else:
- introspection_blks = counts[FloatBlock] + counts[SparseBlock]
- return _find_common_type([b.dtype for b in introspection_blks])
+ dtype = _find_common_type([b.dtype for b in blocks])
+
+ # only numpy compat
+ if isinstance(dtype, ExtensionDtype):
+ dtype = np.object
+
+ return dtype
def _consolidate(blocks):
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index df95f563c0832..7216c05657102 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1183,19 +1183,9 @@ def _assert_replace_conversion(self, from_key, to_key, how):
result = obj.replace(replacer)
- # buggy on windows for bool/int64
- if (from_key == 'bool' and
- to_key == 'int64' and
- tm.is_platform_windows()):
- pytest.skip("windows platform buggy: {0} -> {1}".format
- (from_key, to_key))
-
- if ((from_key == 'float64' and to_key in ('bool', 'int64')) or
+ if ((from_key == 'float64' and to_key in ('int64')) or
(from_key == 'complex128' and
- to_key in ('bool', 'int64', 'float64')) or
-
- # GH12747 The result must be int?
- (from_key == 'int64' and to_key in ('bool'))):
+ to_key in ('int64', 'float64'))):
# buggy on 32-bit
if tm.is_platform_32bit():
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index f5a25e93cc82d..0a53581e24ba5 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -152,8 +152,8 @@ def check_replace(to_rep, val, expected):
tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
check_replace(tr, v, e)
- # casts to float
- e = pd.Series([0, 1, 2, 3.5, 1])
+ # casts to object
+ e = pd.Series([0, 1, 2, 3.5, True], dtype='object')
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py
index 70f69cc7d5701..d7b086daea1e3 100644
--- a/pandas/tests/types/test_cast.py
+++ b/pandas/tests/types/test_cast.py
@@ -238,6 +238,20 @@ def test_numpy_dtypes(self):
((np.object, np.float32), np.object),
((np.object, np.int16), np.object),
+ # bool with int
+ ((np.dtype('bool'), np.int64), np.object),
+ ((np.dtype('bool'), np.int32), np.object),
+ ((np.dtype('bool'), np.int16), np.object),
+ ((np.dtype('bool'), np.int8), np.object),
+ ((np.dtype('bool'), np.uint64), np.object),
+ ((np.dtype('bool'), np.uint32), np.object),
+ ((np.dtype('bool'), np.uint16), np.object),
+ ((np.dtype('bool'), np.uint8), np.object),
+
+ # bool with float
+ ((np.dtype('bool'), np.float64), np.object),
+ ((np.dtype('bool'), np.float32), np.object),
+
((np.dtype('datetime64[ns]'), np.dtype('datetime64[ns]')),
np.dtype('datetime64[ns]')),
((np.dtype('timedelta64[ns]'), np.dtype('timedelta64[ns]')),
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 11a837dd21159..0e26cd085db5a 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -892,12 +892,28 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
def _find_common_type(types):
- """Find a common data type among the given dtypes."""
+ """
+ Find a common data type among the given dtypes.
+
+ Parameters
+ ----------
+ types : list of dtypes
+
+ Returns
+ -------
+ pandas extension or numpy dtype
+
+ See Also
+ --------
+ numpy.find_common_type
+
+ """
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
+
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
@@ -912,4 +928,14 @@ def _find_common_type(types):
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
+ # don't mix bool / int or float or complex
+ # this is different from numpy, which casts bool with float/int as int
+ has_bools = any(is_bool_dtype(t) for t in types)
+ if has_bools:
+ has_ints = any(is_integer_dtype(t) for t in types)
+ has_floats = any(is_float_dtype(t) for t in types)
+ has_complex = any(is_complex_dtype(t) for t in types)
+ if has_ints or has_floats or has_complex:
+ return np.object
+
return np.find_common_type(types, [])
| xref #15736
xref #12780 | https://api.github.com/repos/pandas-dev/pandas/pulls/15765 | 2017-03-21T13:35:18Z | 2017-03-21T14:50:13Z | null | 2017-03-21T14:53:08Z |
BUG: Series.asof fails for all NaN Series (GH15713) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index ebdd4060f0588..5a3b99d6bec0d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -930,3 +930,5 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+
+- Bug in ``Series.asof`` which raised an error if the series contained all ``nans`` (:issue:`15713`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1db9677659ca3..7ca3d10b7977f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3972,6 +3972,14 @@ def asof(self, where, subset=None):
where = Index(where) if is_list else Index([where])
nulls = self.isnull() if is_series else self[subset].isnull().any(1)
+ if nulls.all():
+ if is_series:
+ return pd.Series(np.nan, index=where, name=self.name)
+ elif is_list:
+ return pd.DataFrame(np.nan, index=where, columns=self.columns)
+ else:
+ return pd.Series(np.nan, index=self.columns, name=where[0])
+
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py
index 8bb26d3d7474c..dd03f8f7cb7a9 100644
--- a/pandas/tests/frame/test_asof.py
+++ b/pandas/tests/frame/test_asof.py
@@ -4,22 +4,19 @@
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
-from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData, tm.TestCase):
-
def setUp(self):
self.N = N = 50
- rng = date_range('1/1/1990', periods=N, freq='53s')
+ self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
- index=rng)
+ index=self.rng)
def test_basic(self):
-
df = self.df.copy()
df.loc[15:30, 'A'] = np.nan
dates = date_range('1/1/1990', periods=self.N * 3,
@@ -39,7 +36,6 @@ def test_basic(self):
self.assertTrue((rs == 14).all(1).all())
def test_subset(self):
-
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
@@ -51,19 +47,19 @@ def test_subset(self):
# with a subset of A should be the same
result = df.asof(dates, subset='A')
expected = df.asof(dates)
- assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=['A', 'B'])
expected = df.asof(dates)
- assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
# B gives self.df.asof
result = df.asof(dates, subset='B')
expected = df.resample('25s', closed='right').ffill().reindex(dates)
expected.iloc[20:] = 9
- assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
def test_missing(self):
# GH 15118
@@ -75,9 +71,38 @@ def test_missing(self):
result = df.asof('1989-12-31')
expected = Series(index=['A', 'B'], name=Timestamp('1989-12-31'))
- assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
result = df.asof(to_datetime(['1989-12-31']))
expected = DataFrame(index=to_datetime(['1989-12-31']),
columns=['A', 'B'], dtype='float64')
- assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
+
+ def test_all_nans(self):
+ # GH 15713
+ # DataFrame is all nans
+ result = DataFrame([np.nan]).asof([0])
+ expected = DataFrame([np.nan])
+ tm.assert_frame_equal(result, expected)
+
+ # testing non-default indexes, multiple inputs
+ dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
+ result = DataFrame(np.nan, index=self.rng, columns=['A']).asof(dates)
+ expected = DataFrame(np.nan, index=dates, columns=['A'])
+ tm.assert_frame_equal(result, expected)
+
+ # testing multiple columns
+ dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
+ result = DataFrame(np.nan, index=self.rng,
+ columns=['A', 'B', 'C']).asof(dates)
+ expected = DataFrame(np.nan, index=dates, columns=['A', 'B', 'C'])
+ tm.assert_frame_equal(result, expected)
+
+ # testing scalar input
+ result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof([3])
+ expected = DataFrame(np.nan, index=[3], columns=['A', 'B'])
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof(3)
+ expected = Series(np.nan, index=['A', 'B'], name=3)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_asof.py b/pandas/tests/series/test_asof.py
index d2fd8858e7647..ee6ab15b8963a 100644
--- a/pandas/tests/series/test_asof.py
+++ b/pandas/tests/series/test_asof.py
@@ -148,3 +148,28 @@ def test_errors(self):
s = Series(np.random.randn(N), index=rng)
with self.assertRaises(ValueError):
s.asof(s.index[0], subset='foo')
+
+ def test_all_nans(self):
+ # GH 15713
+ # series is all nans
+ result = Series([np.nan]).asof([0])
+ expected = Series([np.nan])
+ tm.assert_series_equal(result, expected)
+
+ # testing non-default indexes
+ N = 50
+ rng = date_range('1/1/1990', periods=N, freq='53s')
+
+ dates = date_range('1/1/1990', periods=N * 3, freq='25s')
+ result = Series(np.nan, index=rng).asof(dates)
+ expected = Series(np.nan, index=dates)
+ tm.assert_series_equal(result, expected)
+
+ # testing scalar input
+ date = date_range('1/1/1990', periods=N * 3, freq='25s')[0]
+ result = Series(np.nan, index=rng).asof(date)
+ assert isnull(result)
+
+ # test name is propagated
+ result = Series(np.nan, index=[1, 2, 3, 4], name='test').asof([4, 5])
+ self.assertEqual(result.name, 'test')
| - [x] closes bug #15713
- [x] 1 tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
Added the test if the series is all nans
Added the code that check if that's the case: if yes, return the expected output
As this is my first contribution, please comment if I did it right :)
Thanks!
| https://api.github.com/repos/pandas-dev/pandas/pulls/15758 | 2017-03-21T03:05:24Z | 2017-03-26T02:31:15Z | null | 2017-03-26T04:01:09Z |
MAINT: Drop convert_objects from NDFrame | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 8482eef552c17..81d48ff3bacb4 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -95,17 +95,16 @@ will be completed:
df2.append df2.combine_first
df2.apply df2.compound
df2.applymap df2.consolidate
- df2.as_blocks df2.convert_objects
- df2.asfreq df2.copy
- df2.as_matrix df2.corr
- df2.astype df2.corrwith
- df2.at df2.count
- df2.at_time df2.cov
- df2.axes df2.cummax
- df2.B df2.cummin
- df2.between_time df2.cumprod
- df2.bfill df2.cumsum
- df2.blocks df2.D
+ df2.as_blocks df2.copy
+ df2.asfreq df2.corr
+ df2.as_matrix df2.corrwith
+ df2.astype df2.count
+ df2.at df2.cov
+ df2.at_time df2.cummax
+ df2.axes df2.cumprod
+ df2.between_time df2.cumsum
+ df2.bfill df2.D
+ df2.blocks
As you can see, the columns ``A``, ``B``, ``C``, and ``D`` are automatically
tab completed. ``E`` is there as well; the rest of the attributes have been
diff --git a/doc/source/api.rst b/doc/source/api.rst
index d6053791d6f4b..1a62cde362ede 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -776,7 +776,6 @@ Conversion
:toctree: generated/
DataFrame.astype
- DataFrame.convert_objects
DataFrame.copy
DataFrame.isnull
DataFrame.notnull
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 015fdf1f45f47..4c129e5b76dd4 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -122,6 +122,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``convert_objects`` method (:issue:`11173`)
- :func:`read_excel()` has dropped the ``has_index_names`` parameter (:issue:`10967`)
- The ``pd.options.display.mpl_style`` configuration has been dropped (:issue:`12190`)
- ``Index`` has dropped the ``.sym_diff()`` method in favor of ``.symmetric_difference()`` (:issue:`12591`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6559fc4c24ce2..57a175fa56b8a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3808,7 +3808,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
- # convert_objects just in case
+ # Convert objects just in case.
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5722539b87aec..a0121bb4258d1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3599,51 +3599,6 @@ def _convert(self, datetime=False, numeric=False, timedelta=False,
timedelta=timedelta, coerce=coerce,
copy=copy)).__finalize__(self)
- # TODO: Remove in 0.18 or 2017, which ever is sooner
- def convert_objects(self, convert_dates=True, convert_numeric=False,
- convert_timedeltas=True, copy=True):
- """
- Deprecated.
- Attempt to infer better dtype for object columns
-
- Parameters
- ----------
- convert_dates : boolean, default True
- If True, convert to date where possible. If 'coerce', force
- conversion, with unconvertible values becoming NaT.
- convert_numeric : boolean, default False
- If True, attempt to coerce to numbers (including strings), with
- unconvertible values becoming NaN.
- convert_timedeltas : boolean, default True
- If True, convert to timedelta where possible. If 'coerce', force
- conversion, with unconvertible values becoming NaT.
- copy : boolean, default True
- If True, return a copy even if no copy is necessary (e.g. no
- conversion was done). Note: This is meant for internal use, and
- should not be confused with inplace.
-
- See Also
- --------
- pandas.to_datetime : Convert argument to datetime.
- pandas.to_timedelta : Convert argument to timedelta.
- pandas.to_numeric : Return a fixed frequency timedelta index,
- with day as the default.
-
- Returns
- -------
- converted : same as input object
- """
- from warnings import warn
- warn("convert_objects is deprecated. Use the data-type specific "
- "converters pd.to_datetime, pd.to_timedelta and pd.to_numeric.",
- FutureWarning, stacklevel=2)
-
- return self._constructor(
- self._data.convert(convert_dates=convert_dates,
- convert_numeric=convert_numeric,
- convert_timedeltas=convert_timedeltas,
- copy=copy)).__finalize__(self)
-
# ----------------------------------------------------------------------
# Filling NA's
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f2a7ac76481d4..29a6276aee4bd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -768,7 +768,7 @@ def _is_empty_indexer(indexer):
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
- # may have to soft convert_objects here
+ # May have to soft convert objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
@@ -1850,12 +1850,13 @@ def is_bool(self):
"""
return lib.is_bool_array(self.values.ravel())
- # TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
- """ attempt to coerce any object types to better types return a copy of
- the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
+ """
+ Attempt to coerce any object types to more specific data types.
+ If copy = True, return a copy of the block.
- can return multiple blocks!
+ NOTE: This function can can return multiple blocks!
+ NOTE: By definition, we are an ObjectBlock!
"""
if args:
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index 2c8bf57f20fae..5a4a402b8e02d 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -206,10 +206,8 @@ def test_groupby_blacklist(df_letters):
s = df_letters.floats
blacklist = [
- 'eval', 'query', 'abs', 'where',
- 'mask', 'align', 'groupby', 'clip', 'astype',
- 'at', 'combine', 'consolidate', 'convert_objects',
- ]
+ 'eval', 'query', 'abs', 'where', 'mask', 'align',
+ 'groupby', 'clip', 'astype', 'at', 'combine', 'consolidate']
to_methods = [method for method in dir(df) if method.startswith('to_')]
blacklist.extend(to_methods)
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 79e23459ac992..4e4754efba5fc 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -18,131 +18,6 @@
class TestSeriesInternals(object):
- def test_convert_objects(self):
-
- s = Series([1., 2, 3], index=['a', 'b', 'c'])
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates=False,
- convert_numeric=True)
- assert_series_equal(result, s)
-
- # force numeric conversion
- r = s.copy().astype('O')
- r['a'] = '1'
- with tm.assert_produces_warning(FutureWarning):
- result = r.convert_objects(convert_dates=False,
- convert_numeric=True)
- assert_series_equal(result, s)
-
- r = s.copy().astype('O')
- r['a'] = '1.'
- with tm.assert_produces_warning(FutureWarning):
- result = r.convert_objects(convert_dates=False,
- convert_numeric=True)
- assert_series_equal(result, s)
-
- r = s.copy().astype('O')
- r['a'] = 'garbled'
- expected = s.copy()
- expected['a'] = np.nan
- with tm.assert_produces_warning(FutureWarning):
- result = r.convert_objects(convert_dates=False,
- convert_numeric=True)
- assert_series_equal(result, expected)
-
- # GH 4119, not converting a mixed type (e.g.floats and object)
- s = Series([1, 'na', 3, 4])
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_numeric=True)
- expected = Series([1, np.nan, 3, 4])
- assert_series_equal(result, expected)
-
- s = Series([1, '', 3, 4])
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_numeric=True)
- expected = Series([1, np.nan, 3, 4])
- assert_series_equal(result, expected)
-
- # dates
- s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
- datetime(2001, 1, 3, 0, 0)])
- s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
- datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
- Timestamp('20010104'), '20010105'],
- dtype='O')
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates=True,
- convert_numeric=False)
- expected = Series([Timestamp('20010101'), Timestamp('20010102'),
- Timestamp('20010103')], dtype='M8[ns]')
- assert_series_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce',
- convert_numeric=False)
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce',
- convert_numeric=True)
- assert_series_equal(result, expected)
-
- expected = Series([Timestamp('20010101'), Timestamp('20010102'),
- Timestamp('20010103'),
- lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
- Timestamp('20010105')], dtype='M8[ns]')
- with tm.assert_produces_warning(FutureWarning):
- result = s2.convert_objects(convert_dates='coerce',
- convert_numeric=False)
- assert_series_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning):
- result = s2.convert_objects(convert_dates='coerce',
- convert_numeric=True)
- assert_series_equal(result, expected)
-
- # preserver all-nans (if convert_dates='coerce')
- s = Series(['foo', 'bar', 1, 1.0], dtype='O')
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce',
- convert_numeric=False)
- expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
- assert_series_equal(result, expected)
-
- # preserver if non-object
- s = Series([1], dtype='float32')
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce',
- convert_numeric=False)
- assert_series_equal(result, s)
-
- # r = s.copy()
- # r[0] = np.nan
- # result = r.convert_objects(convert_dates=True,convert_numeric=False)
- # assert result.dtype == 'M8[ns]'
-
- # dateutil parses some single letters into today's value as a date
- for x in 'abcdefghijklmnopqrstuvwxyz':
- s = Series([x])
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce')
- assert_series_equal(result, s)
- s = Series([x.upper()])
- with tm.assert_produces_warning(FutureWarning):
- result = s.convert_objects(convert_dates='coerce')
- assert_series_equal(result, s)
-
- def test_convert_objects_preserve_bool(self):
- s = Series([1, True, 3, 5], dtype=object)
- with tm.assert_produces_warning(FutureWarning):
- r = s.convert_objects(convert_numeric=True)
- e = Series([1, 1, 3, 5], dtype='i8')
- tm.assert_series_equal(r, e)
-
- def test_convert_objects_preserve_all_bool(self):
- s = Series([False, True, False, False], dtype=object)
- with tm.assert_produces_warning(FutureWarning):
- r = s.convert_objects(convert_numeric=True)
- e = Series([False, True, False, False], dtype=bool)
- tm.assert_series_equal(r, e)
-
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
| Deprecated since 0.17.0
xref #11173 | https://api.github.com/repos/pandas-dev/pandas/pulls/15757 | 2017-03-21T01:46:21Z | 2017-07-13T14:34:28Z | null | 2017-07-13T14:49:37Z |
ENH: support "nrows" and "chunksize" together | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index d036049e3ffdb..2be809fbf548d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -291,6 +291,7 @@ Other enhancements
- ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`)
- The ``usecols`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`14154`)
- The ``skiprows`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`10882`)
+- The ``nrows`` and ``chunksize`` arguments in ``pd.read_csv`` are supported if both are passed (:issue:`15755`)
- ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`)
- ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`)
- ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 9aedddc811830..18343670fb39e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -384,29 +384,18 @@ def _read(filepath_or_buffer, kwds):
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = kwds.get('chunksize', None)
- nrows = _validate_nrows(kwds.pop('nrows', None))
+ nrows = _validate_nrows(kwds.get('nrows', None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
- if (nrows is not None) and (chunksize is not None):
- raise NotImplementedError("'nrows' and 'chunksize' cannot be used"
- " together yet.")
- elif nrows is not None:
- try:
- data = parser.read(nrows)
- finally:
- parser.close()
- return data
-
- elif chunksize or iterator:
+ if chunksize or iterator:
return parser
try:
- data = parser.read()
+ data = parser.read(nrows)
finally:
parser.close()
-
return data
@@ -445,7 +434,7 @@ def _read(filepath_or_buffer, kwds):
'usecols': None,
- # 'nrows': None,
+ 'nrows': None,
# 'iterator': False,
'chunksize': None,
'verbose': False,
@@ -749,6 +738,7 @@ def __init__(self, f, engine=None, **kwds):
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop('chunksize', None)
+ self.nrows = options.pop('nrows', None)
self.squeeze = options.pop('squeeze', False)
# might mutate self.engine
@@ -1009,6 +999,10 @@ def _create_index(self, ret):
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
+ if self.nrows is not None:
+ if self._currow >= self.nrows:
+ raise StopIteration
+ size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index df75d14e9702d..120d2bb8f6759 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -402,6 +402,30 @@ def test_read_chunksize(self):
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
+ # With nrows
+ reader = self.read_csv(StringIO(self.data1), index_col=0,
+ chunksize=2, nrows=5)
+ df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
+
+ tm.assert_frame_equal(pd.concat(reader), df)
+
+ # chunksize > nrows
+ reader = self.read_csv(StringIO(self.data1), index_col=0,
+ chunksize=8, nrows=5)
+ df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
+
+ tm.assert_frame_equal(pd.concat(reader), df)
+
+ # with changing "size":
+ reader = self.read_csv(StringIO(self.data1), index_col=0,
+ chunksize=8, nrows=5)
+ df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
+
+ tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
+ tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
+ with tm.assertRaises(StopIteration):
+ reader.get_chunk(size=3)
+
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 999db47cf2eaf..48dd5d4ba506b 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -29,15 +29,6 @@ def test_mangle_dupe_cols_false(self):
read_csv(StringIO(data), engine=engine,
mangle_dupe_cols=False)
- def test_nrows_and_chunksize(self):
- data = 'a b c'
- msg = "cannot be used together yet"
-
- for engine in ('c', 'python'):
- with tm.assertRaisesRegexp(NotImplementedError, msg):
- read_csv(StringIO(data), engine=engine,
- nrows=10, chunksize=5)
-
def test_c_engine(self):
# see gh-6607
data = 'a b c\n1 2 3'
| - [x] closes #15755
- [x] tests added / passed
- [x] passes ``git diff master | flake8 --diff`` (except for ``whatsnew/v0.20.0.txt``, I don't know why)
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15756 | 2017-03-21T00:41:13Z | 2017-03-21T18:06:01Z | null | 2017-03-21T18:54:48Z |
DOC: Patch new flake8 command grep | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7961780d0c79b..7ad5916a8809d 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -527,7 +527,12 @@ unused function. However, style-checking the diff will not catch this because
the actual import is not part of the diff. Thus, for completeness, you should
run this command, though it will take longer::
- git diff master --name-only -- '*.py' | grep 'pandas' | xargs -r flake8
+ git diff master --name-only -- '*.py' | grep 'pandas/' | xargs -r flake8
+
+Note that on OSX, the ``-r`` flag is not available, so you have to omit it and
+run this slightly modified command::
+
+ git diff master --name-only -- '*.py' | grep 'pandas/' | xargs flake8
Backwards Compatibility
~~~~~~~~~~~~~~~~~~~~~~~
| The grep was initially matching to "pandas," which is incorrect because that was also matching files containing "pandas" in the name but that were not in the main `pandas` directory (e.g. performance test code). This change enforces that we match to any Python files in the main `pandas` directory.
Also picked up compatibility issue with OSX, in which the `-r` flag does not exist. However, `xargs` terminates if the argument list is empty, which was the whole point of passing in `-r` in the first place.
Follow-up to #15712 | https://api.github.com/repos/pandas-dev/pandas/pulls/15749 | 2017-03-20T19:01:50Z | 2017-03-20T19:39:25Z | null | 2017-03-21T00:39:14Z |
MAINT: Remove Long and WidePanel | diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index 56ccc94c414fb..a7e530e7f5ef1 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -25,11 +25,6 @@
except:
pass
-try:
- Panel = Panel
-except Exception:
- Panel = WidePanel
-
# didn't add to namespace until later
try:
from pandas.core.index import MultiIndex
diff --git a/bench/bench_join_panel.py b/bench/bench_join_panel.py
index f3c3f8ba15f70..113b317dd8ff8 100644
--- a/bench/bench_join_panel.py
+++ b/bench/bench_join_panel.py
@@ -45,8 +45,8 @@ def reindex_on_axis(panels, axis, axis_reindex):
return p
-# does the job but inefficient (better to handle like you read a table in
-# pytables...e.g create a LongPanel then convert to Wide)
+# Does the job but inefficient. It is better to handle
+# this like you read a table in pytables.
def create_panels_join(cls, panels):
""" given an array of panels's, create a single panel """
panels = [a for a in panels if a is not None]
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 98407aacb993b..ebdd4060f0588 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -772,6 +772,7 @@ Removal of prior version deprecations/changes
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
+- The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 65253dedb8b53..5018de39ca907 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -15,7 +15,7 @@
from pandas.core.series import Series
from pandas.core.frame import DataFrame
-from pandas.core.panel import Panel, WidePanel
+from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 4a6c6cf291316..5c7b66a2d1356 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -4,8 +4,6 @@
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
-import warnings
-
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
@@ -1556,24 +1554,3 @@ def f(self, other, axis=0):
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
-
-
-# legacy
-class WidePanel(Panel):
-
- def __init__(self, *args, **kwargs):
- # deprecation, #10892
- warnings.warn("WidePanel is deprecated. Please use Panel",
- FutureWarning, stacklevel=2)
-
- super(WidePanel, self).__init__(*args, **kwargs)
-
-
-class LongPanel(DataFrame):
-
- def __init__(self, *args, **kwargs):
- # deprecation, #10892
- warnings.warn("LongPanel is deprecated. Please use DataFrame",
- FutureWarning, stacklevel=2)
-
- super(LongPanel, self).__init__(*args, **kwargs)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 73222c246fc70..2c7dcf2501f32 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -54,8 +54,7 @@ class TestPDApi(Base, tm.TestCase):
'TimedeltaIndex', 'Timestamp']
# these are already deprecated; awaiting removal
- deprecated_classes = ['WidePanel', 'Panel4D',
- 'SparseList', 'Expr', 'Term']
+ deprecated_classes = ['Panel4D', 'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 40866b8702fe2..324160d5b1ae6 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -3017,9 +3017,6 @@ def _check(left, right):
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
- def test_longpanel(self):
- pass
-
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index ab0322abbcf06..13e16f3b90730 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -178,10 +178,6 @@ def wrapper(x):
class SafeForSparse(object):
- @classmethod
- def assert_panel_equal(cls, x, y):
- assert_panel_equal(x, y)
-
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
@@ -346,10 +342,10 @@ def check_op(op, name):
def test_combinePanel(self):
result = self.panel.add(self.panel)
- self.assert_panel_equal(result, self.panel * 2)
+ assert_panel_equal(result, self.panel * 2)
def test_neg(self):
- self.assert_panel_equal(-self.panel, self.panel * -1)
+ assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
@@ -369,22 +365,22 @@ def test_select(self):
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
- self.assert_panel_equal(result, expected)
+ assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
- self.assert_panel_equal(result, expected)
+ assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
- self.assert_panel_equal(result, expected)
+ assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
- self.assert_panel_equal(result, p.reindex(items=[]))
+ assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
@@ -399,8 +395,8 @@ def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
- self.assert_panel_equal(result, expected)
- self.assert_panel_equal(result2, expected)
+ assert_panel_equal(result, expected)
+ assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
@@ -867,10 +863,6 @@ def test_set_value(self):
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
- @classmethod
- def assert_panel_equal(cls, x, y):
- assert_panel_equal(x, y)
-
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
@@ -1967,7 +1959,7 @@ def test_round(self):
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
- self.assert_panel_equal(expected, result)
+ assert_panel_equal(expected, result)
def test_numpy_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
@@ -1983,7 +1975,7 @@ def test_numpy_round(self):
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = np.round(p)
- self.assert_panel_equal(expected, result)
+ assert_panel_equal(expected, result)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.round, p, out=p)
@@ -2270,15 +2262,12 @@ def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
-class TestLongPanel(tm.TestCase):
+class TestPanelFrame(tm.TestCase):
"""
- LongPanel no longer exists, but...
+ Check that conversions to and from Panel to DataFrame work.
"""
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
-
panel = tm.makePanel()
tm.add_nans(panel)
diff --git a/vb_suite/pandas_vb_common.py b/vb_suite/pandas_vb_common.py
index bd2e8a1c1d504..41e43d6ab10e5 100644
--- a/vb_suite/pandas_vb_common.py
+++ b/vb_suite/pandas_vb_common.py
@@ -18,11 +18,6 @@
except:
import pandas._libs.lib as lib
-try:
- Panel = WidePanel
-except Exception:
- pass
-
# didn't add to namespace until later
try:
from pandas.core.index import MultiIndex
| Deprecated since 0.17.0.
xref #10892 | https://api.github.com/repos/pandas-dev/pandas/pulls/15748 | 2017-03-20T18:26:53Z | 2017-03-20T19:47:49Z | 2017-03-20T19:47:49Z | 2017-03-21T00:39:17Z |
Return mode even if single value (#15714) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 29d05ddcfb497..66de10c84ff36 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -924,3 +924,4 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+- Bug in ``.mode()`` where ``mode`` was not returned for a single value (:issue:`15714`)
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index fa373905ef08a..cf6b3d9ce5c10 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -233,7 +233,7 @@ def mode_{{dtype}}(ndarray[{{ctype}}] values):
def mode_{{dtype}}({{ctype}}[:] values):
{{endif}}
cdef:
- int count, max_count = 2
+ int count, max_count = 1
int j = -1 # so you can do +=
Py_ssize_t k
kh_{{table_type}}_t *table
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index af51c7f2e2dc1..579f8fe791e06 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1876,8 +1876,7 @@ def mode(self):
"""
Returns the mode(s) of the Categorical.
- Empty if nothing occurs at least 2 times. Always returns `Categorical`
- even if only one value.
+ Always returns `Categorical` even if only one value.
Returns
-------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 987eb10101f12..51a39f4b69693 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5168,9 +5168,8 @@ def _get_agg_axis(self, axis_num):
def mode(self, axis=0, numeric_only=False):
"""
- Gets the mode(s) of each element along the axis selected. Empty if
- nothing has 2+ occurrences. Adds a row for each mode per label, fills
- in gaps with nan.
+ Gets the mode(s) of each element along the axis selected. Adds a row
+ for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cfa25ca1299eb..2ae98fc77db53 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1192,8 +1192,7 @@ def count(self, level=None):
def mode(self):
"""Return the mode(s) of the dataset.
- Empty if nothing occurs at least 2 times. Always returns Series even
- if only one value is returned.
+ Always returns Series even if only one value is returned.
Returns
-------
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6c917444f9f43..dec233a4421d2 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -809,18 +809,23 @@ def test_mode(self):
"E": [8, 8, 1, 1, 3, 3]})
tm.assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
- expected = pd.Series([], dtype='int64', name='D').to_frame()
+ expected = pd.Series([0, 1, 2, 3, 4, 5], dtype='int64', name='D').\
+ to_frame()
tm.assert_frame_equal(df[["D"]].mode(), expected)
expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
tm.assert_frame_equal(df[["E"]].mode(), expected)
tm.assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
tm.assert_frame_equal(df.mode(),
- pd.DataFrame({"A": [12, np.nan, np.nan],
- "B": [10, np.nan, np.nan],
- "C": [8, 9, np.nan],
- "D": [np.nan, np.nan, np.nan],
- "E": [1, 3, 8]}))
+ pd.DataFrame({"A": [12, np.nan, np.nan, np.nan,
+ np.nan, np.nan],
+ "B": [10, np.nan, np.nan, np.nan,
+ np.nan, np.nan],
+ "C": [8, 9, np.nan, np.nan, np.nan,
+ np.nan],
+ "D": [0, 1, 2, 3, 4, 5],
+ "E": [1, 3, 8, np.nan, np.nan,
+ np.nan]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
@@ -837,20 +842,12 @@ def test_mode(self):
df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
- exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
- "B": pd.Series([], dtype=df["B"].dtype),
- "C": pd.Series([], dtype=df["C"].dtype)})
- tm.assert_frame_equal(df.mode(), exp)
-
- # and also when not empty
- df.loc[1, "A"] = 0
- df.loc[4, "B"] = df.loc[3, "B"]
- df.loc[5, "C"] = 'e'
- exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
- "B": pd.Series([df.loc[3, "B"]],
+ exp = pd.DataFrame({"A": pd.Series(np.arange(6, dtype='int64'),
+ dtype=df["A"].dtype),
+ "B": pd.Series(pd.date_range('2011', periods=6),
dtype=df["B"].dtype),
- "C": pd.Series(['e'], dtype=df["C"].dtype)})
-
+ "C": pd.Series(list('abcdef'),
+ dtype=df["C"].dtype)})
tm.assert_frame_equal(df.mode(), exp)
def test_operators_timedelta64(self):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index c2543581dca50..6c607ff580cbb 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -130,10 +130,10 @@ def test_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(Series([]).mode(), exp)
- exp = Series([], dtype=np.int64)
+ exp = Series([1], dtype=np.int64)
tm.assert_series_equal(Series([1]).mode(), exp)
- exp = Series([], dtype=np.object)
+ exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(Series(['a', 'b', 'c']).mode(), exp)
# Test numerical data types.
@@ -169,7 +169,8 @@ def test_mode(self):
tm.assert_series_equal(s.mode(), exp)
# Test datetime types.
- exp = Series([], dtype="M8[ns]")
+ exp = Series(['1900-05-03', '2011-01-03',
+ '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(s.mode(), exp)
@@ -180,7 +181,7 @@ def test_mode(self):
tm.assert_series_equal(s.mode(), exp)
# gh-5986: Test timedelta types.
- exp = Series([], dtype='timedelta64[ns]')
+ exp = Series(['-1 days', '0 days', '1 days'], dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(s.mode(), exp)
@@ -200,13 +201,13 @@ def test_mode(self):
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(s.mode(), exp)
- exp = Series([], dtype=np.uint64)
+ exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(s.mode(), exp)
# Test category dtype.
c = Categorical([1, 2])
- exp = Categorical([], categories=[1, 2])
+ exp = Categorical([1, 2], categories=[1, 2])
exp = Series(exp, dtype='category')
tm.assert_series_equal(Series(c).mode(), exp)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 7a3cc3e2c3cd7..39527dcf5ac78 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1256,10 +1256,27 @@ def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
- exp = Series([], dtype=np.int)
+ # GH 15714
+ def test_mode_single(self):
+ exp_single = [1]
+ data_single = [1]
+
+ exp_multi = [1]
+ data_multi = [1, 1]
+
+ for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
+ s = Series(data_single, dtype=dt)
+ exp = Series(exp_single, dtype=dt)
+ tm.assert_series_equal(algos.mode(s), exp)
+
+ s = Series(data_multi, dtype=dt)
+ exp = Series(exp_multi, dtype=dt)
+ tm.assert_series_equal(algos.mode(s), exp)
+
+ exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
- exp = Series([], dtype=np.object)
+ exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
@@ -1295,7 +1312,8 @@ def test_strobj_mode(self):
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
- exp = Series([], dtype="M8[ns]")
+ exp = Series(['1900-05-03', '2011-01-03',
+ '2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
@@ -1306,7 +1324,8 @@ def test_datelike_mode(self):
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
- exp = Series([], dtype='timedelta64[ns]')
+ exp = Series(['-1 days', '0 days', '1 days'],
+ dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
@@ -1326,13 +1345,13 @@ def test_uint64_overflow(self):
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
- exp = Series([], dtype=np.uint64)
+ exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
- exp = Series([], dtype=np.int64)
+ exp = Series([1, 2], dtype=np.int64)
tm.assert_series_equal(algos.mode(c), exp)
c = Categorical([1, 'a', 'a'])
@@ -1345,7 +1364,7 @@ def test_categorical(self):
def test_index(self):
idx = Index([1, 2, 3])
- exp = Series([], dtype=np.int64)
+ exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 6c8aeba704c7b..40cba7ee78b3a 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1372,13 +1372,13 @@ def test_mode(self):
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
- exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
+ exp = Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
- exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
+ exp = Categorical([5, 4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
@@ -2980,7 +2980,7 @@ def test_mode(self):
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
- exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
+ exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
| - [X] Please accept this PR which closes #15714
- [X] Tests added — `test_mode_single`
- [X] Passes ``git diff upstream/master | flake8 --diff`` (however: it complains about some non-changed rows in hashtable_func_helper.pxi.in) | https://api.github.com/repos/pandas-dev/pandas/pulls/15744 | 2017-03-20T15:59:20Z | 2017-03-29T23:53:57Z | null | 2017-03-29T23:54:14Z |
BUG: replace coerces incorrect dtype | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index af0d0d7b04475..7c78132232077 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -823,6 +823,7 @@ Bug Fixes
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
+- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 9db01713b05ed..60684a929889b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1894,8 +1894,11 @@ def convert(self, *args, **kwargs):
blocks.append(newb)
else:
- values = fn(
- self.values.ravel(), **fn_kwargs).reshape(self.values.shape)
+ values = fn(self.values.ravel(), **fn_kwargs)
+ try:
+ values = values.reshape(self.values.shape)
+ except NotImplementedError:
+ pass
blocks.append(make_block(values, ndim=self.ndim,
placement=self.mgr_locs))
@@ -3238,6 +3241,16 @@ def comp(s):
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
+ def _cast_scalar(block, scalar):
+ dtype, val = _infer_dtype_from_scalar(scalar, pandas_dtype=True)
+ if not is_dtype_equal(block.dtype, dtype):
+ dtype = _find_common_type([block.dtype, dtype])
+ block = block.astype(dtype)
+ # use original value
+ val = scalar
+
+ return block, val
+
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
@@ -3260,7 +3273,8 @@ def comp(s):
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
- new_rb.extend(b.putmask(m, d, inplace=True))
+ b, val = _cast_scalar(b, d)
+ new_rb.extend(b.putmask(m, val, inplace=True))
else:
new_rb.append(b)
rb = new_rb
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 38f8bb5355a69..df95f563c0832 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1153,12 +1153,27 @@ def setUp(self):
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
+ self.rep['datetime64[ns]'] = [pd.Timestamp('2011-01-01'),
+ pd.Timestamp('2011-01-03')]
+
+ for tz in ['UTC', 'US/Eastern']:
+ # to test tz => different tz replacement
+ key = 'datetime64[ns, {0}]'.format(tz)
+ self.rep[key] = [pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2011-01-03', tz=tz)]
+
+ self.rep['timedelta64[ns]'] = [pd.Timedelta('1 day'),
+ pd.Timedelta('2 day')]
def _assert_replace_conversion(self, from_key, to_key, how):
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
self.assertEqual(obj.dtype, from_key)
+ if (from_key.startswith('datetime') and to_key.startswith('datetime')):
+ # different tz, currently mask_missing raises SystemError
+ return
+
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
@@ -1175,17 +1190,12 @@ def _assert_replace_conversion(self, from_key, to_key, how):
pytest.skip("windows platform buggy: {0} -> {1}".format
(from_key, to_key))
- if ((from_key == 'float64' and
- to_key in ('bool', 'int64')) or
-
+ if ((from_key == 'float64' and to_key in ('bool', 'int64')) or
(from_key == 'complex128' and
to_key in ('bool', 'int64', 'float64')) or
- (from_key == 'int64' and
- to_key in ('bool')) or
-
- # TODO_GH12747 The result must be int?
- (from_key == 'bool' and to_key == 'int64')):
+ # GH12747 The result must be int?
+ (from_key == 'int64' and to_key in ('bool'))):
# buggy on 32-bit
if tm.is_platform_32bit():
@@ -1248,13 +1258,31 @@ def test_replace_series_bool(self):
self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_datetime64(self):
- pass
+ from_key = 'datetime64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'datetime64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_datetime64tz(self):
- pass
+ from_key = 'datetime64[ns, US/Eastern]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'datetime64[ns, US/Eastern]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_timedelta64(self):
- pass
+ from_key = 'timedelta64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'timedelta64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_period(self):
pass
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 0acd03316339e..f5a25e93cc82d 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -132,8 +132,8 @@ def check_replace(to_rep, val, expected):
tm.assert_series_equal(expected, r)
tm.assert_series_equal(expected, sc)
- # should NOT upcast to float
- e = pd.Series([0, 1, 2, 3, 4])
+ # MUST upcast to float
+ e = pd.Series([0., 1., 2., 3., 4.])
tr, v = [3], [3.0]
check_replace(tr, v, e)
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 1cd55274b9b49..11a837dd21159 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -21,7 +21,7 @@
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
-from .dtypes import ExtensionDtype
+from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype
from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries
from .missing import isnull, notnull
from .inference import is_list_like
@@ -312,8 +312,17 @@ def _maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
-def _infer_dtype_from_scalar(val):
- """ interpret the dtype from a scalar """
+def _infer_dtype_from_scalar(val, pandas_dtype=False):
+ """
+ interpret the dtype from a scalar
+
+ Parameters
+ ----------
+ pandas_dtype : bool, default False
+ whether to infer dtype including pandas extension types.
+ If False, scalar belongs to pandas extension types is inferred as
+ object
+ """
dtype = np.object_
@@ -336,13 +345,20 @@ def _infer_dtype_from_scalar(val):
dtype = np.object_
- elif isinstance(val, (np.datetime64,
- datetime)) and getattr(val, 'tzinfo', None) is None:
- val = lib.Timestamp(val).value
- dtype = np.dtype('M8[ns]')
+ elif isinstance(val, (np.datetime64, datetime)):
+ val = tslib.Timestamp(val)
+ if val is tslib.NaT or val.tz is None:
+ dtype = np.dtype('M8[ns]')
+ else:
+ if pandas_dtype:
+ dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
+ else:
+ # return datetimetz as object
+ return np.object_, val
+ val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
- val = lib.Timedelta(val).value
+ val = tslib.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
@@ -363,6 +379,11 @@ def _infer_dtype_from_scalar(val):
elif is_complex(val):
dtype = np.complex_
+ elif pandas_dtype:
+ if lib.is_period(val):
+ dtype = PeriodDtype(freq=val.freq)
+ val = val.ordinal
+
return dtype, val
| closes #12747
Author: sinhrks <sinhrks@gmail.com>
This patch had conflicts when merged, resolved by
Committer: Jeff Reback <jeff@reback.net>
closes #15742
Closes #12780 from sinhrks/replace_type and squashes the following commits:
f9154e8 [sinhrks] remove unnecessary comments
279fdf6 [sinhrks] remove import failure
de44877 [sinhrks] BUG: replace coerces incorrect dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/15742 | 2017-03-20T14:22:06Z | 2017-03-20T16:25:54Z | 2017-03-20T16:25:53Z | 2017-03-20T17:22:51Z |
BUG: .replace coerces incorrect dtype | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index af0d0d7b04475..7c78132232077 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -823,6 +823,7 @@ Bug Fixes
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
+- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 9db01713b05ed..60684a929889b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1894,8 +1894,11 @@ def convert(self, *args, **kwargs):
blocks.append(newb)
else:
- values = fn(
- self.values.ravel(), **fn_kwargs).reshape(self.values.shape)
+ values = fn(self.values.ravel(), **fn_kwargs)
+ try:
+ values = values.reshape(self.values.shape)
+ except NotImplementedError:
+ pass
blocks.append(make_block(values, ndim=self.ndim,
placement=self.mgr_locs))
@@ -3238,6 +3241,16 @@ def comp(s):
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
+ def _cast_scalar(block, scalar):
+ dtype, val = _infer_dtype_from_scalar(scalar, pandas_dtype=True)
+ if not is_dtype_equal(block.dtype, dtype):
+ dtype = _find_common_type([block.dtype, dtype])
+ block = block.astype(dtype)
+ # use original value
+ val = scalar
+
+ return block, val
+
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
@@ -3260,7 +3273,8 @@ def comp(s):
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
- new_rb.extend(b.putmask(m, d, inplace=True))
+ b, val = _cast_scalar(b, d)
+ new_rb.extend(b.putmask(m, val, inplace=True))
else:
new_rb.append(b)
rb = new_rb
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 38f8bb5355a69..e2c4628f89fe5 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1153,12 +1153,27 @@ def setUp(self):
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
+ self.rep['datetime64[ns]'] = [pd.Timestamp('2011-01-01'),
+ pd.Timestamp('2011-01-03')]
+
+ for tz in ['UTC', 'US/Eastern']:
+ # to test tz => different tz replacement
+ key = 'datetime64[ns, {0}]'.format(tz)
+ self.rep[key] = [pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2011-01-03', tz=tz)]
+
+ self.rep['timedelta64[ns]'] = [pd.Timedelta('1 day'),
+ pd.Timedelta('2 day')]
def _assert_replace_conversion(self, from_key, to_key, how):
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
self.assertEqual(obj.dtype, from_key)
+ if (from_key.startswith('datetime') and to_key.startswith('datetime')):
+ # different tz, currently mask_missing raises SystemError
+ return
+
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
@@ -1175,17 +1190,12 @@ def _assert_replace_conversion(self, from_key, to_key, how):
pytest.skip("windows platform buggy: {0} -> {1}".format
(from_key, to_key))
- if ((from_key == 'float64' and
- to_key in ('bool', 'int64')) or
-
+ if ((from_key == 'float64' and to_key in ('bool', 'int64')) or
(from_key == 'complex128' and
to_key in ('bool', 'int64', 'float64')) or
- (from_key == 'int64' and
- to_key in ('bool')) or
-
# TODO_GH12747 The result must be int?
- (from_key == 'bool' and to_key == 'int64')):
+ (from_key == 'int64' and to_key in ('bool'))):
# buggy on 32-bit
if tm.is_platform_32bit():
@@ -1248,13 +1258,31 @@ def test_replace_series_bool(self):
self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_datetime64(self):
- pass
+ from_key = 'datetime64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'datetime64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_datetime64tz(self):
- pass
+ from_key = 'datetime64[ns, US/Eastern]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'datetime64[ns, US/Eastern]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_timedelta64(self):
- pass
+ from_key = 'timedelta64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='dict')
+
+ from_key = 'timedelta64[ns]'
+ for to_key in self.rep:
+ self._assert_replace_conversion(from_key, to_key, how='series')
def test_replace_series_period(self):
pass
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 0acd03316339e..f5a25e93cc82d 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -132,8 +132,8 @@ def check_replace(to_rep, val, expected):
tm.assert_series_equal(expected, r)
tm.assert_series_equal(expected, sc)
- # should NOT upcast to float
- e = pd.Series([0, 1, 2, 3, 4])
+ # MUST upcast to float
+ e = pd.Series([0., 1., 2., 3., 4.])
tr, v = [3], [3.0]
check_replace(tr, v, e)
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 1cd55274b9b49..11a837dd21159 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -21,7 +21,7 @@
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
-from .dtypes import ExtensionDtype
+from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype
from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries
from .missing import isnull, notnull
from .inference import is_list_like
@@ -312,8 +312,17 @@ def _maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
-def _infer_dtype_from_scalar(val):
- """ interpret the dtype from a scalar """
+def _infer_dtype_from_scalar(val, pandas_dtype=False):
+ """
+ interpret the dtype from a scalar
+
+ Parameters
+ ----------
+ pandas_dtype : bool, default False
+ whether to infer dtype including pandas extension types.
+ If False, scalar belongs to pandas extension types is inferred as
+ object
+ """
dtype = np.object_
@@ -336,13 +345,20 @@ def _infer_dtype_from_scalar(val):
dtype = np.object_
- elif isinstance(val, (np.datetime64,
- datetime)) and getattr(val, 'tzinfo', None) is None:
- val = lib.Timestamp(val).value
- dtype = np.dtype('M8[ns]')
+ elif isinstance(val, (np.datetime64, datetime)):
+ val = tslib.Timestamp(val)
+ if val is tslib.NaT or val.tz is None:
+ dtype = np.dtype('M8[ns]')
+ else:
+ if pandas_dtype:
+ dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
+ else:
+ # return datetimetz as object
+ return np.object_, val
+ val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
- val = lib.Timedelta(val).value
+ val = tslib.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
@@ -363,6 +379,11 @@ def _infer_dtype_from_scalar(val):
elif is_complex(val):
dtype = np.complex_
+ elif pandas_dtype:
+ if lib.is_period(val):
+ dtype = PeriodDtype(freq=val.freq)
+ val = val.ordinal
+
return dtype, val
| closes #12747
supersedes 12780 | https://api.github.com/repos/pandas-dev/pandas/pulls/15741 | 2017-03-20T14:17:19Z | 2017-03-20T14:17:36Z | null | 2017-03-20T14:17:36Z |
BUG: tz aware Timestamp field accessors returns local values (#13303) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 680aefc4041fb..0fe6cc34c2f70 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -796,6 +796,7 @@ Bug Fixes
~~~~~~~~~
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
+- Bug in ``Timestamp`` returning UTC based time/date attributes when a timezone was provided (:issue:`13303`)
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 8ee92e9fb900d..055534bbdb7ee 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1233,7 +1233,10 @@ cdef class _Timestamp(datetime):
return datetime.__sub__(self, other)
cpdef _get_field(self, field):
- out = get_date_field(np.array([self.value], dtype=np.int64), field)
+ val = self.value
+ if self.tz is not None and not _is_utc(self.tz):
+ val = tz_convert_single(self.value, 'UTC', self.tz)
+ out = get_date_field(np.array([val], dtype=np.int64), field)
return int(out[0])
cpdef _get_start_end_field(self, field):
@@ -1241,8 +1244,11 @@ cdef class _Timestamp(datetime):
'startingMonth', self.freq.kwds.get(
'month', 12)) if self.freq else 12
freqstr = self.freqstr if self.freq else None
+ val = self.value
+ if self.tz is not None and not _is_utc(self.tz):
+ val = tz_convert_single(self.value, 'UTC', self.tz)
out = get_start_end_field(
- np.array([self.value], dtype=np.int64), field, freqstr, month_kw)
+ np.array([val], dtype=np.int64), field, freqstr, month_kw)
return out[0]
property _repr_base:
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 6b0191edbda5a..e99f1d46637c2 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -172,82 +172,88 @@ def test_normalize(self):
class TestDatetime64(tm.TestCase):
def test_datetimeindex_accessors(self):
- dti = DatetimeIndex(freq='D', start=datetime(1998, 1, 1), periods=365)
-
- self.assertEqual(dti.year[0], 1998)
- self.assertEqual(dti.month[0], 1)
- self.assertEqual(dti.day[0], 1)
- self.assertEqual(dti.hour[0], 0)
- self.assertEqual(dti.minute[0], 0)
- self.assertEqual(dti.second[0], 0)
- self.assertEqual(dti.microsecond[0], 0)
- self.assertEqual(dti.dayofweek[0], 3)
-
- self.assertEqual(dti.dayofyear[0], 1)
- self.assertEqual(dti.dayofyear[120], 121)
-
- self.assertEqual(dti.weekofyear[0], 1)
- self.assertEqual(dti.weekofyear[120], 18)
-
- self.assertEqual(dti.quarter[0], 1)
- self.assertEqual(dti.quarter[120], 2)
-
- self.assertEqual(dti.days_in_month[0], 31)
- self.assertEqual(dti.days_in_month[90], 30)
-
- self.assertEqual(dti.is_month_start[0], True)
- self.assertEqual(dti.is_month_start[1], False)
- self.assertEqual(dti.is_month_start[31], True)
- self.assertEqual(dti.is_quarter_start[0], True)
- self.assertEqual(dti.is_quarter_start[90], True)
- self.assertEqual(dti.is_year_start[0], True)
- self.assertEqual(dti.is_year_start[364], False)
- self.assertEqual(dti.is_month_end[0], False)
- self.assertEqual(dti.is_month_end[30], True)
- self.assertEqual(dti.is_month_end[31], False)
- self.assertEqual(dti.is_month_end[364], True)
- self.assertEqual(dti.is_quarter_end[0], False)
- self.assertEqual(dti.is_quarter_end[30], False)
- self.assertEqual(dti.is_quarter_end[89], True)
- self.assertEqual(dti.is_quarter_end[364], True)
- self.assertEqual(dti.is_year_end[0], False)
- self.assertEqual(dti.is_year_end[364], True)
-
- # GH 11128
- self.assertEqual(dti.weekday_name[4], u'Monday')
- self.assertEqual(dti.weekday_name[5], u'Tuesday')
- self.assertEqual(dti.weekday_name[6], u'Wednesday')
- self.assertEqual(dti.weekday_name[7], u'Thursday')
- self.assertEqual(dti.weekday_name[8], u'Friday')
- self.assertEqual(dti.weekday_name[9], u'Saturday')
- self.assertEqual(dti.weekday_name[10], u'Sunday')
-
- self.assertEqual(Timestamp('2016-04-04').weekday_name, u'Monday')
- self.assertEqual(Timestamp('2016-04-05').weekday_name, u'Tuesday')
- self.assertEqual(Timestamp('2016-04-06').weekday_name, u'Wednesday')
- self.assertEqual(Timestamp('2016-04-07').weekday_name, u'Thursday')
- self.assertEqual(Timestamp('2016-04-08').weekday_name, u'Friday')
- self.assertEqual(Timestamp('2016-04-09').weekday_name, u'Saturday')
- self.assertEqual(Timestamp('2016-04-10').weekday_name, u'Sunday')
-
- self.assertEqual(len(dti.year), 365)
- self.assertEqual(len(dti.month), 365)
- self.assertEqual(len(dti.day), 365)
- self.assertEqual(len(dti.hour), 365)
- self.assertEqual(len(dti.minute), 365)
- self.assertEqual(len(dti.second), 365)
- self.assertEqual(len(dti.microsecond), 365)
- self.assertEqual(len(dti.dayofweek), 365)
- self.assertEqual(len(dti.dayofyear), 365)
- self.assertEqual(len(dti.weekofyear), 365)
- self.assertEqual(len(dti.quarter), 365)
- self.assertEqual(len(dti.is_month_start), 365)
- self.assertEqual(len(dti.is_month_end), 365)
- self.assertEqual(len(dti.is_quarter_start), 365)
- self.assertEqual(len(dti.is_quarter_end), 365)
- self.assertEqual(len(dti.is_year_start), 365)
- self.assertEqual(len(dti.is_year_end), 365)
- self.assertEqual(len(dti.weekday_name), 365)
+ dti_naive = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
+ periods=365)
+ # GH 13303
+ dti_tz = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
+ periods=365, tz='US/Eastern')
+ for dti in [dti_naive, dti_tz]:
+
+ self.assertEqual(dti.year[0], 1998)
+ self.assertEqual(dti.month[0], 1)
+ self.assertEqual(dti.day[0], 1)
+ self.assertEqual(dti.hour[0], 0)
+ self.assertEqual(dti.minute[0], 0)
+ self.assertEqual(dti.second[0], 0)
+ self.assertEqual(dti.microsecond[0], 0)
+ self.assertEqual(dti.dayofweek[0], 3)
+
+ self.assertEqual(dti.dayofyear[0], 1)
+ self.assertEqual(dti.dayofyear[120], 121)
+
+ self.assertEqual(dti.weekofyear[0], 1)
+ self.assertEqual(dti.weekofyear[120], 18)
+
+ self.assertEqual(dti.quarter[0], 1)
+ self.assertEqual(dti.quarter[120], 2)
+
+ self.assertEqual(dti.days_in_month[0], 31)
+ self.assertEqual(dti.days_in_month[90], 30)
+
+ self.assertEqual(dti.is_month_start[0], True)
+ self.assertEqual(dti.is_month_start[1], False)
+ self.assertEqual(dti.is_month_start[31], True)
+ self.assertEqual(dti.is_quarter_start[0], True)
+ self.assertEqual(dti.is_quarter_start[90], True)
+ self.assertEqual(dti.is_year_start[0], True)
+ self.assertEqual(dti.is_year_start[364], False)
+ self.assertEqual(dti.is_month_end[0], False)
+ self.assertEqual(dti.is_month_end[30], True)
+ self.assertEqual(dti.is_month_end[31], False)
+ self.assertEqual(dti.is_month_end[364], True)
+ self.assertEqual(dti.is_quarter_end[0], False)
+ self.assertEqual(dti.is_quarter_end[30], False)
+ self.assertEqual(dti.is_quarter_end[89], True)
+ self.assertEqual(dti.is_quarter_end[364], True)
+ self.assertEqual(dti.is_year_end[0], False)
+ self.assertEqual(dti.is_year_end[364], True)
+
+ # GH 11128
+ self.assertEqual(dti.weekday_name[4], u'Monday')
+ self.assertEqual(dti.weekday_name[5], u'Tuesday')
+ self.assertEqual(dti.weekday_name[6], u'Wednesday')
+ self.assertEqual(dti.weekday_name[7], u'Thursday')
+ self.assertEqual(dti.weekday_name[8], u'Friday')
+ self.assertEqual(dti.weekday_name[9], u'Saturday')
+ self.assertEqual(dti.weekday_name[10], u'Sunday')
+
+ self.assertEqual(Timestamp('2016-04-04').weekday_name, u'Monday')
+ self.assertEqual(Timestamp('2016-04-05').weekday_name, u'Tuesday')
+ self.assertEqual(Timestamp('2016-04-06').weekday_name,
+ u'Wednesday')
+ self.assertEqual(Timestamp('2016-04-07').weekday_name, u'Thursday')
+ self.assertEqual(Timestamp('2016-04-08').weekday_name, u'Friday')
+ self.assertEqual(Timestamp('2016-04-09').weekday_name, u'Saturday')
+ self.assertEqual(Timestamp('2016-04-10').weekday_name, u'Sunday')
+
+ self.assertEqual(len(dti.year), 365)
+ self.assertEqual(len(dti.month), 365)
+ self.assertEqual(len(dti.day), 365)
+ self.assertEqual(len(dti.hour), 365)
+ self.assertEqual(len(dti.minute), 365)
+ self.assertEqual(len(dti.second), 365)
+ self.assertEqual(len(dti.microsecond), 365)
+ self.assertEqual(len(dti.dayofweek), 365)
+ self.assertEqual(len(dti.dayofyear), 365)
+ self.assertEqual(len(dti.weekofyear), 365)
+ self.assertEqual(len(dti.quarter), 365)
+ self.assertEqual(len(dti.is_month_start), 365)
+ self.assertEqual(len(dti.is_month_end), 365)
+ self.assertEqual(len(dti.is_quarter_start), 365)
+ self.assertEqual(len(dti.is_quarter_end), 365)
+ self.assertEqual(len(dti.is_year_start), 365)
+ self.assertEqual(len(dti.is_year_end), 365)
+ self.assertEqual(len(dti.weekday_name), 365)
dti = DatetimeIndex(freq='BQ-FEB', start=datetime(1998, 1, 1),
periods=4)
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index d5d92dcf96eab..082f0fa9c40d5 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -550,6 +550,32 @@ def check(value, equal):
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
+ # GH 13303
+ ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
+ check(ts.year, 2014)
+ check(ts.month, 12)
+ check(ts.day, 31)
+ check(ts.hour, 23)
+ check(ts.minute, 59)
+ check(ts.second, 0)
+ self.assertRaises(AttributeError, lambda: ts.millisecond)
+ check(ts.microsecond, 0)
+ check(ts.nanosecond, 0)
+ check(ts.dayofweek, 2)
+ check(ts.quarter, 4)
+ check(ts.dayofyear, 365)
+ check(ts.week, 1)
+ check(ts.daysinmonth, 31)
+
+ ts = Timestamp('2014-01-01 00:00:00+01:00')
+ starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
+ for start in starts:
+ self.assertTrue(getattr(ts, start))
+ ts = Timestamp('2014-12-31 23:59:59+01:00')
+ ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
+ for end in ends:
+ self.assertTrue(getattr(ts, end))
+
def test_nat_fields(self):
# GH 10050
ts = Timestamp('NaT')
| - [x] closes #13303
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
Previously, calling a date/time attribute with Timestamp that's tz aware (e.g. `Timestamp('...', tz='...').dayofyear`) would return the attribute in UTC instead of the local tz. | https://api.github.com/repos/pandas-dev/pandas/pulls/15740 | 2017-03-20T06:20:20Z | 2017-03-20T17:51:28Z | null | 2017-12-20T02:00:47Z |
DOC: Fix typo in docstring param name | diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index d3d936693c266..9acb52ebe0e9f 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -365,7 +365,7 @@ def holidays(self, start=None, end=None, return_name=False):
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
- return_names : bool, optional
+ return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15739 | 2017-03-20T05:45:44Z | 2017-03-20T08:13:53Z | 2017-03-20T08:13:53Z | 2017-03-20T08:14:00Z | |
PERF: Improve drop_duplicates for bool columns (#12963) | diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 6fe6c32a96df9..537d275e7c727 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -132,6 +132,9 @@ def setup(self):
self.K = 10000
self.key1 = np.random.randint(0, self.K, size=self.N)
self.df_int = DataFrame({'key1': self.key1})
+ self.df_bool = DataFrame({i: np.random.randint(0, 2, size=self.K,
+ dtype=bool)
+ for i in range(10)})
def time_frame_drop_dups(self):
self.df.drop_duplicates(['key1', 'key2'])
@@ -154,6 +157,8 @@ def time_series_drop_dups_string(self):
def time_frame_drop_dups_int(self):
self.df_int.drop_duplicates()
+ def time_frame_drop_dups_bool(self):
+ self.df_bool.drop_duplicates()
#----------------------------------------------------------------------
# blog "pandas escaped the zoo"
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 680aefc4041fb..02e80dd77aa0a 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -788,6 +788,7 @@ Performance Improvements
- Improved performance of ``.rank()`` for categorical data (:issue:`15498`)
- Improved performance when using ``.unstack()`` (:issue:`15503`)
- Improved performance of merge/join on ``category`` columns (:issue:`10409`)
+- Improved performance of ``drop_duplicates()`` on ``bool`` columns (:issue:`12963`)
.. _whatsnew_0200.bug_fixes:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 6937675603c10..b6f496f417a74 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -19,6 +19,7 @@
is_period_dtype,
is_period_arraylike,
is_float_dtype,
+ is_bool_dtype,
needs_i8_conversion,
is_categorical,
is_datetime64_dtype,
@@ -341,6 +342,10 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
# numpy dtype
dtype = values.dtype
vals = values.view(np.int64)
+ elif is_bool_dtype(values):
+ dtype = bool
+ # transform to int dtype to avoid object path
+ vals = np.asarray(values).view('uint8')
else:
vals = np.asarray(values)
| - [x] closes #12963
- [x] tests passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
Converts `bool` columns to `int` when calling `drop_duplicates()` so the factorization does not go down the `object` path.
```
before after ratio
[9ab57dc5] [4b05fccb]
- 10.2±0.01ms 4.68±0.01ms 0.46 reindex.Duplicates.time_frame_drop_dups_bool
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/15738 | 2017-03-20T02:05:03Z | 2017-03-20T22:47:22Z | null | 2017-12-20T02:00:38Z |
Value returns ndarray for dataframes with a single column with datetime64 tz-aware | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a56212328f5c3..ae0f2181a9850 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -792,6 +792,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug in ``DataFrame.values`` now returns object dtyped numpy array of ``Timestamp`` for tz-aware columns; previously this returned ``DateTimeIndex`` (:issue:`14052`)
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1db9677659ca3..d1de9d0bfa01a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3076,7 +3076,9 @@ def values(self):
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
- will result in a flot64 dtype.
+ will result in a float64 dtype.
+
+ Unlike ``Series.values``, tz-aware dtypes will be upcasted to object.
"""
return self.as_matrix()
@@ -5098,6 +5100,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
try_cast=False, raise_on_error=True):
other = com._apply_if_callable(other, self)
+
return self._where(cond, other, inplace, axis, level, try_cast,
raise_on_error)
@@ -5783,7 +5786,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs)) - 1)
if freq is None:
mask = isnull(_values_from_object(self))
- np.putmask(rs.values, mask, np.nan)
+ rs.iloc[mask] = np.nan
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 0e6c176d950a1..cfa991eefef16 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2388,9 +2388,15 @@ def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if is_object_dtype(dtype):
f = lambda x: lib.Timestamp(x, tz=self.values.tz)
- return lib.map_infer(
+ values = lib.map_infer(
self.values.ravel(), f).reshape(self.values.shape)
- return self.values
+
+ if values.ndim == self.ndim - 1:
+ values = values.reshape((1,) + values.shape)
+ else:
+ return self.values
+
+ return values
def to_object_block(self, mgr):
"""
@@ -3424,10 +3430,7 @@ def as_matrix(self, items=None):
else:
mgr = self
- if self._is_single_block or not self.is_mixed_type:
- return mgr.blocks[0].get_values()
- else:
- return mgr._interleave()
+ return mgr._interleave()
def _interleave(self):
"""
@@ -3436,6 +3439,10 @@ def _interleave(self):
"""
dtype = _interleaved_dtype(self.blocks)
+ if self._is_single_block or not self.is_mixed_type:
+ return np.array(self.blocks[0].get_values(dtype),
+ dtype=dtype, copy=False)
+
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
@@ -4485,33 +4492,64 @@ def _interleaved_dtype(blocks):
for x in blocks:
counts[type(x)].append(x)
- have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
+ have_int = len(counts[IntBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
- have_cat = len(counts[CategoricalBlock]) > 0
+ have_cat = len(counts[CategoricalBlock])
# TODO: have_sparse is not used
have_sparse = len(counts[SparseBlock]) > 0 # noqa
- have_numeric = have_float or have_complex or have_int
- has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
+ have_numeric = have_float + have_complex + have_int
+ have_dt = have_dt64 + have_dt64_tz
+ have_non_numeric = have_dt64 + have_dt64_tz + have_td64 + have_cat
+ have_non_dt = have_td64 + have_cat
+ have_mixed = bool(have_numeric) + bool(have_non_dt) + bool(have_dt)
if (have_object or
- (have_bool and
- (have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
- (have_numeric and has_non_numeric) or have_cat or have_dt64 or
- have_dt64_tz or have_td64):
+ (have_non_numeric > 1) or # more than one type of non numeric
+ (have_bool and have_mixed) or # mix of a numeric et non numeric
+ (have_mixed > 1) or # mix of a numeric et non numeric
+ have_dt64_tz or
+ (have_cat > 1)):
return np.dtype(object)
+ elif have_dt64:
+ return np.dtype("datetime64[ns]")
+ elif have_td64:
+ return np.dtype("timedelta64[ns]")
elif have_bool:
- return np.dtype(bool)
+ return np.dtype("bool")
+ elif have_cat:
+ # return blocks[0].get_values().dtype
+ # if we are mixing unsigned and signed, then return
+ # the next biggest int type (if we can)
+
+ dts = [b.get_values().dtype for b in counts[CategoricalBlock]]
+ lcd = _find_common_type(dts)
+ kinds = set([_dt.kind for _dt in dts])
+
+ if len(kinds) == 1:
+ return lcd
+
+ if lcd == 'uint64' or lcd == 'int64':
+ return np.dtype('int64')
+
+ # return 1 bigger on the itemsize if unsinged
+ if lcd.kind == 'u':
+ return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
+ return lcd
+
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
- lcd = _find_common_type([b.dtype for b in counts[IntBlock]])
- kinds = set([i.dtype.kind for i in counts[IntBlock]])
+
+ dts = [b.dtype for b in counts[IntBlock]]
+ lcd = _find_common_type(dts)
+ kinds = set([_dt.kind for _dt in dts])
+
if len(kinds) == 1:
return lcd
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index f7d2c1a654cd5..3d399e7b848bf 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -1,23 +1,25 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
+
+import itertools
from datetime import timedelta
import numpy as np
+
+import pandas as pd
+import pandas.util.testing as tm
from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp,
compat, concat, option_context)
from pandas.compat import u
-from pandas.types.dtypes import DatetimeTZDtype
from pandas.tests.frame.common import TestData
+from pandas.types.dtypes import DatetimeTZDtype
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
-import pandas.util.testing as tm
-import pandas as pd
class TestDataFrameDataTypes(tm.TestCase, TestData):
-
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
@@ -198,7 +200,7 @@ def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
def test_select_dtypes_empty(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(ValueError, 'at least one of include or '
- 'exclude must be nonempty'):
+ 'exclude must be nonempty'):
df.select_dtypes()
def test_select_dtypes_raises_on_string(self):
@@ -536,7 +538,6 @@ def test_arg_for_errors_in_astype(self):
class TestDataFrameDatetimeWithTZ(tm.TestCase, TestData):
-
def test_interleave(self):
# interleave with object
@@ -622,3 +623,77 @@ def test_astype_str(self):
'NaT NaT' in result)
self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 '
'2013-01-03 00:00:00+01:00' in result)
+
+ def test_values_is_ndarray_with_datetime64tz(self):
+ df = DataFrame({
+ 'A': date_range('20130101', periods=3),
+ 'B': date_range('20130101', periods=3, tz='US/Eastern'),
+ })
+
+ for col in [
+ ["A"],
+ ["A", "A"],
+ ["A", "B"],
+ ["B", "B"],
+ ["B"],
+ ]:
+ arr = df[col].values
+ dtype_expected = "<M8[ns]" if "B" not in col else object
+ arr_expected = np.array(list(df[col].itertuples(index=False)),
+ dtype=dtype_expected)
+
+ tm.assert_numpy_array_equal(arr, arr_expected)
+
+ def test_values_dtypes_with_datetime64tz(self):
+ df = DataFrame({'dt': date_range('20130101', periods=3),
+ 'dttz': date_range('20130101', periods=3,
+ tz='US/Eastern'),
+ 'td': (date_range('20130102', periods=3) -
+ date_range('20130101', periods=3)),
+ 'cat': pd.Categorical(['a', 'b', 'b']),
+ 'cati': pd.Categorical([100, 4, 3]),
+ 'b': [True, False, False],
+ 'i': [1, 2, 3],
+ 'f': [1.3, 2, 3],
+ 'c': [1j, 2, 3],
+ })
+
+ cols = itertools.chain(
+ itertools.combinations_with_replacement(df.columns, 1),
+ itertools.combinations_with_replacement(df.columns, 2)
+ )
+ for col in cols:
+ df_sub = df[list(col)]
+ dts = df_sub.dtypes.values
+
+ # calculate dtype_expected in function of dtypes of dataframe
+ # (testing the logic of the _interleaved_dtype
+ # function in pandas/core/internals.py
+
+ # all columns of the same type
+ if len(set(dts)) == 1:
+ if dts[0] in ("M8[ns]", "m8[ns]",
+ bool, complex, int, float):
+ dtype_expected = dts[0]
+ else:
+ if col == ("cati", ):
+ dtype_expected = 'int64'
+ else:
+ dtype_expected = object
+
+ # different type of columns
+ else:
+ # all numeric and complex
+ if all(np.in1d(dts, (complex, int, float))) and complex in dts:
+ dtype_expected = complex
+ # all numeric and float
+ elif all(np.in1d(dts, (complex, int, float))) and float in dts:
+ dtype_expected = float
+ else:
+ dtype_expected = object
+
+ arr = df_sub.values
+ arr_expected = np.array(list(df_sub.itertuples(index=False)),
+ dtype=dtype_expected)
+
+ tm.assert_numpy_array_equal(arr, arr_expected)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7dbcf25c60b45..594b33000965f 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -938,6 +938,7 @@ def test_tz_range_is_utc(self):
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
+
self.assertEqual(exp, dumps(tz_range, iso_dates=True))
dti = pd.DatetimeIndex(tz_range)
self.assertEqual(exp, dumps(dti, iso_dates=True))
| - [x] closes #14052
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15736 | 2017-03-19T20:08:37Z | 2017-07-26T23:57:53Z | null | 2017-07-26T23:57:54Z |
MAINT: Drop order and sort from pandas objects | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 4949b68d46723..680aefc4041fb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -771,6 +771,7 @@ Removal of prior version deprecations/changes
- The deprecated ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`10711`)
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
+- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3696051b269e3..732d88b47ae2a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3304,56 +3304,6 @@ def trans(v):
else:
return self._constructor(new_data).__finalize__(self)
- def sort(self, columns=None, axis=0, ascending=True, inplace=False,
- kind='quicksort', na_position='last', **kwargs):
- """
- DEPRECATED: use :meth:`DataFrame.sort_values`
-
- Sort DataFrame either by labels (along either axis) or by the values in
- column(s)
-
- Parameters
- ----------
- columns : object
- Column name(s) in frame. Accepts a column name or a list
- for a nested sort. A tuple will be interpreted as the
- levels of a multi-index.
- ascending : boolean or list, default True
- Sort ascending vs. descending. Specify list for multiple sort
- orders
- axis : {0 or 'index', 1 or 'columns'}, default 0
- Sort index/rows versus columns
- inplace : boolean, default False
- Sort the DataFrame without creating a new instance
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
- This option is only applied when sorting on a single column or
- label.
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
-
- Examples
- --------
- >>> result = df.sort(['A', 'B'], ascending=[1, 0])
-
- Returns
- -------
- sorted : DataFrame
- """
- nv.validate_sort(tuple(), kwargs)
-
- if columns is None:
- warnings.warn("sort(....) is deprecated, use sort_index(.....)",
- FutureWarning, stacklevel=2)
- return self.sort_index(axis=axis, ascending=ascending,
- inplace=inplace)
-
- warnings.warn("sort(columns=....) is deprecated, use "
- "sort_values(by=.....)", FutureWarning, stacklevel=2)
- return self.sort_values(by=columns, axis=axis, ascending=ascending,
- inplace=inplace, kind=kind,
- na_position=na_position)
-
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7ee3b3e8fb519..4c51ced1845fe 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1777,77 +1777,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
else:
return result.__finalize__(self)
- def sort(self, axis=0, ascending=True, kind='quicksort',
- na_position='last', inplace=True):
- """
- DEPRECATED: use :meth:`Series.sort_values(inplace=True)` for INPLACE
- sorting
-
- Sort values and index labels by value. This is an inplace sort by
- default. Series.order is the equivalent but returns a new Series.
-
- Parameters
- ----------
- axis : int (can only be zero)
- ascending : boolean, default True
- Sort ascending. Passing False sorts descending
- kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
- Choice of sorting algorithm. See np.sort for more
- information. 'mergesort' is the only stable algorithm
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
- inplace : boolean, default True
- Do operation in place.
-
- See Also
- --------
- Series.sort_values
- """
- warnings.warn("sort is deprecated, use sort_values(inplace=True) for "
- "INPLACE sorting", FutureWarning, stacklevel=2)
-
- return self.sort_values(ascending=ascending, kind=kind,
- na_position=na_position, inplace=inplace)
-
- def order(self, na_last=None, ascending=True, kind='quicksort',
- na_position='last', inplace=False):
- """
- DEPRECATED: use :meth:`Series.sort_values`
-
- Sorts Series object, by value, maintaining index-value link.
- This will return a new Series by default. Series.sort is the equivalent
- but as an inplace method.
-
- Parameters
- ----------
- na_last : boolean (optional, default=True)--DEPRECATED; use na_position
- Put NaN's at beginning or end
- ascending : boolean, default True
- Sort ascending. Passing False sorts descending
- kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
- Choice of sorting algorithm. See np.sort for more
- information. 'mergesort' is the only stable algorithm
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
- inplace : boolean, default False
- Do operation in place.
-
- Returns
- -------
- y : Series
-
- See Also
- --------
- Series.sort_values
- """
- warnings.warn("order is deprecated, use sort_values(...)",
- FutureWarning, stacklevel=2)
-
- return self.sort_values(ascending=ascending, kind=kind,
- na_position=na_position, inplace=inplace)
-
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 381e4d5caa8ac..d262ecd818f1d 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1912,17 +1912,6 @@ def sort_values(self, return_indexer=False, ascending=True):
else:
return sorted_index
- def order(self, return_indexer=False, ascending=True):
- """
- Return sorted copy of Index
-
- DEPRECATED: use :meth:`Index.sort_values`
- """
- warnings.warn("order is deprecated, use sort_values(...)",
- FutureWarning, stacklevel=2)
- return self.sort_values(return_indexer=return_indexer,
- ascending=ascending)
-
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 4fb1d2222fa06..735d3786e6a54 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -660,26 +660,6 @@ def test_sem(self):
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
- def test_sort_invalid_kwargs(self):
- df = DataFrame([1, 2, 3], columns=['a'])
-
- msg = r"sort\(\) got an unexpected keyword argument 'foo'"
- tm.assertRaisesRegexp(TypeError, msg, df.sort, foo=2)
-
- # Neither of these should raise an error because they
- # are explicit keyword arguments in the signature and
- # hence should not be swallowed by the kwargs parameter
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- df.sort(axis=1)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- df.sort(kind='mergesort')
-
- msg = "the 'order' parameter is not supported"
- tm.assertRaisesRegexp(ValueError, msg, df.sort, order=2)
-
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 7779afdc47b48..5108fc6080866 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -62,11 +62,7 @@ def test_sort(self):
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
- # 9816 deprecated
- with tm.assert_produces_warning(FutureWarning):
- frame.sort(columns='A')
- with tm.assert_produces_warning(FutureWarning):
- frame.sort()
+ # see gh-9816
with tm.assert_produces_warning(FutureWarning):
frame.sortlevel()
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 3581f894e53a3..b1e6bd7520c69 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -346,12 +346,6 @@ def test_sort(self):
for ind in self.indices.values():
self.assertRaises(TypeError, ind.sort)
- def test_order(self):
- for ind in self.indices.values():
- # 9816 deprecated
- with tm.assert_produces_warning(FutureWarning):
- ind.order()
-
def test_mutability(self):
for ind in self.indices.values():
if not len(ind):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 05d3478ab0705..7199a38bb7a80 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1808,21 +1808,6 @@ def setUp(self):
def create_index(self):
return self.mixedIndex
- def test_order(self):
- idx = self.create_index()
- # 9816 deprecated
- if PY36:
- with tm.assertRaisesRegexp(TypeError, "'>' not supported"):
- with tm.assert_produces_warning(FutureWarning):
- idx.order()
- elif PY3:
- with tm.assertRaisesRegexp(TypeError, "unorderable types"):
- with tm.assert_produces_warning(FutureWarning):
- idx.order()
- else:
- with tm.assert_produces_warning(FutureWarning):
- idx.order()
-
def test_argsort(self):
idx = self.create_index()
if PY36:
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 590a530a847bd..66ecba960ae0b 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -13,24 +13,13 @@
class TestSeriesSorting(TestData, tm.TestCase):
- def test_sort(self):
-
+ def test_sortlevel_deprecated(self):
ts = self.ts.copy()
- # 9816 deprecated
- with tm.assert_produces_warning(FutureWarning):
- ts.sort() # sorts inplace
- self.assert_series_equal(ts, self.ts.sort_values())
+ # see gh-9816
with tm.assert_produces_warning(FutureWarning):
ts.sortlevel()
- def test_order(self):
-
- # 9816 deprecated
- with tm.assert_produces_warning(FutureWarning):
- result = self.ts.order()
- self.assert_series_equal(result, self.ts.sort_values())
-
def test_sort_values(self):
# check indexes are reordered corresponding with the values
| Affected classes:
1) `Index`
2) `Series`
2) `DataFrame`
xref #10726 | https://api.github.com/repos/pandas-dev/pandas/pulls/15735 | 2017-03-19T01:16:36Z | 2017-03-19T02:02:21Z | 2017-03-19T02:02:21Z | 2017-03-19T02:08:50Z |
TST: clean up build testing | diff --git a/.travis.yml b/.travis.yml
index 88e1655363a4e..705b2380ac697 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,7 +33,6 @@ matrix:
- PYTHON_VERSION=3.5
- JOB_NAME: "35_osx"
- TEST_ARGS="--skip-slow --skip-network"
- - BUILD_TYPE=conda
- JOB_TAG=_OSX
- TRAVIS_PYTHON_VERSION=3.5
- CACHE_NAME="35_osx"
@@ -107,12 +106,12 @@ matrix:
- python: 2.7
env:
- PYTHON_VERSION=2.7
- - JOB_NAME: "27_build_test_conda"
+ - JOB_NAME: "27_build_test"
- JOB_TAG=_BUILD_TEST
- TEST_ARGS="--skip-slow"
- FULL_DEPS=true
- BUILD_TEST=true
- - CACHE_NAME="27_build_test_conda"
+ - CACHE_NAME="27_build_test"
- USE_CACHE=true
# In allow_failures
- python: 3.5
@@ -147,12 +146,12 @@ matrix:
- python: 2.7
env:
- PYTHON_VERSION=2.7
- - JOB_NAME: "27_build_test_conda"
+ - JOB_NAME: "27_build_test"
- JOB_TAG=_BUILD_TEST
- TEST_ARGS="--skip-slow"
- FULL_DEPS=true
- BUILD_TEST=true
- - CACHE_NAME="27_build_test_conda"
+ - CACHE_NAME="27_build_test"
- USE_CACHE=true
- python: 3.5
env:
diff --git a/ci/install_test.sh b/ci/install_test.sh
deleted file mode 100755
index 9ace633d7f39d..0000000000000
--- a/ci/install_test.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-echo "inside $0"
-
-if [ "$INSTALL_TEST" ]; then
- source activate pandas
- echo "Starting installation test."
- conda uninstall cython || exit 1
- python "$TRAVIS_BUILD_DIR"/setup.py sdist --formats=zip,gztar || exit 1
- pip install "$TRAVIS_BUILD_DIR"/dist/*tar.gz || exit 1
- pytest pandas/tests/test_series.py --junitxml=/tmp/pytest_install.xml
-else
- echo "Skipping installation test."
-fi
-RET="$?"
-
-exit "$RET"
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index de3b3fb6a464e..053a2d15a287c 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -131,10 +131,13 @@ fi
if [ "$BUILD_TEST" ]; then
- # build testing
- pip uninstall --yes cython
- pip install cython==0.23
- ( python setup.py build_ext --inplace && python setup.py develop ) || true
+ # build & install testing
+ echo ["Starting installation test."]
+ python setup.py clean
+ python setup.py build_ext --inplace
+ python setup.py sdist --formats=gztar
+ conda uninstall cython
+ pip install dist/*tar.gz || exit 1
else
@@ -142,26 +145,31 @@ else
echo "[build em]"
time python setup.py build_ext --inplace || exit 1
- # we may have run installations
- echo "[conda installs]"
- REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run"
- if [ -e ${REQ} ]; then
- time conda install -n pandas --file=${REQ} || exit 1
- fi
+fi
- # we may have additional pip installs
- echo "[pip installs]"
- REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip"
- if [ -e ${REQ} ]; then
- pip install -r $REQ
- fi
+# we may have run installations
+echo "[conda installs]"
+REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run"
+if [ -e ${REQ} ]; then
+ time conda install -n pandas --file=${REQ} || exit 1
+fi
- # may have addtl installation instructions for this build
- echo "[addtl installs]"
- REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.sh"
- if [ -e ${REQ} ]; then
- time bash $REQ || exit 1
- fi
+# we may have additional pip installs
+echo "[pip installs]"
+REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip"
+if [ -e ${REQ} ]; then
+ pip install -r $REQ
+fi
+
+# may have addtl installation instructions for this build
+echo "[addtl installs]"
+REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.sh"
+if [ -e ${REQ} ]; then
+ time bash $REQ || exit 1
+fi
+
+# finish install if we are not doing a build-testk
+if [ -z "$BUILD_TEST" ]; then
# remove any installed pandas package
# w/o removing anything else
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index 41f71fd21f63f..2d1211b2f7b96 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -24,7 +24,8 @@ export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 429496
echo PYTHONHASHSEED=$PYTHONHASHSEED
if [ "$BUILD_TEST" ]; then
- echo "We are not running pytest as this is simply a build test."
+ cd /tmp
+ python -c "import pandas; pandas.test(['-n 2'])"
elif [ "$COVERAGE" ]; then
echo pytest -s -n 2 -m "not single" --cov=pandas --cov-append --cov-report xml:/tmp/cov.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
pytest -s -n 2 -m "not single" --cov=pandas --cov-append --cov-report xml:/tmp/cov.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
diff --git a/versioneer.py b/versioneer.py
index c010f63e3ead8..104e8e97c6bd6 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1130,7 +1130,9 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
-import json
+from warnings import catch_warnings
+with catch_warnings(record=True):
+ import json
import sys
version_json = '''
| https://api.github.com/repos/pandas-dev/pandas/pulls/15734 | 2017-03-18T23:38:48Z | 2017-03-19T02:00:54Z | null | 2017-03-19T02:01:31Z | |
CI: turn on cache for osx | diff --git a/.travis.yml b/.travis.yml
index cafe46059e6c0..88e1655363a4e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,6 +28,7 @@ matrix:
os: osx
compiler: clang
osx_image: xcode6.4
+ cache: ccache
env:
- PYTHON_VERSION=3.5
- JOB_NAME: "35_osx"
diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh
index cfbced4988357..b87acef0ba11c 100755
--- a/ci/submit_cython_cache.sh
+++ b/ci/submit_cython_cache.sh
@@ -9,7 +9,7 @@ rm -rf $PYX_CACHE_DIR
home_dir=$(pwd)
-mkdir $PYX_CACHE_DIR
+mkdir -p $PYX_CACHE_DIR
rsync -Rv $pyx_file_list $PYX_CACHE_DIR
echo "pyx files:"
| https://api.github.com/repos/pandas-dev/pandas/pulls/15733 | 2017-03-18T16:50:20Z | 2017-03-18T16:50:27Z | 2017-03-18T16:50:27Z | 2017-03-18T16:50:27Z | |
TST: move conftest.py to top-level | diff --git a/pandas/conftest.py b/conftest.py
similarity index 100%
rename from pandas/conftest.py
rename to conftest.py
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 73222c246fc70..2972427f1b245 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -29,7 +29,7 @@ class TestPDApi(Base, tm.TestCase):
# these are optionally imported based on testing
# & need to be ignored
- ignored = ['tests', 'locale', 'conftest']
+ ignored = ['tests', 'locale']
# top-level sub-packages
lib = ['api', 'compat', 'computation', 'core',
| xref #15341 | https://api.github.com/repos/pandas-dev/pandas/pulls/15731 | 2017-03-18T15:46:43Z | 2017-03-18T16:01:20Z | 2017-03-18T16:01:20Z | 2017-03-21T00:14:28Z |
CI: remove 3.5 appveyor build | diff --git a/appveyor.yml b/appveyor.yml
index 1c14698430996..5d748ddf1a108 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -30,13 +30,6 @@ environment:
CONDA_PY: "27"
CONDA_NPY: "110"
- - CONDA_ROOT: "C:\\Miniconda3_64"
- PYTHON_VERSION: "3.5"
- PYTHON_ARCH: "64"
- CONDA_PY: "35"
- CONDA_NPY: "111"
-
-
# We always use a 64-bit machine, but can build x86 distributions
# with the PYTHON_ARCH variable (which is used by CMD_IN_ENV).
platform:
diff --git a/ci/requirements-3.5-64.run b/ci/requirements-3.5-64.run
deleted file mode 100644
index ad66f578d702a..0000000000000
--- a/ci/requirements-3.5-64.run
+++ /dev/null
@@ -1,13 +0,0 @@
-python-dateutil
-pytz
-numpy=1.11*
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-scipy
-feather-format
-numexpr
-pytables
-matplotlib
-blosc
| not really necessary & just makes the building take longer | https://api.github.com/repos/pandas-dev/pandas/pulls/15730 | 2017-03-18T15:45:35Z | 2017-03-18T16:02:10Z | 2017-03-18T16:02:10Z | 2017-03-18T16:02:10Z |
DOC: Fix typos in merge_asof() docstring | diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 261884bba54bd..60d523a8ea539 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -295,7 +295,7 @@ def merge_asof(left, right, on=None,
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
- The default is "backward" and is the compatible in versions below 0.20.0.
+ The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
@@ -340,13 +340,13 @@ def merge_asof(left, right, on=None,
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
- side, respectively
+ side, respectively.
tolerance : integer or Timedelta, optional, default None
- select asof tolerance within this range; must be compatible
- to the merge index.
+ Select asof tolerance within this range; must be compatible
+ with the merge index.
allow_exact_matches : boolean, default True
- - If True, allow matching the same 'on' value
+ - If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., stricly less-than / strictly greater-than)
| https://api.github.com/repos/pandas-dev/pandas/pulls/15729 | 2017-03-18T03:29:38Z | 2017-03-18T03:32:47Z | 2017-03-18T03:32:47Z | 2017-03-18T04:00:32Z | |
CI: install nomkl to speed building | diff --git a/.travis.yml b/.travis.yml
index c1419dd0c5d3b..cafe46059e6c0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -74,7 +74,7 @@ matrix:
- CLIPBOARD=xsel
- COVERAGE=true
- CACHE_NAME="35_nslow"
-# - USE_CACHE=true # Don't use cache for 35_nslow
+ - USE_CACHE=true
addons:
apt:
packages:
@@ -86,6 +86,7 @@ matrix:
- TEST_ARGS="--skip-slow --skip-network"
- PANDAS_TESTING_MODE="deprecate"
- CONDA_FORGE=true
+ - USE_CACHE=true
addons:
apt:
packages:
@@ -154,13 +155,13 @@ matrix:
- USE_CACHE=true
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "35_numpy_dev"
- - JOB_TAG=_NUMPY_DEV
- - TEST_ARGS="--skip-slow --skip-network"
- - PANDAS_TESTING_MODE="deprecate"
- - CACHE_NAME="35_numpy_dev"
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5
+ - JOB_NAME: "35_numpy_dev"
+ - JOB_TAG=_NUMPY_DEV
+ - TEST_ARGS="--skip-slow --skip-network"
+ - PANDAS_TESTING_MODE="deprecate"
+ - CACHE_NAME="35_numpy_dev"
+ - USE_CACHE=true
- python: 3.5
env:
- PYTHON_VERSION=3.5
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 610e6255e6832..de3b3fb6a464e 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -99,7 +99,7 @@ if [ -e ${INSTALL} ]; then
else
# create new env
# this may already exists, in which case our caching worked
- time conda create -n pandas python=$PYTHON_VERSION pytest
+ time conda create -n pandas python=$PYTHON_VERSION pytest nomkl
fi
# build deps
| https://api.github.com/repos/pandas-dev/pandas/pulls/15728 | 2017-03-18T02:10:56Z | 2017-03-18T02:51:36Z | 2017-03-18T02:51:36Z | 2017-03-18T02:51:36Z | |
CI: remove caching for miniconda itself | diff --git a/.travis.yml b/.travis.yml
index af3098b3fc715..c1419dd0c5d3b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,7 +8,6 @@ language: python
# The cash directories will be deleted if anything in ci/ changes in a commit
cache:
directories:
- - $HOME/miniconda3 # miniconda cache
- $HOME/.cache # cython cache
- $HOME/.ccache # compiler cache
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 8bf6de3efe7c4..e59502b810975 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -35,24 +35,19 @@ echo "[home_dir: $home_dir]"
# install miniconda
MINICONDA_DIR="$HOME/miniconda3"
-if [ "$USE_CACHE" ] && [ -d "$MINICONDA_DIR/bin" ]; then
- echo "[Using cached Miniconda install]"
+echo "[Using clean Miniconda install]"
-else
- echo "[Using clean Miniconda install]"
-
- if [ -d "$MINICONDA_DIR" ]; then
- rm -rf "$MINICONDA_DIR"
- fi
+if [ -d "$MINICONDA_DIR" ]; then
+ rm -rf "$MINICONDA_DIR"
+fi
- # install miniconda
- if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
- time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1
- else
- time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1
- fi
- time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1
+# install miniconda
+if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
+ time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1
+else
+ time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1
fi
+time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1
echo "[show conda]"
which conda
| well got this to work, and it *does* cache, but the cache file is so huge that its just not worth it. going back to clean installs for miniconda itself. | https://api.github.com/repos/pandas-dev/pandas/pulls/15722 | 2017-03-17T17:10:53Z | 2017-03-17T17:11:56Z | 2017-03-17T17:11:56Z | 2017-03-17T17:11:56Z |
DOC: Update broken link in cookbook.rst #15605 | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 841195de3da47..8fa1283ffc924 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -905,7 +905,7 @@ CSV
The :ref:`CSV <io.read_csv_table>` docs
-`read_csv in action <http://wesmckinney.com/blog/?p=635>`__
+`read_csv in action <http://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/>`__
`appending to a csv
<http://stackoverflow.com/questions/17134942/pandas-dataframe-output-end-of-csv>`__
| - [ ] closes #15605
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15720 | 2017-03-17T15:39:33Z | 2017-03-17T15:42:26Z | null | 2018-12-12T21:40:25Z |
TST: only catch deprecation warnings for top-level module imports | diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index db92210478182..73222c246fc70 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -249,31 +249,26 @@ def test_groupby(self):
class TestJson(tm.TestCase):
def test_deprecation_access_func(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
pd.json.dumps([])
class TestParser(tm.TestCase):
def test_deprecation_access_func(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
pd.parser.na_values
class TestLib(tm.TestCase):
def test_deprecation_access_func(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
pd.lib.infer_dtype
class TestTSLib(tm.TestCase):
def test_deprecation_access_func(self):
- # some libraries may be imported before we
- # test and could show the warning
with catch_warnings(record=True):
pd.tslib.Timestamp
| https://api.github.com/repos/pandas-dev/pandas/pulls/15718 | 2017-03-17T14:58:28Z | 2017-03-17T14:58:35Z | 2017-03-17T14:58:35Z | 2017-03-21T00:18:42Z | |
DOC: Add gotcha about flake8-ing diff | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 83f99b4f01b26..7961780d0c79b 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -520,6 +520,15 @@ submitting code to run the check yourself on the diff::
git diff master | flake8 --diff
+This command will catch any stylistic errors in your changes specifically, but
+be beware it may not catch all of them. For example, if you delete the only
+usage of an imported function, it is stylistically incorrect to import an
+unused function. However, style-checking the diff will not catch this because
+the actual import is not part of the diff. Thus, for completeness, you should
+run this command, though it will take longer::
+
+ git diff master --name-only -- '*.py' | grep 'pandas' | xargs -r flake8
+
Backwards Compatibility
~~~~~~~~~~~~~~~~~~~~~~~
| The `flake8`-ing the diff will not catch any import style errors. I put an alternative check that is more comprehensive but will take longer to run since you will be checking entire files instead of the diff.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15712 | 2017-03-17T05:29:23Z | 2017-03-17T12:51:41Z | null | 2017-03-17T13:11:44Z |
BUG: TZ-aware Series.where() appropriately handles default other=nan (#15701) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a56212328f5c3..29d05ddcfb497 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -829,6 +829,7 @@ Bug Fixes
- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
- Bug in ``Series.where()`` and ``DataFrame.where()`` where array-like conditionals were being rejected (:issue:`15414`)
+- Bug in ``Series.where()`` where TZ-aware data was converted to float representation (:issue:`15701`)
- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
- Bug in output formatting of a ``MultiIndex`` when names are integers (:issue:`12223`, :issue:`15262`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 0e6c176d950a1..9db01713b05ed 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2440,7 +2440,8 @@ def _try_coerce_args(self, values, other):
if isinstance(other, bool):
raise TypeError
- elif is_null_datelike_scalar(other):
+ elif (is_null_datelike_scalar(other) or
+ (is_scalar(other) and isnull(other))):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 9d93d9f01b161..2bbd46532c287 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1385,6 +1385,14 @@ def test_where_datetime(self):
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
+ # GH 15701
+ timestamps = ['2016-12-31 12:00:04+00:00',
+ '2016-12-31 12:00:04.010000+00:00']
+ s = Series([pd.Timestamp(s) for s in timestamps])
+ rs = s.where(Series([False, True]))
+ expected = Series([pd.NaT, s[1]])
+ assert_series_equal(rs, expected)
+
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
| - [x] closes #15701
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15711 | 2017-03-17T04:07:18Z | 2017-03-17T13:11:05Z | null | 2017-03-17T13:23:43Z |
MAINT: Drop take_last kwarg from method signatures | diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 413c4e044fd3a..c66654ee1e006 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -68,8 +68,8 @@ def setup(self):
self.s4 = self.s3.astype('object')
def time_series_nlargest1(self):
- self.s1.nlargest(3, take_last=True)
- self.s1.nlargest(3, take_last=False)
+ self.s1.nlargest(3, keep='last')
+ self.s1.nlargest(3, keep='first')
class series_nlargest2(object):
@@ -83,8 +83,8 @@ def setup(self):
self.s4 = self.s3.astype('object')
def time_series_nlargest2(self):
- self.s2.nlargest(3, take_last=True)
- self.s2.nlargest(3, take_last=False)
+ self.s2.nlargest(3, keep='last')
+ self.s2.nlargest(3, keep='first')
class series_nsmallest2(object):
@@ -98,8 +98,8 @@ def setup(self):
self.s4 = self.s3.astype('object')
def time_series_nsmallest2(self):
- self.s2.nsmallest(3, take_last=True)
- self.s2.nsmallest(3, take_last=False)
+ self.s2.nsmallest(3, keep='last')
+ self.s2.nsmallest(3, keep='first')
class series_dropna_int64(object):
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 29d05ddcfb497..9cf53300f8cca 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -769,6 +769,7 @@ Removal of prior version deprecations/changes
in favor of ``iloc`` and ``iat`` as explained :ref:`here <whatsnew_0170.deprecations>` (:issue:`10711`).
- The deprecated ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`10711`)
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
+- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index d7c9e35ab6a51..bde60be3ddcff 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1065,7 +1065,6 @@ def searchsorted(self, value, side='left', sorter=None):
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
- take_last : deprecated
%(inplace)s
Returns
@@ -1073,8 +1072,6 @@ def searchsorted(self, value, side='left', sorter=None):
deduplicated : %(klass)s
""")
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
@@ -1100,15 +1097,12 @@ def drop_duplicates(self, keep='first', inplace=False):
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
- take_last : deprecated
Returns
-------
duplicated : %(duplicated)s
""")
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 987eb10101f12..3696051b269e3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -77,8 +77,7 @@
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
-from pandas.util.decorators import (deprecate_kwarg, Appender,
- Substitution)
+from pandas.util.decorators import Appender, Substitution
from pandas.util.validators import validate_bool_kwarg
from pandas.tseries.period import PeriodIndex
@@ -3169,8 +3168,6 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
else:
return result
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
@@ -3185,7 +3182,6 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
- take_last : deprecated
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
@@ -3203,8 +3199,6 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
else:
return self[-duplicated]
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
@@ -3221,7 +3215,6 @@ def duplicated(self, subset=None, keep='first'):
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
- take_last : deprecated
Returns
-------
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7a017ffae284c..4095a14aa5970 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -51,8 +51,8 @@
from pandas.core.sorting import (get_group_index_sorter, get_group_index,
compress_group_index, get_flattened_iterator,
decons_obs_group_ids, get_indexer_dict)
-from pandas.util.decorators import (cache_readonly, Substitution, Appender,
- make_signature, deprecate_kwarg)
+from pandas.util.decorators import (cache_readonly, Substitution,
+ Appender, make_signature)
from pandas.formats.printing import pprint_thing
from pandas.util.validators import validate_kwargs
@@ -94,12 +94,12 @@
'corr', 'cov', 'diff',
]) | _plotting_methods
-_series_apply_whitelist = \
- (_common_apply_whitelist - set(['boxplot'])) | \
- frozenset(['dtype', 'unique'])
+_series_apply_whitelist = ((_common_apply_whitelist |
+ {'nlargest', 'nsmallest'}) -
+ {'boxplot'}) | frozenset(['dtype', 'unique'])
-_dataframe_apply_whitelist = \
- _common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
+_dataframe_apply_whitelist = (_common_apply_whitelist |
+ frozenset(['dtypes', 'corrwith']))
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
@@ -3025,20 +3025,6 @@ def nunique(self, dropna=True):
index=ri,
name=self.name)
- @deprecate_kwarg('take_last', 'keep',
- mapping={True: 'last', False: 'first'})
- @Appender(Series.nlargest.__doc__)
- def nlargest(self, n=5, keep='first'):
- # ToDo: When we remove deprecate_kwargs, we can remote these methods
- # and include nlargest and nsmallest to _series_apply_whitelist
- return self.apply(lambda x: x.nlargest(n=n, keep=keep))
-
- @deprecate_kwarg('take_last', 'keep',
- mapping={True: 'last', False: 'first'})
- @Appender(Series.nsmallest.__doc__)
- def nsmallest(self, n=5, keep='first'):
- return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
-
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cfa25ca1299eb..7ee3b3e8fb519 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1211,14 +1211,10 @@ def unique(self):
return result.asobject.values
return result
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
def duplicated(self, keep='first'):
return super(Series, self).duplicated(keep=keep)
@@ -1888,8 +1884,6 @@ def argsort(self, axis=0, kind='quicksort', order=None):
np.argsort(values, kind=kind), index=self.index,
dtype='int64').__finalize__(self)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
def nlargest(self, n=5, keep='first'):
"""Return the largest `n` elements.
@@ -1901,7 +1895,6 @@ def nlargest(self, n=5, keep='first'):
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- take_last : deprecated
Returns
-------
@@ -1938,8 +1931,6 @@ def nlargest(self, n=5, keep='first'):
return algorithms.select_n_series(self, n=n, keep=keep,
method='nlargest')
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
def nsmallest(self, n=5, keep='first'):
"""Return the smallest `n` elements.
@@ -1951,7 +1942,6 @@ def nsmallest(self, n=5, keep='first'):
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- take_last : deprecated
Returns
-------
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 5b942e2565c29..381e4d5caa8ac 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -3500,14 +3500,10 @@ def unique(self):
result = super(Index, self).unique()
return self._shallow_copy(result)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
diff --git a/pandas/indexes/category.py b/pandas/indexes/category.py
index 923dd4ec785c5..7cfc95de5f538 100644
--- a/pandas/indexes/category.py
+++ b/pandas/indexes/category.py
@@ -11,8 +11,7 @@
from pandas.types.missing import array_equivalent
-from pandas.util.decorators import (Appender, cache_readonly,
- deprecate_kwarg)
+from pandas.util.decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.indexes.base import Index, _index_shared_docs
import pandas.core.base as base
@@ -301,8 +300,6 @@ def unique(self):
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 1c1609fed1dd1..978492131ca89 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -755,8 +755,6 @@ def f(k, stringify):
for k, stringify in zip(key, self._have_mixed_levels)])
return hash_tuples(key)
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
- False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6c917444f9f43..4fb1d2222fa06 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1381,12 +1381,6 @@ def test_drop_duplicates(self):
tm.assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates('AAA', take_last=True)
- expected = df.loc[[6, 7]]
- tm.assert_frame_equal(result, expected)
-
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
@@ -1402,12 +1396,6 @@ def test_drop_duplicates(self):
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates(('AAA', 'B'), take_last=True)
- expected = df.loc[[0, 5, 6, 7]]
- tm.assert_frame_equal(result, expected)
-
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
@@ -1424,13 +1412,6 @@ def test_drop_duplicates(self):
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df2.drop_duplicates(take_last=True)
- with tm.assert_produces_warning(FutureWarning):
- expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
- tm.assert_frame_equal(result, expected)
-
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
@@ -1529,12 +1510,6 @@ def test_drop_duplicates_tuple(self):
self.assertEqual(len(result), 0)
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates(('AA', 'AB'), take_last=True)
- expected = df.loc[[6, 7]]
- tm.assert_frame_equal(result, expected)
-
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
@@ -1563,12 +1538,6 @@ def test_drop_duplicates_NA(self):
tm.assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates('A', take_last=True)
- expected = df.loc[[1, 6, 7]]
- tm.assert_frame_equal(result, expected)
-
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
@@ -1582,12 +1551,6 @@ def test_drop_duplicates_NA(self):
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates(['A', 'B'], take_last=True)
- expected = df.loc[[1, 5, 6, 7]]
- tm.assert_frame_equal(result, expected)
-
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
@@ -1610,12 +1573,6 @@ def test_drop_duplicates_NA(self):
tm.assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates('C', take_last=True)
- expected = df.loc[[3, 7]]
- tm.assert_frame_equal(result, expected)
-
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
@@ -1629,12 +1586,6 @@ def test_drop_duplicates_NA(self):
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- result = df.drop_duplicates(['C', 'B'], take_last=True)
- expected = df.loc[[1, 3, 6, 7]]
- tm.assert_frame_equal(result, expected)
-
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
@@ -1697,14 +1648,6 @@ def test_drop_duplicates_inplace(self):
tm.assert_frame_equal(result, expected)
self.assertEqual(len(df), 0)
- # deprecate take_last
- df = orig.copy()
- with tm.assert_produces_warning(FutureWarning):
- df.drop_duplicates('A', take_last=True, inplace=True)
- expected = orig.loc[[6, 7]]
- result = df
- tm.assert_frame_equal(result, expected)
-
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
@@ -1724,14 +1667,6 @@ def test_drop_duplicates_inplace(self):
result = df
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- df = orig.copy()
- with tm.assert_produces_warning(FutureWarning):
- df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
- expected = orig.loc[[0, 5, 6, 7]]
- result = df
- tm.assert_frame_equal(result, expected)
-
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
@@ -1754,17 +1689,7 @@ def test_drop_duplicates_inplace(self):
result = df2
tm.assert_frame_equal(result, expected)
- # deprecate take_last
- df2 = orig2.copy()
- with tm.assert_produces_warning(FutureWarning):
- df2.drop_duplicates(take_last=True, inplace=True)
- with tm.assert_produces_warning(FutureWarning):
- expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
- result = df2
- tm.assert_frame_equal(result, expected)
-
# Rounding
-
def test_round(self):
# GH 2665
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c25974c94bfd1..a355dca3029c7 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -3816,7 +3816,8 @@ def test_groupby_whitelist(self):
'cov',
'diff',
'unique',
- # 'nlargest', 'nsmallest',
+ 'nlargest',
+ 'nsmallest',
])
for obj, whitelist in zip((df, s), (df_whitelist, s_whitelist)):
@@ -4025,8 +4026,6 @@ def test_nlargest(self):
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
assert_series_equal(gb.nlargest(3, keep='last'), e)
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(gb.nlargest(3, take_last=True), e)
def test_nsmallest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
@@ -4044,8 +4043,6 @@ def test_nsmallest(self):
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
assert_series_equal(gb.nsmallest(3, keep='last'), e)
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(gb.nsmallest(3, take_last=True), e)
def test_transform_doesnt_clobber_ints(self):
# GH 7972
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index c2543581dca50..dc71fafb1094f 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -917,17 +917,6 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(s.duplicated(take_last=True), expected)
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(
- s.drop_duplicates(take_last=True), s[~expected])
- sc = s.copy()
- with tm.assert_produces_warning(FutureWarning):
- sc.drop_duplicates(take_last=True, inplace=True)
- assert_series_equal(sc, s[~expected])
-
expected = Series([False, False, True, True])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
@@ -951,17 +940,6 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(s.duplicated(take_last=True), expected)
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(
- s.drop_duplicates(take_last=True), s[~expected])
- sc = s.copy()
- with tm.assert_produces_warning(FutureWarning):
- sc.drop_duplicates(take_last=True, inplace=True)
- assert_series_equal(sc, s[~expected])
-
expected = Series([False, True, True, False, True, True, False])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
@@ -1443,18 +1421,7 @@ def test_nsmallest_nlargest(self):
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
-
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(
- s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
-
- assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
-
- assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])
- with tm.assert_produces_warning(FutureWarning):
- assert_series_equal(
- s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 1d4dddf6477df..68db0d19344b9 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -816,15 +816,6 @@ def test_duplicated_drop_duplicates_index(self):
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- duplicated = idx.duplicated(take_last=True)
- tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
- with tm.assert_produces_warning(FutureWarning):
- result = idx.drop_duplicates(take_last=True)
- tm.assert_index_equal(result, idx[~expected])
-
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
@@ -867,13 +858,6 @@ def test_duplicated_drop_duplicates_index(self):
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
- # deprecate take_last
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(
- s.duplicated(take_last=True), expected)
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(s.drop_duplicates(take_last=True),
- s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index d7b115d808312..fd5421abc89ad 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2037,17 +2037,6 @@ def test_duplicated_drop_duplicates(self):
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
- # deprecate take_last
- expected = np.array([True, False, False, False, False, False])
- with tm.assert_produces_warning(FutureWarning):
- duplicated = idx.duplicated(take_last=True)
- tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
- expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_index_equal(
- idx.drop_duplicates(take_last=True), expected)
-
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
diff --git a/vb_suite/series_methods.py b/vb_suite/series_methods.py
index cd8688495fa09..c545f419c2dec 100644
--- a/vb_suite/series_methods.py
+++ b/vb_suite/series_methods.py
@@ -12,22 +12,22 @@
s4 = s3.astype('object')
"""
-series_nlargest1 = Benchmark('s1.nlargest(3, take_last=True);'
- 's1.nlargest(3, take_last=False)',
+series_nlargest1 = Benchmark("s1.nlargest(3, keep='last');"
+ "s1.nlargest(3, keep='first')",
setup,
start_date=datetime(2014, 1, 25))
-series_nlargest2 = Benchmark('s2.nlargest(3, take_last=True);'
- 's2.nlargest(3, take_last=False)',
+series_nlargest2 = Benchmark("s2.nlargest(3, keep='last');"
+ "s2.nlargest(3, keep='first')",
setup,
start_date=datetime(2014, 1, 25))
-series_nsmallest2 = Benchmark('s1.nsmallest(3, take_last=True);'
- 's1.nsmallest(3, take_last=False)',
+series_nsmallest2 = Benchmark("s1.nsmallest(3, keep='last');"
+ "s1.nsmallest(3, keep='first')",
setup,
start_date=datetime(2014, 1, 25))
-series_nsmallest2 = Benchmark('s2.nsmallest(3, take_last=True);'
- 's2.nsmallest(3, take_last=False)',
+series_nsmallest2 = Benchmark("s2.nsmallest(3, keep='last');"
+ "s2.nsmallest(3, keep='first')",
setup,
start_date=datetime(2014, 1, 25))
| Affected methods:
1) `nlargest`
2) `nsmallest`
3) `duplicated`
4) `drop_duplicates`
xref #10236, #10792, #10920. | https://api.github.com/repos/pandas-dev/pandas/pulls/15710 | 2017-03-17T00:52:22Z | 2017-03-18T00:13:21Z | null | 2017-03-18T01:27:14Z |
TST: Reorganize package_check and skip_if_no_package | diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 5592c564e51df..8ea8088a297b8 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -728,7 +728,8 @@ def test_put_compression(self):
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
- tm.skip_if_no_package('tables', '2.2', app='blosc support')
+ tm.skip_if_no_package('tables', min_version='2.2',
+ app='blosc support')
if skip_compression:
pytest.skip("skipping on windows/PY3")
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 529ecef3e2d6a..154476ce8340a 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2010,12 +2010,16 @@ def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
-# Dependency checks. Copied this from Nipy/Nipype (Copyright of
-# respective developers, license: BSD-3)
-def package_check(pkg_name, min_version=None, max_version=None, app='pandas',
- checker=LooseVersion):
+# Dependency checker when running tests.
+#
+# Copied this from nipy/nipype
+# Copyright of respective developers, License: BSD-3
+def skip_if_no_package(pkg_name, min_version=None, max_version=None,
+ app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
+ If the package check fails, the test is automatically skipped.
+
Parameters
----------
pkg_name : string
@@ -2025,11 +2029,11 @@ def package_check(pkg_name, min_version=None, max_version=None, app='pandas',
max_version : string, optional
Max version number for required package.
app : string, optional
- Application that is performing the check. For instance, the
+ Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
- The class that will perform the version checking. Default is
+ The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
@@ -2061,17 +2065,6 @@ def package_check(pkg_name, min_version=None, max_version=None, app='pandas',
pytest.skip(msg)
-def skip_if_no_package(*args, **kwargs):
- """pytest.skip() if package_check fails
-
- Parameters
- ----------
- *args Positional parameters passed to `package_check`
- *kwargs Keyword parameters passed to `package_check`
- """
- package_check(*args, **kwargs)
-
-
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
| `skip_if_no_package` literally just calls `package_check` with no additional decorations and with confusing `*args` and `**kwargs`. However, `skip_if_no_package` is a better name than `package_check`. | https://api.github.com/repos/pandas-dev/pandas/pulls/15709 | 2017-03-17T00:51:16Z | 2017-03-17T12:50:27Z | 2017-03-17T12:50:27Z | 2017-03-17T13:11:46Z |
TST: remove rest of yield warnings | diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index b1f163ccf9429..44a7f2b45e759 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -1392,24 +1392,26 @@ def test_repr_html_long(self):
assert u('2 columns') in long_repr
def test_repr_html_float(self):
- max_rows = get_option('display.max_rows')
- h = max_rows - 1
- df = DataFrame({'idx': np.linspace(-10, 10, h),
- 'A': np.arange(1, 1 + h),
- 'B': np.arange(41, 41 + h)}).set_index('idx')
- reg_repr = df._repr_html_()
- assert '..' not in reg_repr
- assert str(40 + h) in reg_repr
-
- h = max_rows + 1
- df = DataFrame({'idx': np.linspace(-10, 10, h),
- 'A': np.arange(1, 1 + h),
- 'B': np.arange(41, 41 + h)}).set_index('idx')
- long_repr = df._repr_html_()
- assert '..' in long_repr
- assert '31' not in long_repr
- assert u('%d rows ') % h in long_repr
- assert u('2 columns') in long_repr
+ with option_context('display.max_rows', 60):
+
+ max_rows = get_option('display.max_rows')
+ h = max_rows - 1
+ df = DataFrame({'idx': np.linspace(-10, 10, h),
+ 'A': np.arange(1, 1 + h),
+ 'B': np.arange(41, 41 + h)}).set_index('idx')
+ reg_repr = df._repr_html_()
+ assert '..' not in reg_repr
+ assert str(40 + h) in reg_repr
+
+ h = max_rows + 1
+ df = DataFrame({'idx': np.linspace(-10, 10, h),
+ 'A': np.arange(1, 1 + h),
+ 'B': np.arange(41, 41 + h)}).set_index('idx')
+ long_repr = df._repr_html_()
+ assert '..' in long_repr
+ assert '31' not in long_repr
+ assert u('%d rows ') % h in long_repr
+ assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
max_rows = get_option('display.max_rows')
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index df5e843097514..29920b165d3f6 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -23,11 +23,19 @@
from pandas.compat import zip, u
+@pytest.fixture
+def mgr():
+ return create_mgr(
+ 'a: f8; b: object; c: f8; d: object; e: f8;'
+ 'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
+ 'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
+
+
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
- assert (left.dtype == right.dtype)
- tm.assertIsInstance(left.mgr_locs, lib.BlockPlacement)
- tm.assertIsInstance(right.mgr_locs, lib.BlockPlacement)
+ assert left.dtype == right.dtype
+ assert isinstance(left.mgr_locs, lib.BlockPlacement)
+ assert isinstance(right.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
@@ -197,11 +205,11 @@ def setUp(self):
def test_constructor(self):
int32block = create_block('i4', [0])
- self.assertEqual(int32block.dtype, np.int32)
+ assert int32block.dtype == np.int32
def test_pickle(self):
def _check(blk):
- assert_block_equal(self.round_trip_pickle(blk), blk)
+ assert_block_equal(tm.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
@@ -209,14 +217,14 @@ def _check(blk):
_check(self.bool_block)
def test_mgr_locs(self):
- tm.assertIsInstance(self.fblock.mgr_locs, lib.BlockPlacement)
+ assert isinstance(self.fblock.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
- self.assertEqual(self.fblock.shape, self.fblock.values.shape)
- self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
- self.assertEqual(len(self.fblock), len(self.fblock.values))
+ assert self.fblock.shape == self.fblock.values.shape
+ assert self.fblock.dtype == self.fblock.values.dtype
+ assert len(self.fblock) == len(self.fblock.values)
def test_merge(self):
avals = randn(2, 10)
@@ -251,26 +259,27 @@ def test_insert(self):
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
- tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
+ assert isinstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
- self.assertTrue((newb.values[0] == 1).all())
+ assert (newb.values[0] == 1).all()
newb = self.fblock.copy()
newb.delete(1)
- tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
+ assert isinstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
- self.assertTrue((newb.values[1] == 2).all())
+ assert (newb.values[1] == 2).all()
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
- self.assertTrue((newb.values[1] == 1).all())
+ assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
- self.assertRaises(Exception, newb.delete, 3)
+ with pytest.raises(Exception):
+ newb.delete(3)
def test_split_block_at(self):
@@ -279,21 +288,21 @@ def test_split_block_at(self):
pytest.skip("skipping for now")
bs = list(self.fblock.split_block_at('a'))
- self.assertEqual(len(bs), 1)
- self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
+ assert len(bs) == 1
+ assert np.array_equal(bs[0].items, ['c', 'e'])
bs = list(self.fblock.split_block_at('c'))
- self.assertEqual(len(bs), 2)
- self.assertTrue(np.array_equal(bs[0].items, ['a']))
- self.assertTrue(np.array_equal(bs[1].items, ['e']))
+ assert len(bs) == 2
+ assert np.array_equal(bs[0].items, ['a'])
+ assert np.array_equal(bs[1].items, ['e'])
bs = list(self.fblock.split_block_at('e'))
- self.assertEqual(len(bs), 1)
- self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
+ assert len(bs) == 1
+ assert np.array_equal(bs[0].items, ['a', 'c'])
# bblock = get_bool_ex(['f'])
# bs = list(bblock.split_block_at('f'))
- # self.assertEqual(len(bs), 0)
+ # assert len(bs), 0)
class TestDatetimeBlock(tm.TestCase):
@@ -303,50 +312,44 @@ def test_try_coerce_arg(self):
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[2]
- self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
+ assert pd.Timestamp(none_coerced) is pd.NaT
# coerce different types of date bojects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[2]
- self.assertEqual(np.int64, type(coerced))
- self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
-
+ assert np.int64 == type(coerced)
+ assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
-class TestBlockManager(tm.TestCase):
- def setUp(self):
- self.mgr = create_mgr(
- 'a: f8; b: object; c: f8; d: object; e: f8;'
- 'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
- 'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
+class TestBlockManager(object):
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
- self.assertEqual(mgr.nblocks, 2)
- self.assertEqual(len(mgr), 6)
+ assert mgr.nblocks == 2
+ assert len(mgr) == 6
def test_is_mixed_dtype(self):
- self.assertFalse(create_mgr('a,b:f8').is_mixed_type)
- self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type)
+ assert not create_mgr('a,b:f8').is_mixed_type
+ assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type
- self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type)
- self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type)
+ assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
+ assert create_mgr('a,b:f8; c,d: object').is_mixed_type
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
- self.assertTrue(mgr1._is_indexed_like(mgr1))
- self.assertTrue(mgr1._is_indexed_like(mgr2))
- self.assertTrue(mgr1._is_indexed_like(mgr3))
+ assert mgr1._is_indexed_like(mgr1)
+ assert mgr1._is_indexed_like(mgr2)
+ assert mgr1._is_indexed_like(mgr3)
- self.assertFalse(mgr1._is_indexed_like(mgr1.get_slice(
- slice(-1), axis=1)))
+ assert not mgr1._is_indexed_like(mgr1.get_slice(
+ slice(-1), axis=1))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
@@ -355,61 +358,63 @@ def test_duplicate_ref_loc_failure(self):
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
+
# test trying to create block manager with overlapping ref locs
- self.assertRaises(AssertionError, BlockManager, blocks, axes)
+ with pytest.raises(AssertionError):
+ BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
- def test_contains(self):
- self.assertIn('a', self.mgr)
- self.assertNotIn('baz', self.mgr)
+ def test_contains(self, mgr):
+ assert 'a' in mgr
+ assert 'baz' not in mgr
- def test_pickle(self):
+ def test_pickle(self, mgr):
- mgr2 = self.round_trip_pickle(self.mgr)
- assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
+ mgr2 = tm.round_trip_pickle(mgr)
+ assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
# share ref_items
# self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items)
# GH2431
- self.assertTrue(hasattr(mgr2, "_is_consolidated"))
- self.assertTrue(hasattr(mgr2, "_known_consolidated"))
+ assert hasattr(mgr2, "_is_consolidated")
+ assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
- self.assertFalse(mgr2._is_consolidated)
- self.assertFalse(mgr2._known_consolidated)
+ assert not mgr2._is_consolidated
+ assert not mgr2._known_consolidated
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
- mgr2 = self.round_trip_pickle(mgr)
+ mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
- mgr2 = self.round_trip_pickle(mgr)
+ mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
- mgr2 = self.round_trip_pickle(mgr)
+ mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
- smgr2 = self.round_trip_pickle(smgr)
+ smgr2 = tm.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
- def test_get_scalar(self):
- for item in self.mgr.items:
- for i, index in enumerate(self.mgr.axes[1]):
- res = self.mgr.get_scalar((item, index))
- exp = self.mgr.get(item, fastpath=False)[i]
- self.assertEqual(res, exp)
- exp = self.mgr.get(item).internal_values()[i]
- self.assertEqual(res, exp)
+ def test_get_scalar(self, mgr):
+ for item in mgr.items:
+ for i, index in enumerate(mgr.axes[1]):
+ res = mgr.get_scalar((item, index))
+ exp = mgr.get(item, fastpath=False)[i]
+ assert res == exp
+ exp = mgr.get(item).internal_values()[i]
+ assert res == exp
def test_get(self):
cols = Index(list('abc'))
@@ -438,30 +443,21 @@ def test_set(self):
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
- def test_insert(self):
- self.mgr.insert(0, 'inserted', np.arange(N))
-
- self.assertEqual(self.mgr.items[0], 'inserted')
- assert_almost_equal(self.mgr.get('inserted'), np.arange(N))
+ def test_set_change_dtype(self, mgr):
+ mgr.set('baz', np.zeros(N, dtype=bool))
- for blk in self.mgr.blocks:
- yield self.assertIs, self.mgr.items, blk.ref_items
+ mgr.set('baz', np.repeat('foo', N))
+ assert mgr.get('baz').dtype == np.object_
- def test_set_change_dtype(self):
- self.mgr.set('baz', np.zeros(N, dtype=bool))
-
- self.mgr.set('baz', np.repeat('foo', N))
- self.assertEqual(self.mgr.get('baz').dtype, np.object_)
-
- mgr2 = self.mgr.consolidate()
+ mgr2 = mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
- self.assertEqual(mgr2.get('baz').dtype, np.object_)
+ assert mgr2.get('baz').dtype == np.object_
mgr2.set('quux', randn(N).astype(int))
- self.assertEqual(mgr2.get('quux').dtype, np.int_)
+ assert mgr2.get('quux').dtype == np.int_
mgr2.set('quux', randn(N))
- self.assertEqual(mgr2.get('quux').dtype, np.float_)
+ assert mgr2.get('quux').dtype == np.float_
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
@@ -469,70 +465,69 @@ def test_set_change_dtype_slice(self): # GH8850
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
- self.assertEqual(sorted(df.blocks.keys()), ['float64', 'int64'])
+ assert sorted(df.blocks.keys()) == ['float64', 'int64']
assert_frame_equal(df.blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(df.blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
- def test_copy(self):
- cp = self.mgr.copy(deep=False)
- for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
+ def test_copy(self, mgr):
+ cp = mgr.copy(deep=False)
+ for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
- self.assertTrue(cp_blk.equals(blk))
- self.assertTrue(cp_blk.values.base is blk.values.base)
+ assert cp_blk.equals(blk)
+ assert cp_blk.values.base is blk.values.base
- cp = self.mgr.copy(deep=True)
- for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
+ cp = mgr.copy(deep=True)
+ for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
- self.assertTrue(cp_blk.equals(blk))
+ assert cp_blk.equals(blk)
if cp_blk.values.base is not None and blk.values.base is not None:
- self.assertFalse(cp_blk.values.base is blk.values.base)
+ assert cp_blk.values.base is not blk.values.base
else:
- self.assertTrue(cp_blk.values.base is None and blk.values.base
- is None)
+ assert cp_blk.values.base is None and blk.values.base is None
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
- self.assertEqual(mgr.as_matrix().dtype, np.float64)
+ assert mgr.as_matrix().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
- self.assertEqual(len(mgr.blocks), 3)
- self.assertIsInstance(mgr, BlockManager)
+ assert len(mgr.blocks) == 3
+ assert isinstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
- self.assertEqual(mgr.as_matrix().dtype, np.float64)
+ assert mgr.as_matrix().dtype == np.float64
mgr = create_mgr('c: f4; d: f2')
- self.assertEqual(mgr.as_matrix().dtype, np.float32)
+ assert mgr.as_matrix().dtype == np.float32
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
- self.assertEqual(mgr.as_matrix().dtype, np.bool_)
+ assert mgr.as_matrix().dtype == np.bool_
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
- self.assertEqual(mgr.as_matrix().dtype, np.int64)
+ assert mgr.as_matrix().dtype == np.int64
mgr = create_mgr('c: i4; d: i2; e: u1')
- self.assertEqual(mgr.as_matrix().dtype, np.int32)
+ assert mgr.as_matrix().dtype == np.int32
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
- self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
+ assert mgr.as_matrix().dtype == 'M8[ns]'
def test_as_matrix_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
- self.assertEqual(mgr.get('h').dtype, 'datetime64[ns, US/Eastern]')
- self.assertEqual(mgr.get('g').dtype, 'datetime64[ns, CET]')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
+ assert mgr.get('g').dtype == 'datetime64[ns, CET]'
+ assert mgr.as_matrix().dtype == 'object'
def test_astype(self):
# coerce all
@@ -540,9 +535,9 @@ def test_astype(self):
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
- self.assertEqual(tmgr.get('c').dtype.type, t)
- self.assertEqual(tmgr.get('d').dtype.type, t)
- self.assertEqual(tmgr.get('e').dtype.type, t)
+ assert tmgr.get('c').dtype.type == t
+ assert tmgr.get('d').dtype.type == t
+ assert tmgr.get('e').dtype.type == t
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
@@ -550,24 +545,24 @@ def test_astype(self):
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, errors='ignore')
- self.assertEqual(tmgr.get('c').dtype.type, t)
- self.assertEqual(tmgr.get('e').dtype.type, t)
- self.assertEqual(tmgr.get('f').dtype.type, t)
- self.assertEqual(tmgr.get('g').dtype.type, t)
+ assert tmgr.get('c').dtype.type == t
+ assert tmgr.get('e').dtype.type == t
+ assert tmgr.get('f').dtype.type == t
+ assert tmgr.get('g').dtype.type == t
- self.assertEqual(tmgr.get('a').dtype.type, np.object_)
- self.assertEqual(tmgr.get('b').dtype.type, np.object_)
+ assert tmgr.get('a').dtype.type == np.object_
+ assert tmgr.get('b').dtype.type == np.object_
if t != np.int64:
- self.assertEqual(tmgr.get('d').dtype.type, np.datetime64)
+ assert tmgr.get('d').dtype.type == np.datetime64
else:
- self.assertEqual(tmgr.get('d').dtype.type, t)
+ assert tmgr.get('d').dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
- self.assertEqual(len(old_blocks), len(new_blocks))
+ assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
@@ -576,7 +571,7 @@ def _compare(old_mgr, new_mgr):
if (b.values == nb.values).all():
found = True
break
- self.assertTrue(found)
+ assert found
for b in new_blocks:
found = False
@@ -584,7 +579,7 @@ def _compare(old_mgr, new_mgr):
if (b.values == ob.values).all():
found = True
break
- self.assertTrue(found)
+ assert found
# noops
mgr = create_mgr('f: i8; g: f8')
@@ -601,11 +596,11 @@ def _compare(old_mgr, new_mgr):
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
- self.assertEqual(new_mgr.get('a').dtype, np.int64)
- self.assertEqual(new_mgr.get('b').dtype, np.float64)
- self.assertEqual(new_mgr.get('foo').dtype, np.object_)
- self.assertEqual(new_mgr.get('f').dtype, np.int64)
- self.assertEqual(new_mgr.get('g').dtype, np.float64)
+ assert new_mgr.get('a').dtype == np.int64
+ assert new_mgr.get('b').dtype == np.float64
+ assert new_mgr.get('foo').dtype == np.object_
+ assert new_mgr.get('f').dtype == np.int64
+ assert new_mgr.get('g').dtype == np.float64
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
@@ -613,15 +608,15 @@ def _compare(old_mgr, new_mgr):
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
- self.assertEqual(new_mgr.get('a').dtype, np.int64)
- self.assertEqual(new_mgr.get('b').dtype, np.float64)
- self.assertEqual(new_mgr.get('foo').dtype, np.object_)
- self.assertEqual(new_mgr.get('f').dtype, np.int32)
- self.assertEqual(new_mgr.get('bool').dtype, np.bool_)
- self.assertEqual(new_mgr.get('dt').dtype.type, np.datetime64)
- self.assertEqual(new_mgr.get('i').dtype, np.int64)
- self.assertEqual(new_mgr.get('g').dtype, np.float64)
- self.assertEqual(new_mgr.get('h').dtype, np.float16)
+ assert new_mgr.get('a').dtype == np.int64
+ assert new_mgr.get('b').dtype == np.float64
+ assert new_mgr.get('foo').dtype == np.object_
+ assert new_mgr.get('f').dtype == np.int32
+ assert new_mgr.get('bool').dtype == np.bool_
+ assert new_mgr.get('dt').dtype.type, np.datetime64
+ assert new_mgr.get('i').dtype == np.int64
+ assert new_mgr.get('g').dtype == np.float64
+ assert new_mgr.get('h').dtype == np.float16
def test_interleave(self):
@@ -629,49 +624,49 @@ def test_interleave(self):
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
- self.assertEqual(mgr.as_matrix().dtype, dtype)
+ assert mgr.as_matrix().dtype == dtype
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
- self.assertEqual(mgr.as_matrix().dtype, dtype)
+ assert mgr.as_matrix().dtype == dtype
# will be converted according the actual dtype of the underlying
mgr = create_mgr('a: category')
- self.assertEqual(mgr.as_matrix().dtype, 'i8')
+ assert mgr.as_matrix().dtype == 'i8'
mgr = create_mgr('a: category; b: category')
- self.assertEqual(mgr.as_matrix().dtype, 'i8'),
+ assert mgr.as_matrix().dtype == 'i8'
mgr = create_mgr('a: category; b: category2')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: category2')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: category2; b: category2')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
# combinations
mgr = create_mgr('a: f8')
- self.assertEqual(mgr.as_matrix().dtype, 'f8')
+ assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f8; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'f8')
+ assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'f8')
+ assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8; d: object')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: bool; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: complex')
- self.assertEqual(mgr.as_matrix().dtype, 'complex')
+ assert mgr.as_matrix().dtype == 'complex'
mgr = create_mgr('a: f8; b: category')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: category')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: bool')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: bool')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
- self.assertEqual(mgr.as_matrix().dtype, 'object')
+ assert mgr.as_matrix().dtype == 'object'
def test_interleave_non_unique_cols(self):
df = DataFrame([
@@ -682,26 +677,26 @@ def test_interleave_non_unique_cols(self):
df_unique = df.copy()
df_unique.columns = ['x', 'y']
- self.assertEqual(df_unique.values.shape, df.values.shape)
+ assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
- def test_consolidate_ordering_issues(self):
- self.mgr.set('f', randn(N))
- self.mgr.set('d', randn(N))
- self.mgr.set('b', randn(N))
- self.mgr.set('g', randn(N))
- self.mgr.set('h', randn(N))
-
- # we have datetime/tz blocks in self.mgr
- cons = self.mgr.consolidate()
- self.assertEqual(cons.nblocks, 4)
- cons = self.mgr.consolidate().get_numeric_data()
- self.assertEqual(cons.nblocks, 1)
- tm.assertIsInstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
+ def test_consolidate_ordering_issues(self, mgr):
+ mgr.set('f', randn(N))
+ mgr.set('d', randn(N))
+ mgr.set('b', randn(N))
+ mgr.set('g', randn(N))
+ mgr.set('h', randn(N))
+
+ # we have datetime/tz blocks in mgr
+ cons = mgr.consolidate()
+ assert cons.nblocks == 4
+ cons = mgr.consolidate().get_numeric_data()
+ assert cons.nblocks == 1
+ assert isinstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
@@ -714,7 +709,7 @@ def test_reindex_items(self):
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
- self.assertEqual(reindexed.nblocks, 2)
+ assert reindexed.nblocks == 2
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
@@ -748,9 +743,9 @@ def test_multiindex_xs(self):
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
- self.assertEqual(result.shape, (6, 2))
- self.assertEqual(result.axes[1][0], ('bar', 'one'))
- self.assertEqual(result.axes[1][1], ('bar', 'two'))
+ assert result.shape == (6, 2)
+ assert result.axes[1][0] == ('bar', 'one')
+ assert result.axes[1][1] == ('bar', 'two')
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
@@ -826,11 +821,11 @@ def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
- self.assertTrue(bm1.equals(bm2))
+ assert bm1.equals(bm2)
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
- self.assertTrue(bm1.equals(bm2))
+ assert bm1.equals(bm2)
def test_equals_block_order_different_dtypes(self):
# GH 9330
@@ -848,19 +843,19 @@ def test_equals_block_order_different_dtypes(self):
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
- self.assertTrue(bm.equals(bm_this))
- self.assertTrue(bm_this.equals(bm))
+ assert bm.equals(bm_this)
+ assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
- self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.])
+ assert mgr.as_matrix().tolist() == [0., 1., 2., 3., 4.]
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
for value in invalid_values:
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
bm1.replace_list([1], [2], inplace=value)
@@ -918,32 +913,37 @@ def assert_slice_ok(mgr, axis, slobj):
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
- yield assert_slice_ok, mgr, ax, slice(None)
- yield assert_slice_ok, mgr, ax, slice(3)
- yield assert_slice_ok, mgr, ax, slice(100)
- yield assert_slice_ok, mgr, ax, slice(1, 4)
- yield assert_slice_ok, mgr, ax, slice(3, 0, -2)
+ assert_slice_ok(mgr, ax, slice(None))
+ assert_slice_ok(mgr, ax, slice(3))
+ assert_slice_ok(mgr, ax, slice(100))
+ assert_slice_ok(mgr, ax, slice(1, 4))
+ assert_slice_ok(mgr, ax, slice(3, 0, -2))
# boolean mask
- yield assert_slice_ok, mgr, ax, np.array([], dtype=np.bool_)
- yield (assert_slice_ok, mgr, ax,
- np.ones(mgr.shape[ax], dtype=np.bool_))
- yield (assert_slice_ok, mgr, ax,
- np.zeros(mgr.shape[ax], dtype=np.bool_))
+ assert_slice_ok(
+ mgr, ax, np.array([], dtype=np.bool_))
+ assert_slice_ok(
+ mgr, ax,
+ np.ones(mgr.shape[ax], dtype=np.bool_))
+ assert_slice_ok(
+ mgr, ax,
+ np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
- yield (assert_slice_ok, mgr, ax,
- np.arange(mgr.shape[ax]) % 3 == 0)
- yield (assert_slice_ok, mgr, ax, np.array(
- [True, True, False], dtype=np.bool_))
+ assert_slice_ok(
+ mgr, ax,
+ np.arange(mgr.shape[ax]) % 3 == 0)
+ assert_slice_ok(
+ mgr, ax, np.array(
+ [True, True, False], dtype=np.bool_))
# fancy indexer
- yield assert_slice_ok, mgr, ax, []
- yield assert_slice_ok, mgr, ax, lrange(mgr.shape[ax])
+ assert_slice_ok(mgr, ax, [])
+ assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
- yield assert_slice_ok, mgr, ax, [0, 1, 2]
- yield assert_slice_ok, mgr, ax, [-1, -2, -3]
+ assert_slice_ok(mgr, ax, [0, 1, 2])
+ assert_slice_ok(mgr, ax, [-1, -2, -3])
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
@@ -957,13 +957,13 @@ def assert_take_ok(mgr, axis, indexer):
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
- yield assert_take_ok, mgr, ax, []
- yield assert_take_ok, mgr, ax, [0, 0, 0]
- yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])
+ assert_take_ok(mgr, ax, [])
+ assert_take_ok(mgr, ax, [0, 0, 0])
+ assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
- yield assert_take_ok, mgr, ax, [0, 1, 2]
- yield assert_take_ok, mgr, ax, [-1, -2, -3]
+ assert_take_ok(mgr, ax, [0, 1, 2])
+ assert_take_ok(mgr, ax, [-1, -2, -3])
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
@@ -981,25 +981,33 @@ def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
- yield (assert_reindex_axis_is_ok, mgr, ax,
- pd.Index([]), fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax, mgr.axes[ax],
- fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax,
- mgr.axes[ax][[0, 0, 0]], fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax,
- pd.Index(['foo', 'bar', 'baz']), fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax,
- pd.Index(['foo', mgr.axes[ax][0], 'baz']),
- fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ pd.Index([]), fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax, mgr.axes[ax],
+ fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ mgr.axes[ax][[0, 0, 0]], fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ pd.Index(['foo', 'bar', 'baz']), fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ pd.Index(['foo', mgr.axes[ax][0], 'baz']),
+ fill_value)
if mgr.shape[ax] >= 3:
- yield (assert_reindex_axis_is_ok, mgr, ax,
- mgr.axes[ax][:-3], fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax,
- mgr.axes[ax][-3::-1], fill_value)
- yield (assert_reindex_axis_is_ok, mgr, ax,
- mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ mgr.axes[ax][:-3], fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ mgr.axes[ax][-3::-1], fill_value)
+ assert_reindex_axis_is_ok(
+ mgr, ax,
+ mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
@@ -1018,33 +1026,41 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index([]), [], fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index(['foo'] * mgr.shape[ax]),
- np.arange(mgr.shape[ax]), fill_value)
-
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
- fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax, mgr.axes[ax],
- np.arange(mgr.shape[ax])[::-1], fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index(['foo', 'bar', 'baz']),
- [0, 0, 0], fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index(['foo', 'bar', 'baz']),
- [-1, 0, -1], fill_value)
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index(['foo', mgr.axes[ax][0], 'baz']),
- [-1, -1, -1], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index([]), [], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index(['foo'] * mgr.shape[ax]),
+ np.arange(mgr.shape[ax]), fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
+ fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax, mgr.axes[ax],
+ np.arange(mgr.shape[ax])[::-1], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index(['foo', 'bar', 'baz']),
+ [0, 0, 0], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index(['foo', 'bar', 'baz']),
+ [-1, 0, -1], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index(['foo', mgr.axes[ax][0], 'baz']),
+ [-1, -1, -1], fill_value)
if mgr.shape[ax] >= 3:
- yield (assert_reindex_indexer_is_ok, mgr, ax,
- pd.Index(['foo', 'bar', 'baz']),
- [0, 1, 2], fill_value)
+ assert_reindex_indexer_is_ok(
+ mgr, ax,
+ pd.Index(['foo', 'bar', 'baz']),
+ [0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
@@ -1055,21 +1071,23 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
class TestBlockPlacement(tm.TestCase):
def test_slice_len(self):
- self.assertEqual(len(BlockPlacement(slice(0, 4))), 4)
- self.assertEqual(len(BlockPlacement(slice(0, 4, 2))), 2)
- self.assertEqual(len(BlockPlacement(slice(0, 3, 2))), 2)
+ assert len(BlockPlacement(slice(0, 4))) == 4
+ assert len(BlockPlacement(slice(0, 4, 2))) == 2
+ assert len(BlockPlacement(slice(0, 3, 2))) == 2
- self.assertEqual(len(BlockPlacement(slice(0, 1, 2))), 1)
- self.assertEqual(len(BlockPlacement(slice(1, 0, -1))), 1)
+ assert len(BlockPlacement(slice(0, 1, 2))) == 1
+ assert len(BlockPlacement(slice(1, 0, -1))) == 1
def test_zero_step_raises(self):
- self.assertRaises(ValueError, BlockPlacement, slice(1, 1, 0))
- self.assertRaises(ValueError, BlockPlacement, slice(1, 2, 0))
+ with pytest.raises(ValueError):
+ BlockPlacement(slice(1, 1, 0))
+ with pytest.raises(ValueError):
+ BlockPlacement(slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
- self.assertRaisesRegexp(ValueError, "unbounded slice",
- lambda: BlockPlacement(slc))
+ tm.assertRaisesRegexp(ValueError, "unbounded slice",
+ lambda: BlockPlacement(slc))
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
@@ -1087,7 +1105,7 @@ def assert_unbounded_slice_error(slc):
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
- self.assertTrue(not BlockPlacement(slc).is_slice_like)
+ assert not BlockPlacement(slc).is_slice_like
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
@@ -1095,12 +1113,12 @@ def assert_not_slice_like(slc):
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
- self.assertTrue(not BlockPlacement(slice(0, 0)).is_slice_like)
- self.assertTrue(not BlockPlacement(slice(100, 100)).is_slice_like)
+ assert not BlockPlacement(slice(0, 0)).is_slice_like
+ assert not BlockPlacement(slice(100, 100)).is_slice_like
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
- self.assertEqual(BlockPlacement(arr).as_slice, slc)
+ assert BlockPlacement(arr).as_slice == slc
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
@@ -1115,7 +1133,7 @@ def assert_as_slice_equals(arr, slc):
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
- self.assertTrue(not BlockPlacement(arr).is_slice_like)
+ assert not BlockPlacement(arr).is_slice_like
assert_not_slice_like([])
assert_not_slice_like([-1])
@@ -1128,13 +1146,12 @@ def assert_not_slice_like(arr):
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
- self.assertEqual(list(BlockPlacement(slice(0, 3))), [0, 1, 2])
- self.assertEqual(list(BlockPlacement(slice(0, 0))), [])
- self.assertEqual(list(BlockPlacement(slice(3, 0))), [])
+ assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
+ assert list(BlockPlacement(slice(0, 0))) == []
+ assert list(BlockPlacement(slice(3, 0))) == []
- self.assertEqual(list(BlockPlacement(slice(3, 0, -1))), [3, 2, 1])
- self.assertEqual(list(BlockPlacement(slice(3, None, -1))),
- [3, 2, 1, 0])
+ assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
+ assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
@@ -1152,13 +1169,13 @@ def assert_as_array_equals(slc, asarray):
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
- self.assertEqual(bpl.add(1).as_slice, slice(1, 6, 1))
- self.assertEqual(bpl.add(np.arange(5)).as_slice, slice(0, 10, 2))
- self.assertEqual(list(bpl.add(np.arange(5, 0, -1))), [5, 5, 5, 5, 5])
+ assert bpl.add(1).as_slice == slice(1, 6, 1)
+ assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
+ assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
- self.assertEqual(list(BlockPlacement(val).add(inc)), result)
+ assert list(BlockPlacement(val).add(inc)) == result
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
@@ -1177,9 +1194,9 @@ def assert_add_equals(val, inc, result):
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
- self.assertRaises(ValueError,
- lambda: BlockPlacement(slice(1, 4)).add(-10))
- self.assertRaises(ValueError,
- lambda: BlockPlacement([1, 2, 4]).add(-10))
- self.assertRaises(ValueError,
- lambda: BlockPlacement(slice(2, None, -1)).add(-1))
+ with pytest.raises(ValueError):
+ BlockPlacement(slice(1, 4)).add(-10)
+ with pytest.raises(ValueError):
+ BlockPlacement([1, 2, 4]).add(-10)
+ with pytest.raises(ValueError):
+ BlockPlacement(slice(2, None, -1)).add(-1)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 3f2973a9834ca..fe03d7886e661 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -646,7 +646,7 @@ def test_dtypes(self):
f = self.funcs[f_name]
d = self.data[d_name]
exp = self.expects[d_name][f_name]
- yield self.check_dtypes, f, f_name, d, d_name, exp
+ self.check_dtypes(f, f_name, d, d_name, exp)
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 154476ce8340a..cf76f4ead77e3 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -93,11 +93,7 @@ def reset_display_options(self):
pd.reset_option('^display.', silent=True)
def round_trip_pickle(self, obj, path=None):
- if path is None:
- path = u('__%s__.pickle' % rands(10))
- with ensure_clean(path) as path:
- pd.to_pickle(obj, path)
- return pd.read_pickle(path)
+ return round_trip_pickle(obj, path=path)
# https://docs.python.org/3/library/unittest.html#deprecated-aliases
def assertEquals(self, *args, **kwargs):
@@ -121,6 +117,14 @@ def assertNotAlmostEquals(self, *args, **kwargs):
self.assertNotAlmostEqual)(*args, **kwargs)
+def round_trip_pickle(obj, path=None):
+ if path is None:
+ path = u('__%s__.pickle' % rands(10))
+ with ensure_clean(path) as path:
+ pd.to_pickle(obj, path)
+ return pd.read_pickle(path)
+
+
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
diff --git a/setup.cfg b/setup.cfg
index 8de4fc955bd50..8b32f0f62fe28 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -24,7 +24,6 @@ split_penalty_logical_operator = 30
[tool:pytest]
# TODO: Change all yield-based (nose-style) fixutures to pytest fixtures
# Silencing the warning until then
-addopts = --disable-pytest-warnings
testpaths = pandas
markers =
single: mark a test as single cpu only
| xref https://github.com/pandas-dev/pandas/issues/15341 | https://api.github.com/repos/pandas-dev/pandas/pulls/15708 | 2017-03-17T00:41:09Z | 2017-03-17T14:08:49Z | 2017-03-17T14:08:49Z | 2017-03-17T15:03:20Z |
TST: move pandas/tests/io/test_date_converters.py to pandas/tests/io/parsers/parse_dates.py | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a56212328f5c3..6791b52836cbe 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -727,6 +727,7 @@ Other API Changes
- ``Series.sort_values()`` accepts a one element list of bool for consistency with the behavior of ``DataFrame.sort_values()`` (:issue:`15604`)
- ``.merge()`` and ``.join()`` on ``category`` dtype columns will now preserve the category dtype when possible (:issue:`10409`)
- ``SparseDataFrame.default_fill_value`` will be 0, previously was ``nan`` in the return from ``pd.get_dummies(..., sparse=True)`` (:issue:`15594`)
+- Reorganization of date converter test to parse test (:issue:`15707`)
.. _whatsnew_0200.deprecations:
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index 4cba9276a9d1e..de4e3fbc0d943 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -6,7 +6,7 @@
"""
from distutils.version import LooseVersion
-from datetime import datetime
+from datetime import datetime, date
import pytest
import numpy as np
@@ -19,9 +19,10 @@
import pandas.util.testing as tm
import pandas.io.date_converters as conv
-from pandas import DataFrame, Series, Index, DatetimeIndex
+from pandas import DataFrame, Series, Index, DatetimeIndex, MultiIndex
from pandas import compat
from pandas.compat import parse_date, StringIO, lrange
+from pandas.compat.numpy import np_array_datetime64_compat
from pandas.tseries.index import date_range
@@ -510,3 +511,146 @@ def test_parse_date_time_multi_level_column_name(self):
expected = DataFrame(expected_data,
columns=['date_time', ('A', 'a'), ('B', 'b')])
tm.assert_frame_equal(result, expected)
+
+ def test_parse_date_time(self):
+ dates = np.array(['2007/1/3', '2008/2/4'], dtype=object)
+ times = np.array(['05:07:09', '06:08:00'], dtype=object)
+ expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
+ datetime(2008, 2, 4, 6, 8, 0)])
+
+ result = conv.parse_date_time(dates, times)
+ self.assertTrue((result == expected).all())
+
+ data = """\
+date, time, a, b
+2001-01-05, 10:00:00, 0.0, 10.
+2001-01-05, 00:00:00, 1., 11.
+"""
+ datecols = {'date_time': [0, 1]}
+ df = self.read_csv(StringIO(data), sep=',', header=0,
+ parse_dates=datecols,
+ date_parser=conv.parse_date_time)
+ self.assertIn('date_time', df)
+ self.assertEqual(df.date_time.loc[0], datetime(2001, 1, 5, 10, 0, 0))
+
+ data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
+ "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
+ "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
+ "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
+ "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
+ "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
+
+ date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
+ df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
+ date_parser=conv.parse_date_time)
+
+ def test_parse_date_fields(self):
+ years = np.array([2007, 2008])
+ months = np.array([1, 2])
+ days = np.array([3, 4])
+ result = conv.parse_date_fields(years, months, days)
+ expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
+ self.assertTrue((result == expected).all())
+
+ data = ("year, month, day, a\n 2001 , 01 , 10 , 10.\n"
+ "2001 , 02 , 1 , 11.")
+ datecols = {'ymd': [0, 1, 2]}
+ df = self.read_csv(StringIO(data), sep=',', header=0,
+ parse_dates=datecols,
+ date_parser=conv.parse_date_fields)
+ self.assertIn('ymd', df)
+ self.assertEqual(df.ymd.loc[0], datetime(2001, 1, 10))
+
+ def test_datetime_six_col(self):
+ years = np.array([2007, 2008])
+ months = np.array([1, 2])
+ days = np.array([3, 4])
+ hours = np.array([5, 6])
+ minutes = np.array([7, 8])
+ seconds = np.array([9, 0])
+ expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
+ datetime(2008, 2, 4, 6, 8, 0)])
+
+ result = conv.parse_all_fields(years, months, days,
+ hours, minutes, seconds)
+
+ self.assertTrue((result == expected).all())
+
+ data = """\
+year, month, day, hour, minute, second, a, b
+2001, 01, 05, 10, 00, 0, 0.0, 10.
+2001, 01, 5, 10, 0, 00, 1., 11.
+"""
+ datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
+ df = self.read_csv(StringIO(data), sep=',', header=0,
+ parse_dates=datecols,
+ date_parser=conv.parse_all_fields)
+ self.assertIn('ymdHMS', df)
+ self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0))
+
+ def test_datetime_fractional_seconds(self):
+ data = """\
+year, month, day, hour, minute, second, a, b
+2001, 01, 05, 10, 00, 0.123456, 0.0, 10.
+2001, 01, 5, 10, 0, 0.500000, 1., 11.
+"""
+ datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
+ df = self.read_csv(StringIO(data), sep=',', header=0,
+ parse_dates=datecols,
+ date_parser=conv.parse_all_fields)
+ self.assertIn('ymdHMS', df)
+ self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0,
+ microsecond=123456))
+ self.assertEqual(df.ymdHMS.loc[1], datetime(2001, 1, 5, 10, 0, 0,
+ microsecond=500000))
+
+ def test_generic(self):
+ data = "year, month, day, a\n 2001, 01, 10, 10.\n 2001, 02, 1, 11."
+ datecols = {'ym': [0, 1]}
+ dateconverter = lambda y, m: date(year=int(y), month=int(m), day=1)
+ df = self.read_csv(StringIO(data), sep=',', header=0,
+ parse_dates=datecols,
+ date_parser=dateconverter)
+ self.assertIn('ym', df)
+ self.assertEqual(df.ym.loc[0], date(2001, 1, 1))
+
+ def test_dateparser_resolution_if_not_ns(self):
+ # GH 10245
+ data = """\
+date,time,prn,rxstatus
+2013-11-03,19:00:00,126,00E80000
+2013-11-03,19:00:00,23,00E80000
+2013-11-03,19:00:00,13,00E80000
+"""
+
+ def date_parser(date, time):
+ datetime = np_array_datetime64_compat(
+ date + 'T' + time + 'Z', dtype='datetime64[s]')
+ return datetime
+
+ df = self.read_csv(StringIO(data), date_parser=date_parser,
+ parse_dates={'datetime': ['date', 'time']},
+ index_col=['datetime', 'prn'])
+
+ datetimes = np_array_datetime64_compat(['2013-11-03T19:00:00Z'] * 3,
+ dtype='datetime64[s]')
+ df_correct = DataFrame(data={'rxstatus': ['00E80000'] * 3},
+ index=MultiIndex.from_tuples(
+ [(datetimes[0], 126),
+ (datetimes[1], 23),
+ (datetimes[2], 13)],
+ names=['datetime', 'prn']))
+ tm.assert_frame_equal(df, df_correct)
+
+ def test_parse_date_column_with_empty_string(self):
+ # GH 6428
+ data = """case,opdate
+ 7,10/18/2006
+ 7,10/18/2008
+ 621, """
+ result = self.read_csv(StringIO(data), parse_dates=['opdate'])
+ expected_data = [[7, '10/18/2006'],
+ [7, '10/18/2008'],
+ [621, ' ']]
+ expected = DataFrame(expected_data, columns=['case', 'opdate'])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/test_date_converters.py b/pandas/tests/io/test_date_converters.py
deleted file mode 100644
index 5b54925c65fbd..0000000000000
--- a/pandas/tests/io/test_date_converters.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from pandas.compat import StringIO
-from datetime import date, datetime
-
-import numpy as np
-
-from pandas import DataFrame, MultiIndex
-from pandas.io.parsers import (read_csv, read_table)
-from pandas.util.testing import assert_frame_equal
-import pandas.io.date_converters as conv
-import pandas.util.testing as tm
-from pandas.compat.numpy import np_array_datetime64_compat
-
-
-class TestConverters(tm.TestCase):
-
- def setUp(self):
- self.years = np.array([2007, 2008])
- self.months = np.array([1, 2])
- self.days = np.array([3, 4])
- self.hours = np.array([5, 6])
- self.minutes = np.array([7, 8])
- self.seconds = np.array([9, 0])
- self.dates = np.array(['2007/1/3', '2008/2/4'], dtype=object)
- self.times = np.array(['05:07:09', '06:08:00'], dtype=object)
- self.expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
- datetime(2008, 2, 4, 6, 8, 0)])
-
- def test_parse_date_time(self):
- result = conv.parse_date_time(self.dates, self.times)
- self.assertTrue((result == self.expected).all())
-
- data = """\
-date, time, a, b
-2001-01-05, 10:00:00, 0.0, 10.
-2001-01-05, 00:00:00, 1., 11.
-"""
- datecols = {'date_time': [0, 1]}
- df = read_table(StringIO(data), sep=',', header=0,
- parse_dates=datecols, date_parser=conv.parse_date_time)
- self.assertIn('date_time', df)
- self.assertEqual(df.date_time.loc[0], datetime(2001, 1, 5, 10, 0, 0))
-
- data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
- "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
- "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
- "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
- "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
- "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
-
- date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
- df = read_csv(StringIO(data), header=None, parse_dates=date_spec,
- date_parser=conv.parse_date_time)
-
- def test_parse_date_fields(self):
- result = conv.parse_date_fields(self.years, self.months, self.days)
- expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
- self.assertTrue((result == expected).all())
-
- data = ("year, month, day, a\n 2001 , 01 , 10 , 10.\n"
- "2001 , 02 , 1 , 11.")
- datecols = {'ymd': [0, 1, 2]}
- df = read_table(StringIO(data), sep=',', header=0,
- parse_dates=datecols,
- date_parser=conv.parse_date_fields)
- self.assertIn('ymd', df)
- self.assertEqual(df.ymd.loc[0], datetime(2001, 1, 10))
-
- def test_datetime_six_col(self):
- result = conv.parse_all_fields(self.years, self.months, self.days,
- self.hours, self.minutes, self.seconds)
- self.assertTrue((result == self.expected).all())
-
- data = """\
-year, month, day, hour, minute, second, a, b
-2001, 01, 05, 10, 00, 0, 0.0, 10.
-2001, 01, 5, 10, 0, 00, 1., 11.
-"""
- datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
- df = read_table(StringIO(data), sep=',', header=0,
- parse_dates=datecols,
- date_parser=conv.parse_all_fields)
- self.assertIn('ymdHMS', df)
- self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0))
-
- def test_datetime_fractional_seconds(self):
- data = """\
-year, month, day, hour, minute, second, a, b
-2001, 01, 05, 10, 00, 0.123456, 0.0, 10.
-2001, 01, 5, 10, 0, 0.500000, 1., 11.
-"""
- datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
- df = read_table(StringIO(data), sep=',', header=0,
- parse_dates=datecols,
- date_parser=conv.parse_all_fields)
- self.assertIn('ymdHMS', df)
- self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0,
- microsecond=123456))
- self.assertEqual(df.ymdHMS.loc[1], datetime(2001, 1, 5, 10, 0, 0,
- microsecond=500000))
-
- def test_generic(self):
- data = "year, month, day, a\n 2001, 01, 10, 10.\n 2001, 02, 1, 11."
- datecols = {'ym': [0, 1]}
- dateconverter = lambda y, m: date(year=int(y), month=int(m), day=1)
- df = read_table(StringIO(data), sep=',', header=0,
- parse_dates=datecols,
- date_parser=dateconverter)
- self.assertIn('ym', df)
- self.assertEqual(df.ym.loc[0], date(2001, 1, 1))
-
- def test_dateparser_resolution_if_not_ns(self):
- # issue 10245
- data = """\
-date,time,prn,rxstatus
-2013-11-03,19:00:00,126,00E80000
-2013-11-03,19:00:00,23,00E80000
-2013-11-03,19:00:00,13,00E80000
-"""
-
- def date_parser(date, time):
- datetime = np_array_datetime64_compat(
- date + 'T' + time + 'Z', dtype='datetime64[s]')
- return datetime
-
- df = read_csv(StringIO(data), date_parser=date_parser,
- parse_dates={'datetime': ['date', 'time']},
- index_col=['datetime', 'prn'])
-
- datetimes = np_array_datetime64_compat(['2013-11-03T19:00:00Z'] * 3,
- dtype='datetime64[s]')
- df_correct = DataFrame(data={'rxstatus': ['00E80000'] * 3},
- index=MultiIndex.from_tuples(
- [(datetimes[0], 126),
- (datetimes[1], 23),
- (datetimes[2], 13)],
- names=['datetime', 'prn']))
- assert_frame_equal(df, df_correct)
-
- def test_parse_date_column_with_empty_string(self):
- # GH 6428
- data = """case,opdate
- 7,10/18/2006
- 7,10/18/2008
- 621, """
- result = read_csv(StringIO(data), parse_dates=['opdate'])
- expected_data = [[7, '10/18/2006'],
- [7, '10/18/2008'],
- [621, ' ']]
- expected = DataFrame(expected_data, columns=['case', 'opdate'])
- assert_frame_equal(result, expected)
| - [x] closes #15519
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
How can I run tests on `parse_dates.py`?
I am getting `ValueError: no option named 'only_slow'` with `item.config.getoption("--only-slow"):` | https://api.github.com/repos/pandas-dev/pandas/pulls/15707 | 2017-03-16T23:30:51Z | 2017-03-18T02:00:51Z | null | 2017-03-20T21:21:46Z |
TST: don't catch, but supress warnings in panel4d/panelnd | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index c1e5904693d1c..af51c7f2e2dc1 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -550,8 +550,8 @@ def _validate_categories(cls, categories, fastpath=False):
# we don't allow NaNs in the categories themselves
if categories.hasnans:
- # NaNs in cats deprecated in 0.17,
- # remove in 0.18 or 0.19 GH 10748
+ # NaNs in cats deprecated in 0.17
+ # GH 10748
msg = ('\nSetting NaNs in `categories` is deprecated and '
'will be removed in a future version of pandas.')
warn(msg, FutureWarning, stacklevel=3)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 72efc47a3c744..b3b253f151541 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2094,7 +2094,17 @@ def convert(self, values, nan_rep, encoding):
# we have a categorical
categories = self.metadata
- self.data = Categorical.from_codes(self.data.ravel(),
+ codes = self.data.ravel()
+
+ # if we have stored a NaN in the categories
+ # then strip it; in theory we could have BOTH
+ # -1s in the codes and nulls :<
+ mask = isnull(categories)
+ if mask.any():
+ categories = categories[~mask]
+ codes[codes != -1] -= mask.astype(int).cumsum().values
+
+ self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
@@ -3404,10 +3414,12 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
- if append_axis != exist_axis:
+ if not array_equivalent(np.array(append_axis),
+ np.array(exist_axis)):
# ahah! -> reindex
- if sorted(append_axis) == sorted(exist_axis):
+ if array_equivalent(np.array(sorted(append_axis)),
+ np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 5592c564e51df..b476096d9c27c 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -1,11 +1,12 @@
import pytest
import sys
import os
-import warnings
+from warnings import catch_warnings
import tempfile
from contextlib import contextmanager
import datetime
+from datetime import timedelta
import numpy as np
import pandas
@@ -22,7 +23,7 @@
from pandas.io.pytables import TableIterator
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
- AttributeConflictWarning, DuplicateWarning,
+ AttributeConflictWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
@@ -31,7 +32,6 @@
assert_panel_equal,
assert_frame_equal,
assert_series_equal,
- assert_produces_warning,
set_timezone)
from pandas import concat, Timestamp
from pandas import compat
@@ -123,17 +123,6 @@ def _maybe_remove(store, key):
pass
-@contextmanager
-def compat_assert_produces_warning(w):
- """ don't produce a warning under PY3 """
- if compat.PY3:
- yield
- else:
- with tm.assert_produces_warning(expected_warning=w,
- check_stacklevel=False):
- yield
-
-
class Base(tm.TestCase):
@classmethod
@@ -151,8 +140,6 @@ def tearDownClass(cls):
tm.set_testing_mode()
def setUp(self):
- warnings.filterwarnings(action='ignore', category=FutureWarning)
-
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
@@ -420,9 +407,9 @@ def test_repr(self):
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
- warnings.filterwarnings('ignore', category=PerformanceWarning)
- store['df'] = df
- warnings.filterwarnings('always', category=PerformanceWarning)
+ # PerformanceWarning
+ with catch_warnings(record=True):
+ store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
@@ -455,9 +442,9 @@ def test_contains(self):
self.assertNotIn('bar', store)
# GH 2694
- warnings.filterwarnings(
- 'ignore', category=tables.NaturalNameWarning)
- store['node())'] = tm.makeDataFrame()
+ # tables.NaturalNameWarning
+ with catch_warnings(record=True):
+ store['node())'] = tm.makeDataFrame()
self.assertIn('node())', store)
def test_versioning(self):
@@ -767,11 +754,8 @@ def test_put_mixed_type(self):
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
- # cannot use assert_produces_warning here for some reason
- # a PendingDeprecationWarning is also raised?
- warnings.filterwarnings('ignore', category=PerformanceWarning)
- store.put('df', df)
- warnings.filterwarnings('always', category=PerformanceWarning)
+ with catch_warnings(record=True):
+ store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
@@ -796,8 +780,8 @@ def test_append(self):
tm.assert_frame_equal(store['df3'], df)
# this is allowed by almost always don't want to do it
- with tm.assert_produces_warning(
- expected_warning=tables.NaturalNameWarning):
+ # tables.NaturalNameWarning):
+ with catch_warnings(record=True):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
@@ -811,8 +795,7 @@ def test_append(self):
assert_panel_equal(store['wp1'], wp)
# ndim
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :])
@@ -900,12 +883,12 @@ def test_append_series(self):
# select on the values
expected = ns[ns > 60]
- result = store.select('ns', Term('foo>60'))
+ result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
- result = store.select('ns', [Term('foo>70'), Term('index<90')])
+ result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
@@ -1227,7 +1210,7 @@ def test_append_with_different_block_ordering(self):
def test_ndim_indexables(self):
# test using ndim tables in new ways
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
@@ -1887,8 +1870,7 @@ def test_append_misc(self):
with ensure_clean_store(self.path) as store:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
# unsuported data types for non-tables
p4d = tm.makePanel4D()
@@ -1929,7 +1911,7 @@ def check(obj, comparator):
p = tm.makePanel()
check(p, assert_panel_equal)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
@@ -2057,8 +2039,8 @@ def test_table_values_dtypes_roundtrip(self):
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
- result.sort()
- expected.sort()
+ result = result.sort_index()
+ result = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
@@ -2097,7 +2079,8 @@ def test_table_mixed_dtypes(self):
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
+
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
@@ -2169,7 +2152,6 @@ def test_append_with_timedelta(self):
# GH 3577
# append timedelta
- from datetime import timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
@@ -2183,12 +2165,9 @@ def test_append_with_timedelta(self):
result = store.select('df')
assert_frame_equal(result, df)
- result = store.select('df', Term("C<100000"))
+ result = store.select('df', "C<100000")
assert_frame_equal(result, df)
- result = store.select('df', Term("C", "<", -3 * 86400))
- assert_frame_equal(result, df.iloc[3:])
-
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
@@ -2431,7 +2410,7 @@ def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
@@ -2489,7 +2468,7 @@ def test_terms(self):
0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
store.put('p4d', p4d, format='table')
@@ -2498,39 +2477,23 @@ def test_terms(self):
store.put('wpneg', wpneg, format='table')
# panel
- result = store.select('wp', [Term(
- 'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
+ result = store.select(
+ 'wp', "major_axis<'20000108' and minor_axis=['A', 'B']")
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
- # with deprecation
- result = store.select('wp', [Term(
- 'major_axis', '<', "20000108"), Term("minor_axis=['A', 'B']")])
- expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
- tm.assert_panel_equal(result, expected)
-
# p4d
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
result = store.select('p4d',
- [Term('major_axis<"20000108"'),
- Term("minor_axis=['A', 'B']"),
- Term("items=['ItemA', 'ItemB']")])
+ ("major_axis<'20000108' and "
+ "minor_axis=['A', 'B'] and "
+ "items=['ItemA', 'ItemB']"))
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
- # back compat invalid terms
- terms = [dict(field='major_axis', op='>', value='20121114'),
- [dict(field='major_axis', op='>', value='20121114')],
- ["minor_axis=['A','B']",
- dict(field='major_axis', op='>', value='20121114')]]
- for t in terms:
- with tm.assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
- Term(t)
-
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
# valid terms
terms = [('major_axis=20121114'),
@@ -2581,13 +2544,13 @@ def test_term_compat(self):
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
- result = store.select('wp', [Term('major_axis>20000102'),
- Term('minor_axis', '=', ['A', 'B'])])
+ result = store.select(
+ 'wp', "major_axis>20000102 and minor_axis=['A', 'B']")
expected = wp.loc[:, wp.major_axis >
Timestamp('20000102'), ['A', 'B']]
assert_panel_equal(result, expected)
- store.remove('wp', Term('major_axis>20000103'))
+ store.remove('wp', 'major_axis>20000103')
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
@@ -2601,25 +2564,23 @@ def test_term_compat(self):
# stringified datetimes
result = store.select(
- 'wp', [Term('major_axis', '>', datetime.datetime(2000, 1, 2))])
+ 'wp', "major_axis>datetime.datetime(2000, 1, 2)")
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
- 'wp', [Term('major_axis', '>',
- datetime.datetime(2000, 1, 2, 0, 0))])
+ 'wp', "major_axis>datetime.datetime(2000, 1, 2, 0, 0)")
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
- 'wp', [Term('major_axis', '=',
- [datetime.datetime(2000, 1, 2, 0, 0),
- datetime.datetime(2000, 1, 3, 0, 0)])])
+ 'wp', ("major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
+ "datetime.datetime(2000, 1, 3, 0, 0)]"))
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
- result = store.select('wp', [Term('minor_axis', '=', ['A', 'B'])])
+ result = store.select('wp', "minor_axis=['A', 'B']")
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
@@ -2630,8 +2591,7 @@ def test_backwards_compat_without_term_object(self):
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
- with assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
result = store.select('wp', [('major_axis>20000102'),
('minor_axis', '=', ['A', 'B'])])
expected = wp.loc[:,
@@ -2652,24 +2612,21 @@ def test_backwards_compat_without_term_object(self):
store.append('wp', wp)
# stringified datetimes
- with assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
result = store.select('wp',
[('major_axis',
'>',
datetime.datetime(2000, 1, 2))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
- with assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
result = store.select('wp',
[('major_axis',
'>',
datetime.datetime(2000, 1, 2, 0, 0))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
- with assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
result = store.select('wp',
[('major_axis',
'=',
@@ -2768,9 +2725,7 @@ def test_tuple_index(self):
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
- expected_warning = Warning if PY35 else PerformanceWarning
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
@@ -2782,30 +2737,23 @@ def test_index_types(self):
check_index_type=True,
check_series_type=True)
- # nose has a deprecation warning in 3.5
- expected_warning = Warning if PY35 else PerformanceWarning
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
@@ -3053,7 +3001,7 @@ def test_wide_table_dups(self):
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
- with tm.assert_produces_warning(expected_warning=DuplicateWarning):
+ with catch_warnings(record=True):
recons = store['panel']
assert_panel_equal(recons, wp)
@@ -3647,6 +3595,7 @@ def test_retain_index_attributes(self):
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
+
expected_warning = Warning if PY35 else AttributeConflictWarning
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
@@ -3804,15 +3753,10 @@ def test_frame_select_complex2(self):
hist.to_hdf(hh, 'df', mode='w', format='table')
- expected = read_hdf(hh, 'df', where=Term('l1', '=', [2, 3, 4]))
-
- # list like
- result = read_hdf(hh, 'df', where=Term(
- 'l1', '=', selection.index.tolist()))
- assert_frame_equal(result, expected)
- l = selection.index.tolist() # noqa
+ expected = read_hdf(hh, 'df', where="l1=[2, 3, 4]")
# sccope with list like
+ l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
@@ -3881,12 +3825,12 @@ def test_string_select(self):
store.append('df', df, data_columns=['x'])
- result = store.select('df', Term('x=none'))
+ result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
- result = store.select('df', Term('x!=none'))
+ result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
@@ -3898,7 +3842,7 @@ def test_string_select(self):
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
- result = store.select('df2', Term('x!=none'))
+ result = store.select('df2', 'x!=none')
expected = df2[isnull(df2.x)]
assert_frame_equal(result, expected)
@@ -3908,11 +3852,11 @@ def test_string_select(self):
store.append('df3', df, data_columns=['int'])
- result = store.select('df3', Term('int=2'))
+ result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
- result = store.select('df3', Term('int!=2'))
+ result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
@@ -4178,8 +4122,8 @@ def test_select_as_multiple(self):
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
- result = store.select_as_multiple(['df1', 'df2'], where=[Term(
- 'index>df2.index[4]')], selector='df2')
+ result = store.select_as_multiple(
+ ['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
@@ -4221,13 +4165,13 @@ def test_start_stop_table(self):
store.append('df', df)
result = store.select(
- 'df', [Term("columns=['A']")], start=0, stop=5)
+ 'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
- 'df', [Term("columns=['A']")], start=30, stop=40)
+ 'df', "columns=['A']", start=30, stop=40)
self.assertTrue(len(result) == 0)
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
@@ -4287,11 +4231,11 @@ def test_select_filter_corner(self):
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
- crit = Term('columns=df.columns[:75]')
+ crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
- crit = Term('columns=df.columns[:75:2]')
+ crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
@@ -4470,16 +4414,16 @@ def test_legacy_table_read(self):
with tm.assert_produces_warning(
expected_warning=IncompatibilityWarning):
self.assertRaises(
- Exception, store.select, 'wp1', Term('minor_axis=B'))
+ Exception, store.select, 'wp1', 'minor_axis=B')
df2 = store.select('df2')
- result = store.select('df2', Term('index>df2.index[2]'))
+ result = store.select('df2', 'index>df2.index[2]')
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
def test_legacy_0_10_read(self):
# legacy from 0.10
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')
with ensure_clean_store(path, mode='r') as store:
str(store)
@@ -4503,7 +4447,7 @@ def test_legacy_0_11_read(self):
def test_copy(self):
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
def do_copy(f=None, new_f=None, keys=None,
propindexes=True, **kwargs):
@@ -4645,7 +4589,8 @@ def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
- with compat_assert_produces_warning(PerformanceWarning):
+ # PerformanceWarning
+ with catch_warnings(record=True):
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
@@ -4913,15 +4858,19 @@ def test_to_hdf_with_object_column_names(self):
with self.assertRaises(
ValueError, msg=("cannot have non-object label "
"DataIndexableCol")):
- df.to_hdf(path, 'df', format='table', data_columns=True)
+ with catch_warnings(record=True):
+ df.to_hdf(path, 'df',
+ format='table',
+ data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
- df.to_hdf(path, 'df', format='table', data_columns=True)
- result = pd.read_hdf(
- path, 'df', where="index = [{0}]".format(df.index[0]))
- assert(len(result))
+ with catch_warnings(record=True):
+ df.to_hdf(path, 'df', format='table', data_columns=True)
+ result = pd.read_hdf(
+ path, 'df', where="index = [{0}]".format(df.index[0]))
+ assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
@@ -5186,7 +5135,7 @@ def test_complex_mixed_table(self):
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
- result = store.select('df', where=Term('A>2'))
+ result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
@@ -5215,7 +5164,7 @@ def test_complex_across_dimensions(self):
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
- with compat_assert_produces_warning(FutureWarning):
+ with catch_warnings(record=True):
p4d = pd.Panel4D({'i': p, 'ii': p})
objs = [df, p, p4d]
@@ -5299,7 +5248,7 @@ def test_append_with_timezones_dateutil(self):
# select with tz aware
expected = df[df.A >= df.A[3]]
- result = store.select('df_tz', where=Term('A>=df.A[3]'))
+ result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
@@ -5370,7 +5319,7 @@ def test_append_with_timezones_pytz(self):
# select with tz aware
self._compare_with_tz(store.select(
- 'df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
+ 'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 373f590cbf9eb..ab0322abbcf06 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
+from warnings import catch_warnings
from datetime import datetime
import operator
@@ -1272,7 +1273,7 @@ def test_apply_slabs(self):
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
- with tm.assert_produces_warning(False):
+ with catch_warnings(record=True):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 2491bac2a7f19..c0511581cd299 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -3,7 +3,7 @@
from pandas.compat import range, lrange
import operator
import pytest
-
+from warnings import catch_warnings
import numpy as np
from pandas.types.common import is_float_dtype
@@ -129,17 +129,21 @@ def skipna_wrapper(x):
def wrapper(x):
return alternative(np.asarray(x))
- for i in range(obj.ndim):
- result = f(axis=i, skipna=False)
- assert_panel_equal(result, obj.apply(wrapper, axis=i))
+ with catch_warnings(record=True):
+ for i in range(obj.ndim):
+ result = f(axis=i, skipna=False)
+ expected = obj.apply(wrapper, axis=i)
+ assert_panel_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
- for i in range(obj.ndim):
- result = f(axis=i)
- if not tm._incompat_bottleneck_version(name):
- assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
+ with catch_warnings(record=True):
+ for i in range(obj.ndim):
+ result = f(axis=i)
+ if not tm._incompat_bottleneck_version(name):
+ expected = obj.apply(skipna_wrapper, axis=i)
+ assert_panel_equal(result, expected)
self.assertRaises(Exception, f, axis=obj.ndim)
@@ -161,32 +165,33 @@ def test_get_axis(self):
assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)
def test_set_axis(self):
- new_labels = Index(np.arange(len(self.panel4d.labels)))
+ with catch_warnings(record=True):
+ new_labels = Index(np.arange(len(self.panel4d.labels)))
- # TODO: unused?
- # new_items = Index(np.arange(len(self.panel4d.items)))
+ # TODO: unused?
+ # new_items = Index(np.arange(len(self.panel4d.items)))
- new_major = Index(np.arange(len(self.panel4d.major_axis)))
- new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
+ new_major = Index(np.arange(len(self.panel4d.major_axis)))
+ new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
- # ensure propagate to potentially prior-cached items too
+ # ensure propagate to potentially prior-cached items too
- # TODO: unused?
- # label = self.panel4d['l1']
+ # TODO: unused?
+ # label = self.panel4d['l1']
- self.panel4d.labels = new_labels
+ self.panel4d.labels = new_labels
- if hasattr(self.panel4d, '_item_cache'):
- self.assertNotIn('l1', self.panel4d._item_cache)
- self.assertIs(self.panel4d.labels, new_labels)
+ if hasattr(self.panel4d, '_item_cache'):
+ self.assertNotIn('l1', self.panel4d._item_cache)
+ self.assertIs(self.panel4d.labels, new_labels)
- self.panel4d.major_axis = new_major
- self.assertIs(self.panel4d[0].major_axis, new_major)
- self.assertIs(self.panel4d.major_axis, new_major)
+ self.panel4d.major_axis = new_major
+ self.assertIs(self.panel4d[0].major_axis, new_major)
+ self.assertIs(self.panel4d.major_axis, new_major)
- self.panel4d.minor_axis = new_minor
- self.assertIs(self.panel4d[0].minor_axis, new_minor)
- self.assertIs(self.panel4d.minor_axis, new_minor)
+ self.panel4d.minor_axis = new_minor
+ self.assertIs(self.panel4d[0].minor_axis, new_minor)
+ self.assertIs(self.panel4d.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
@@ -201,7 +206,7 @@ def test_get_axis_name(self):
self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')
def test_arith(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
@@ -233,16 +238,16 @@ def test_iteritems(self):
len(self.panel4d.labels))
def test_combinePanel4d(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
result = self.panel4d.add(self.panel4d)
self.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p = self.panel4d
@@ -283,7 +288,7 @@ def test_get_value(self):
def test_abs(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
self.assert_panel4d_equal(result, expected)
@@ -306,7 +311,7 @@ def test_getitem(self):
def test_delitem_and_pop(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
assert_panel_equal(expected, result)
@@ -351,40 +356,38 @@ def test_delitem_and_pop(self):
assert_panel_equal(panel4dc[0], panel4d[0])
def test_setitem(self):
- # LongPanel with one item
- # lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
- # self.assertRaises(Exception, self.panel.__setitem__,
- # 'ItemE', lp)
+ with catch_warnings(record=True):
- # Panel
- p = Panel(dict(
- ItemA=self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))
- self.panel4d['l4'] = p
- self.panel4d['l5'] = p
+ # Panel
+ p = Panel(dict(
+ ItemA=self.panel4d['l1']['ItemA'][2:].filter(
+ items=['A', 'B'])))
+ self.panel4d['l4'] = p
+ self.panel4d['l5'] = p
- p2 = self.panel4d['l4']
+ p2 = self.panel4d['l4']
- assert_panel_equal(p, p2.reindex(items=p.items,
- major_axis=p.major_axis,
- minor_axis=p.minor_axis))
+ assert_panel_equal(p, p2.reindex(items=p.items,
+ major_axis=p.major_axis,
+ minor_axis=p.minor_axis))
- # scalar
- self.panel4d['lG'] = 1
- self.panel4d['lE'] = True
- self.assertEqual(self.panel4d['lG'].values.dtype, np.int64)
- self.assertEqual(self.panel4d['lE'].values.dtype, np.bool_)
+ # scalar
+ self.panel4d['lG'] = 1
+ self.panel4d['lE'] = True
+ self.assertEqual(self.panel4d['lG'].values.dtype, np.int64)
+ self.assertEqual(self.panel4d['lE'].values.dtype, np.bool_)
- # object dtype
- self.panel4d['lQ'] = 'foo'
- self.assertEqual(self.panel4d['lQ'].values.dtype, np.object_)
+ # object dtype
+ self.panel4d['lQ'] = 'foo'
+ self.assertEqual(self.panel4d['lQ'].values.dtype, np.object_)
- # boolean dtype
- self.panel4d['lP'] = self.panel4d['l1'] > 0
- self.assertEqual(self.panel4d['lP'].values.dtype, np.bool_)
+ # boolean dtype
+ self.panel4d['lP'] = self.panel4d['l1'] > 0
+ self.assertEqual(self.panel4d['lP'].values.dtype, np.bool_)
def test_setitem_by_indexer(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
# Panel
panel4dc = self.panel4d.copy()
@@ -419,7 +422,7 @@ def func():
def test_setitem_by_indexer_mixed_type(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
# GH 8702
self.panel4d['foo'] = 'bar'
@@ -433,7 +436,7 @@ def test_setitem_by_indexer_mixed_type(self):
self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
def test_comparisons(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p1 = tm.makePanel4D()
p2 = tm.makePanel4D()
@@ -467,7 +470,8 @@ def test_major_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.major_axis[5]
- xs = self.panel4d.major_xs(idx)
+ with catch_warnings(record=True):
+ xs = self.panel4d.major_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'],
ref.xs(idx), check_names=False)
@@ -478,15 +482,17 @@ def test_major_xs(self):
def test_major_xs_mixed(self):
self.panel4d['l4'] = 'foo'
- xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
+ with catch_warnings(record=True):
+ xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
self.assertEqual(xs['l1']['A'].dtype, np.float64)
self.assertEqual(xs['l4']['A'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel4d['l1']['ItemA']
- idx = self.panel4d.minor_axis[1]
- xs = self.panel4d.minor_xs(idx)
+ with catch_warnings(record=True):
+ idx = self.panel4d.minor_axis[1]
+ xs = self.panel4d.minor_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)
@@ -496,7 +502,8 @@ def test_minor_xs(self):
def test_minor_xs_mixed(self):
self.panel4d['l4'] = 'foo'
- xs = self.panel4d.minor_xs('D')
+ with catch_warnings(record=True):
+ xs = self.panel4d.minor_xs('D')
self.assertEqual(xs['l1'].T['ItemA'].dtype, np.float64)
self.assertEqual(xs['l4'].T['ItemA'].dtype, np.object_)
@@ -512,11 +519,12 @@ def test_xs(self):
# mixed-type
self.panel4d['strings'] = 'foo'
- result = self.panel4d.xs('D', axis=3)
+ with catch_warnings(record=True):
+ result = self.panel4d.xs('D', axis=3)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
panel4d = self.panel4d
labels = panel4d.labels[[1, 0]]
@@ -572,7 +580,7 @@ def test_get_value(self):
def test_set_value(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
for label in self.panel4d.labels:
for item in self.panel4d.items:
@@ -603,13 +611,13 @@ def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def setUp(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self.panel4d = tm.makePanel4D(nper=8)
add_nans(self.panel4d)
def test_constructor(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
panel4d = Panel4D(self.panel4d._data)
self.assertIs(panel4d._data, self.panel4d._data)
@@ -649,7 +657,7 @@ def test_constructor(self):
assert_panel4d_equal(panel4d, expected)
def test_constructor_cast(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
zero_filled = self.panel4d.fillna(0)
casted = Panel4D(zero_filled._data, dtype=int)
@@ -671,7 +679,7 @@ def test_constructor_cast(self):
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_consolidate(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self.assertTrue(self.panel4d._data.is_consolidated())
self.panel4d['foo'] = 1.
@@ -681,7 +689,7 @@ def test_consolidate(self):
self.assertTrue(panel4d._data.is_consolidated())
def test_ctor_dict(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
l1 = self.panel4d['l1']
l2 = self.panel4d['l2']
@@ -694,7 +702,7 @@ def test_ctor_dict(self):
:, :]['ItemB'])
def test_constructor_dict_mixed(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
data = dict((k, v.values) for k, v in self.panel4d.iteritems())
result = Panel4D(data)
@@ -721,7 +729,7 @@ def test_constructor_dict_mixed(self):
self.assertRaises(Exception, Panel4D, data)
def test_constructor_resize(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
data = self.panel4d._data
labels = self.panel4d.labels[:-1]
items = self.panel4d.items[:-1]
@@ -747,16 +755,19 @@ def test_constructor_resize(self):
assert_panel4d_equal(result, expected)
def test_conform(self):
+ with catch_warnings(record=True):
- p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
- conformed = self.panel4d.conform(p)
+ p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
+ conformed = self.panel4d.conform(p)
- tm.assert_index_equal(conformed.items, self.panel4d.labels)
- tm.assert_index_equal(conformed.major_axis, self.panel4d.major_axis)
- tm.assert_index_equal(conformed.minor_axis, self.panel4d.minor_axis)
+ tm.assert_index_equal(conformed.items, self.panel4d.labels)
+ tm.assert_index_equal(conformed.major_axis,
+ self.panel4d.major_axis)
+ tm.assert_index_equal(conformed.minor_axis,
+ self.panel4d.minor_axis)
def test_reindex(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
ref = self.panel4d['l2']
# labels
@@ -810,14 +821,14 @@ def test_reindex(self):
self.assertTrue(result is self.panel4d)
def test_not_hashable(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4D_empty = Panel4D()
self.assertRaises(TypeError, hash, p4D_empty)
self.assertRaises(TypeError, hash, self.panel4d)
def test_reindex_like(self):
# reindex_like
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
items=self.panel4d.items[:-1],
major=self.panel4d.major_axis[:-1],
@@ -826,7 +837,7 @@ def test_reindex_like(self):
assert_panel4d_equal(smaller, smaller_like)
def test_sort_index(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
import random
rlabels = list(self.panel4d.labels)
@@ -844,7 +855,7 @@ def test_sort_index(self):
def test_fillna(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self.assertFalse(np.isfinite(self.panel4d.values).all())
filled = self.panel4d.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
@@ -853,7 +864,7 @@ def test_fillna(self):
self.panel4d.fillna, method='pad')
def test_swapaxes(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
result = self.panel4d.swapaxes('labels', 'items')
self.assertIs(result.items, self.panel4d.labels)
@@ -880,7 +891,7 @@ def test_swapaxes(self):
def test_update(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = Panel4D([[[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
@@ -913,12 +924,12 @@ def test_dtypes(self):
assert_series_equal(result, expected)
def test_repr_empty(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
empty = Panel4D()
repr(empty)
def test_rename(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
mapper = {'l1': 'foo',
'l2': 'bar',
diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py
index 6a578d85d3ee3..7ecc773cd7bea 100644
--- a/pandas/tests/test_panelnd.py
+++ b/pandas/tests/test_panelnd.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+from warnings import catch_warnings
from pandas.core import panelnd
from pandas.core.panel import Panel
@@ -13,7 +14,7 @@ def setUp(self):
def test_4d_construction(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
@@ -29,7 +30,7 @@ def test_4d_construction(self):
def test_4d_construction_alt(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
@@ -61,7 +62,7 @@ def test_4d_construction_error(self):
def test_5d_construction(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py
index 392036a99a297..c41924a7987bd 100644
--- a/pandas/tests/tools/test_concat.py
+++ b/pandas/tests/tools/test_concat.py
@@ -1,3 +1,4 @@
+from warnings import catch_warnings
import numpy as np
from numpy.random import randn
@@ -1373,7 +1374,7 @@ def df():
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
p1 = p4d.iloc[:, :, :5, :]
@@ -1389,7 +1390,7 @@ def test_panel4d_concat(self):
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
| https://api.github.com/repos/pandas-dev/pandas/pulls/15705 | 2017-03-16T22:12:47Z | 2017-03-17T13:06:27Z | 2017-03-17T13:06:27Z | 2017-03-17T13:07:05Z | |
MAINT: test with manylinux numpy/scipy pre-release | diff --git a/.travis.yml b/.travis.yml
index b0331941e2a1e..ee093e5bf0e60 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -123,11 +123,6 @@ matrix:
- PANDAS_TESTING_MODE="deprecate"
- CACHE_NAME="35_numpy_dev"
- USE_CACHE=true
- addons:
- apt:
- packages:
- - libatlas-base-dev
- - gfortran
# In allow_failures
- python: 3.5
env:
@@ -167,11 +162,6 @@ matrix:
- PANDAS_TESTING_MODE="deprecate"
- CACHE_NAME="35_numpy_dev"
- USE_CACHE=true
- addons:
- apt:
- packages:
- - libatlas-base-dev
- - gfortran
- python: 3.5
env:
- PYTHON_VERSION=3.5
diff --git a/ci/requirements-3.5_NUMPY_DEV.build.sh b/ci/requirements-3.5_NUMPY_DEV.build.sh
index b6c8a477e6f5e..4af1307f26a18 100644
--- a/ci/requirements-3.5_NUMPY_DEV.build.sh
+++ b/ci/requirements-3.5_NUMPY_DEV.build.sh
@@ -8,6 +8,7 @@ echo "install numpy master wheel"
pip uninstall numpy -y
# install numpy wheel from master
-pip install --pre --upgrade --no-index --timeout=60 --trusted-host travis-dev-wheels.scipy.org -f http://travis-dev-wheels.scipy.org/ numpy
+PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
+pip install --pre --upgrade --timeout=60 -f $PRE_WHEELS numpy scipy
true
| Numpy / Scipy switching to daily manylinux wheels of trunk, instead of building
wheels specific to Ubuntu 12.04 for every commit. Use these new wheels
for numpy and scipy pre-release testing.
See: #15699, #15689 and https://github.com/scipy/scipy/issues/7188. | https://api.github.com/repos/pandas-dev/pandas/pulls/15702 | 2017-03-16T17:56:21Z | 2017-03-16T19:12:09Z | 2017-03-16T19:12:09Z | 2017-03-17T07:01:30Z |
ENH: support for removing unused levels of a MultiIndex (interally) | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 6e9ef4b10273c..dfe3f0ef87c11 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -292,7 +292,10 @@ def setup(self):
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
- def time_sort_index(self):
+ def time_sort_index_monotonic(self):
+ self.ts2.sort_index()
+
+ def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 37a70435ed6ff..d518d85836123 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -656,6 +656,78 @@ If indicated, a deprecation warning will be issued if you reference that module.
"pandas._hash", "pandas.tools.libhash", ""
"pandas._window", "pandas.core.libwindow", ""
+.. _whatsnew_0200.api_breaking.sort_index:
+
+DataFrame.sort_index changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In certain cases, calling ``.sort_index()`` on a MultiIndexed DataFrame would return the *same* DataFrame without seeming to sort.
+This would happen with a ``lexsorted``, but non-montonic levels. (:issue:`15622`, :issue:`15687`, :issue:`14015`, :issue:`13431`)
+
+This is UNCHANGED between versions, but showing for illustration purposes:
+
+.. ipython:: python
+
+ df = DataFrame(np.arange(6), columns=['value'], index=MultiIndex.from_product([list('BA'), range(3)]))
+ df
+
+.. ipython:: python
+
+ df.index.is_lexsorted()
+ df.index.is_monotonic
+
+Sorting works as expected
+
+.. ipython:: python
+
+ df.sort_index()
+
+.. ipython:: python
+
+ df.sort_index().index.is_lexsorted()
+ df.sort_index().index.is_monotonic
+
+However, this example, which has a monotonic level, doesn't behave as desired.
+
+.. ipython:: python
+ df = pd.DataFrame({'value': [1, 2, 3, 4]},
+ index=pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+
+Previous Behavior:
+
+.. ipython:: python
+
+ In [11]: df.sort_index()
+ Out[11]:
+ value
+ a bb 1
+ aa 2
+ b bb 3
+ aa 4
+
+ In [14]: df.sort_index().index.is_lexsorted()
+ Out[14]: True
+
+ In [15]: df.sort_index().index.is_monotonic
+ Out[15]: False
+
+New Behavior:
+
+.. ipython:: python
+
+ df.sort_index()
+ df.sort_index().index.is_lexsorted()
+ df.sort_index().index.is_monotonic
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+New Behavior:
+
+.. ipython:: python
+
.. _whatsnew_0200.api_breaking.groupby_describe:
@@ -830,7 +902,7 @@ Performance Improvements
- Improved performance when using ``.unstack()`` (:issue:`15503`)
- Improved performance of merge/join on ``category`` columns (:issue:`10409`)
- Improved performance of ``drop_duplicates()`` on ``bool`` columns (:issue:`12963`)
-
+- Improved performance of ``Series.sort_index()`` with a monotonic index (:issue:`15694`)
.. _whatsnew_0200.bug_fixes:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b5e8e0799421..c9987053150ff 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3308,6 +3308,10 @@ def trans(v):
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
+
+ # TODO: this can be combined with Series.sort_index impl as
+ # almost identical
+
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
@@ -3321,8 +3325,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
- # sort by the index
- if level is not None:
+ if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
@@ -3332,17 +3335,15 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
- if not labels.is_lexsorted():
- labels = MultiIndex.from_tuples(labels.values)
+ labels = labels._reconstruct(sort=True)
indexer = lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
- # GH11080 - Check monotonic-ness before sort an index
- # if monotonic (already sorted), return None or copy() according
- # to 'inplace'
+ # Check monotonic-ness before sort an index
+ # GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
@@ -3353,8 +3354,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
+ baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
- axis=self._get_block_manager_axis(axis),
+ axis=baxis,
convert=False, verify=False)
if inplace:
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 727af8b8cd3eb..00300ccaa1b75 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1808,6 +1808,13 @@ def get_group_levels(self):
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
+ def _is_builtin_func(self, arg):
+ """
+ if we define an builtin function for this argument, return it,
+ otherwise return the arg
+ """
+ return SelectionMixin._builtin_table.get(arg, arg)
+
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
@@ -2033,7 +2040,7 @@ def _aggregate_series_fast(self, obj, func):
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
- obj = obj.take(indexer, convert=False)
+ obj = obj.take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 2822d98b7c906..8d6b6e17396eb 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -22,8 +22,8 @@
from pandas.sparse.libsparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
-from pandas.core.sorting import (get_group_index, compress_group_index,
- decons_obs_group_ids)
+from pandas.core.sorting import (get_group_index, get_compressed_ids,
+ compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
@@ -494,11 +494,6 @@ def _unstack_frame(obj, level, fill_value=None):
return unstacker.get_result()
-def get_compressed_ids(labels, sizes):
- ids = get_group_index(labels, sizes, sort=True, xnull=False)
- return compress_group_index(ids, sort=True)
-
-
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0913592e055cd..6fb1b66708369 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1752,17 +1752,31 @@ def _try_kind_sort(arr):
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
+ # TODO: this can be combined with DataFrame.sort_index impl as
+ # almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
index = self.index
- if level is not None:
+
+ if level:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
- indexer = lexsort_indexer(index.labels, orders=ascending)
+ labels = index._reconstruct(sort=True)
+ indexer = lexsort_indexer(labels.labels, orders=ascending)
else:
from pandas.core.sorting import nargsort
+
+ # Check monotonic-ness before sort an index
+ # GH11080
+ if ((ascending and index.is_monotonic_increasing) or
+ (not ascending and index.is_monotonic_decreasing)):
+ if inplace:
+ return
+ else:
+ return self.copy()
+
indexer = nargsort(index, kind=kind, ascending=ascending,
na_position=na_position)
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 205d0d94d2ec3..ea131e66cb833 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -93,6 +93,11 @@ def maybe_lift(lab, size): # pormote nan values
return loop(list(labels), list(shape))
+def get_compressed_ids(labels, sizes):
+ ids = get_group_index(labels, sizes, sort=True, xnull=False)
+ return compress_group_index(ids, sort=True)
+
+
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index e6ae0605d4758..16019dd2d860b 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1173,9 +1173,100 @@ def from_product(cls, iterables, sortorder=None, names=None):
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
+ return MultiIndex(levels, labels, sortorder=sortorder, names=names)
- return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
- names=names)
+ def _reconstruct(self, sort=False, remove_unused=False):
+ """
+ reconstruct the MultiIndex
+
+ The MultiIndex will have the same outward appearance (e.g. values)
+ and will also .equals()
+
+ Parameters
+ ----------
+ sort: boolean, default False
+ monotonically sort the levels
+ remove_unused: boolean, default False
+ remove unsued levels
+
+ Returns
+ -------
+ MultiIndex
+
+ """
+
+ if sort and remove_unused:
+ raise ValueError("only support one of sort / remove_unused")
+
+ if not (sort or remove_unused):
+ raise ValueError("must supply one of sort / remove_unsued")
+
+ levels = self.levels
+ labels = self.labels
+
+ new_levels = []
+ new_labels = []
+
+ if sort:
+
+ if self.is_lexsorted() and self.is_monotonic:
+ return self
+
+ for lev, lab in zip(levels, labels):
+
+ if lev.is_monotonic:
+ new_levels.append(lev)
+ new_labels.append(lab)
+ continue
+
+ # indexer to reorder the levels
+ indexer = lev.argsort()
+ lev = lev.take(indexer)
+
+ # indexer to reorder the labels
+ ri = lib.get_reverse_indexer(indexer, len(indexer))
+ lab = algos.take_1d(ri, lab)
+
+ new_levels.append(lev)
+ new_labels.append(lab)
+
+ elif remove_unused:
+
+ changed = np.zeros(self.nlevels, dtype=bool)
+ for i, (lev, lab) in enumerate(zip(levels, labels)):
+
+ uniques = np.sort(algos.unique(lab))
+
+ # nothing unused
+ if len(uniques) == len(lev):
+ new_levels.append(lev)
+ new_labels.append(lab)
+ changed[i] = True
+ continue
+
+ unused = list(reversed(sorted(set(
+ np.arange(len(lev))) - set(uniques))))
+
+ # new levels are simple
+ lev = lev.take(uniques)
+
+ # new labels, we remove the unsued
+ # by decrementing the labels for that value
+ # prob a better way
+ for u in unused:
+
+ lab = np.where(lab > u, lab - 1, lab)
+
+ new_levels.append(lev)
+ new_labels.append(lab)
+
+ # nothing changed
+ if not changed.any():
+ return self
+
+ return MultiIndex(new_levels, new_labels,
+ names=self.names, sortorder=self.sortorder,
+ verify_integrity=False)
@property
def nlevels(self):
@@ -1746,9 +1837,10 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
- raise KeyError('Key length (%d) was greater than MultiIndex'
- ' lexsort depth (%d)' %
- (len(tup), self.lexsort_depth))
+ raise UnsortedIndexError(
+ 'Key length (%d) was greater than MultiIndex'
+ ' lexsort depth (%d)' %
+ (len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 0c274b2f6c4ff..f153fc6d7b815 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -2411,6 +2411,92 @@ def test_is_monotonic(self):
self.assertFalse(i.is_monotonic)
+ def test_reconstruct_api(self):
+
+ mi = MultiIndex.from_arrays([
+ ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
+ ])
+
+ with pytest.raises(ValueError):
+ mi._reconstruct()
+
+ with pytest.raises(ValueError):
+ mi._reconstruct(sort=True, remove_unused=True)
+
+ def test_reconstruct_sort(self):
+
+ # starts off lexsorted & monotonic
+ mi = MultiIndex.from_arrays([
+ ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
+ ])
+ assert mi.is_lexsorted()
+ assert mi.is_monotonic
+
+ recons = mi._reconstruct(sort=True)
+ assert recons.is_lexsorted()
+ assert recons.is_monotonic
+ assert mi is recons
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # cannot convert to lexsorted
+ mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
+ ('x', 'b'), ('y', 'a'), ('z', 'b')],
+ names=['one', 'two'])
+ assert not mi.is_lexsorted()
+ assert not mi.is_monotonic
+
+ recons = mi._reconstruct(sort=True)
+ assert not recons.is_lexsorted()
+ assert not recons.is_monotonic
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # cannot convert to lexsorted
+ mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
+ labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
+ names=['col1', 'col2'])
+ assert not mi.is_lexsorted()
+ assert not mi.is_monotonic
+
+ recons = mi._reconstruct(sort=True)
+ assert not recons.is_lexsorted()
+ assert not recons.is_monotonic
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ def test_reconstruct_remove_unused(self):
+ # xref to GH 2770
+ df = DataFrame([['deleteMe', 1, 9],
+ ['keepMe', 2, 9],
+ ['keepMeToo', 3, 9]],
+ columns=['first', 'second', 'third'])
+ df2 = df.set_index(['first', 'second'], drop=False)
+ df2 = df2[df2['first'] != 'deleteMe']
+
+ # removed levels are there
+ expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],
+ [1, 2, 3]],
+ labels=[[1, 2], [1, 2]],
+ names=['first', 'second'])
+ result = df2.index
+ tm.assert_index_equal(result, expected)
+
+ expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],
+ [2, 3]],
+ labels=[[0, 1], [0, 1]],
+ names=['first', 'second'])
+ result = df2.index._reconstruct(remove_unused=True)
+ tm.assert_index_equal(result, expected)
+
+ # idempotent
+ result2 = result._reconstruct(remove_unused=True)
+ tm.assert_index_equal(result2, expected)
+ assert result2 is result
+
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
@@ -2699,6 +2785,30 @@ def test_unsortedindex(self):
with assertRaises(KeyError):
df.loc(axis=0)['q', :]
+ def test_unsortedindex_doc_examples(self):
+ # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa
+ dfm = DataFrame({'jim': [0, 0, 1, 1],
+ 'joe': ['x', 'x', 'z', 'y'],
+ 'jolie': np.random.rand(4)})
+
+ dfm = dfm.set_index(['jim', 'joe'])
+ with tm.assert_produces_warning(PerformanceWarning):
+ dfm.loc[(1, 'z')]
+
+ with pytest.raises(UnsortedIndexError):
+ dfm.loc[(0, 'y'):(1, 'z')]
+
+ assert not dfm.index.is_lexsorted()
+ assert dfm.index.lexsort_depth == 1
+
+ # sort it
+ dfm = dfm.sort_index()
+ dfm.loc[(1, 'z')]
+ dfm.loc[(0, 'y'):(1, 'z')]
+
+ assert dfm.index.is_lexsorted()
+ assert dfm.index.lexsort_depth == 2
+
def test_tuples_with_name_string(self):
# GH 15110 and GH 14848
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index dc71fafb1094f..1e197f10cb9c6 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1599,7 +1599,7 @@ def test_unstack(self):
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
- unstacked = s.unstack(0)
+ unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index fd5421abc89ad..ef3dab19b81b6 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2438,6 +2438,30 @@ def test_getitem_slice_not_sorted(self):
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
+ def test_frame_getitem_not_sorted2(self):
+ # 13431
+ df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
+ 'col2': [3, 1, 1, 2],
+ 'data': ['one', 'two', 'three', 'four']})
+
+ df2 = df.set_index(['col1', 'col2'])
+ df2_original = df2.copy()
+
+ df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
+ df2.index.set_labels([0, 1, 0, 2], level='col1', inplace=True)
+ assert not df2.index.is_lexsorted()
+ assert not df2.index.is_monotonic
+
+ assert df2_original.index.equals(df2.index)
+ expected = df2.sort_index()
+ assert not expected.index.is_lexsorted()
+ assert expected.index.is_monotonic
+
+ result = df2.sort_index(level=0)
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+ tm.assert_frame_equal(result, expected)
+
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
@@ -2474,3 +2498,117 @@ def test_series_getitem_not_sorted(self):
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
+
+ def test_sort_index_and_reconstruction(self):
+
+ # 15622
+ # lexsortedness should be identical
+ # across MultiIndex consruction methods
+
+ df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
+ expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex.from_tuples([(0.5, 'a'),
+ (0.5, 'b'),
+ (0.8, 'a'),
+ (0.8, 'b')]))
+ assert expected.index.is_lexsorted()
+
+ result = DataFrame(
+ [[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
+ result = result.sort_index()
+ assert result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(
+ [[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ result = result.sort_index()
+ assert result.index.is_lexsorted()
+
+ tm.assert_frame_equal(result, expected)
+
+ concatted = pd.concat([df, df], keys=[0.8, 0.5])
+ result = concatted.sort_index()
+
+ # this will be monotonic, but not lexsorted!
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ # 14015
+ df = DataFrame([[1, 2], [6, 7]],
+ columns=MultiIndex.from_tuples(
+ [(0, '20160811 12:00:00'),
+ (0, '20160809 12:00:00')],
+ names=['l1', 'Date']))
+
+ df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
+ level=1,
+ inplace=True)
+ assert not df.columns.is_lexsorted()
+ assert not df.columns.is_monotonic
+ result = df.sort_index(axis=1)
+ assert result.columns.is_lexsorted()
+ assert result.columns.is_monotonic
+ result = df.sort_index(axis=1, level=1)
+ assert result.columns.is_lexsorted()
+ assert result.columns.is_monotonic
+
+ def test_sort_index_and_reconstruction_doc_example(self):
+ # doc example
+ df = DataFrame({'value': [1, 2, 3, 4]},
+ index=MultiIndex(
+ levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ assert df.index.is_lexsorted()
+ assert not df.index.is_monotonic
+
+ # sort it
+ expected = DataFrame({'value': [2, 1, 4, 3]},
+ index=MultiIndex(
+ levels=[['a', 'b'], ['aa', 'bb']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ result = df.sort_index()
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ # reconstruct
+ result = df.sort_index().copy()
+ result.index = result.index._reconstruct(sort=True)
+ assert result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index_reorder_on_ops(self):
+ # 15687
+ df = pd.DataFrame(
+ np.random.randn(8, 2),
+ index=MultiIndex.from_product(
+ [['a', 'b'],
+ ['big', 'small'],
+ ['red', 'blu']],
+ names=['letter', 'size', 'color']),
+ columns=['near', 'far'])
+ df = df.sort_index()
+
+ def my_func(group):
+ group.index = ['newz', 'newa']
+ return group
+
+ result = df.groupby(level=['letter', 'size']).apply(
+ my_func).sort_index()
+ expected = MultiIndex.from_product(
+ [['a', 'b'],
+ ['big', 'small'],
+ ['newa', 'newz']],
+ names=['letter', 'size', None])
+
+ tm.assert_index_equal(result.index, expected)
diff --git a/pandas/tests/tools/test_hashing.py b/pandas/tests/tools/test_hashing.py
index 9bed0d428bc41..17a1fb1a7d525 100644
--- a/pandas/tests/tools/test_hashing.py
+++ b/pandas/tests/tools/test_hashing.py
@@ -87,6 +87,35 @@ def test_multiindex_unique(self):
result = hash_pandas_object(mi)
self.assertTrue(result.is_unique)
+ def test_multiindex_objects(self):
+ mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
+ labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
+ names=['col1', 'col2'])
+ recons = mi._reconstruct(sort=True)
+
+ # these are equal
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # _hashed_values and hash_pandas_object(..., index=False)
+ # equivalency
+ expected = hash_pandas_object(
+ mi, index=False).values
+ result = mi._hashed_values
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = hash_pandas_object(
+ recons, index=False).values
+ result = recons._hashed_values
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = mi._hashed_values
+ result = recons._hashed_values
+
+ # values should match, but in different order
+ tm.assert_numpy_array_equal(np.sort(result),
+ np.sort(expected))
+
def test_hash_pandas_object(self):
for obj in [Series([1, 2, 3]),
diff --git a/pandas/tests/tools/test_pivot.py b/pandas/tests/tools/test_pivot.py
index 4502f232c6d9c..c8dfaf5e29bc6 100644
--- a/pandas/tests/tools/test_pivot.py
+++ b/pandas/tests/tools/test_pivot.py
@@ -2,6 +2,7 @@
import numpy as np
+from collections import OrderedDict
import pandas as pd
from pandas import (DataFrame, Series, Index, MultiIndex,
Grouper, date_range, concat)
@@ -513,7 +514,7 @@ def test_pivot_columns_lexsorted(self):
self.assertTrue(pivoted.columns.is_monotonic)
def test_pivot_complex_aggfunc(self):
- f = {'D': ['std'], 'E': ['sum']}
+ f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
| on top of #15694
xref #2770
Here's an example of what we could do with this
```
In [1]: df = pd.DataFrame({'value': [1, 2, 3, 4]}, index=pd.MultiIndex(
...: levels=[['a', 'b'], ['bb', 'aa']],
...: labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
In [2]: df
Out[2]:
value
a bb 1
aa 2
b bb 3
aa 4
In [14]: df.index.is_lexsorted()
Out[14]: True
In [15]: df.index.is_monotonic
Out[15]: False
```
sorting makes this monotonic & usually lexsorted (but not always)
```
In [3]: df2 = df.sort_index()
In [4]: df2
Out[4]:
value
a aa 2
bb 1
b aa 4
bb 3
In [12]: df2.index.is_lexsorted()
Out[12]: False
In [13]: df2.index.is_monotonic
Out[13]: True
```
If we expose a method ``.remove_unused_labels()`` (or even just do this under the hood on certain operations.
```
In [5]: df3 = df2.copy()
In [6]: df3.index._reconstruct(sort=True)
Out[6]:
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
In [7]: df3.index = df3.index._reconstruct(sort=True)
In [8]: df3
Out[8]:
value
a aa 2
bb 1
b aa 4
bb 3
In [9]: df3.index.is_lexsorted()
Out[9]: True
In [11]: df3.index.is_monotonic
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/15700 | 2017-03-16T16:31:28Z | 2017-03-22T22:32:24Z | null | 2017-03-22T22:33:07Z |
CI: remove dev-scipy from testing on numpy-dev build as really old sheels | diff --git a/ci/requirements-3.5_NUMPY_DEV.build.sh b/ci/requirements-3.5_NUMPY_DEV.build.sh
index 91fa15491bbf7..b6c8a477e6f5e 100644
--- a/ci/requirements-3.5_NUMPY_DEV.build.sh
+++ b/ci/requirements-3.5_NUMPY_DEV.build.sh
@@ -8,6 +8,6 @@ echo "install numpy master wheel"
pip uninstall numpy -y
# install numpy wheel from master
-pip install --pre --upgrade --no-index --timeout=60 --trusted-host travis-dev-wheels.scipy.org -f http://travis-dev-wheels.scipy.org/ numpy scipy
+pip install --pre --upgrade --no-index --timeout=60 --trusted-host travis-dev-wheels.scipy.org -f http://travis-dev-wheels.scipy.org/ numpy
true
| closes #15696 | https://api.github.com/repos/pandas-dev/pandas/pulls/15699 | 2017-03-16T15:58:52Z | 2017-03-16T16:20:37Z | 2017-03-16T16:20:37Z | 2017-03-16T16:20:38Z |
BUG: DataFrame.sort_index broken if not both lexsorted and monotonic in levels | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 6e9ef4b10273c..dfe3f0ef87c11 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -292,7 +292,10 @@ def setup(self):
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
- def time_sort_index(self):
+ def time_sort_index_monotonic(self):
+ self.ts2.sort_index()
+
+ def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index f380070ddac79..16ded4083b588 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -136,7 +136,7 @@ can find yourself working with hierarchically-indexed data without creating a
may wish to generate your own ``MultiIndex`` when preparing the data set.
Note that how the index is displayed by be controlled using the
-``multi_sparse`` option in ``pandas.set_printoptions``:
+``multi_sparse`` option in ``pandas.set_options()``:
.. ipython:: python
@@ -175,35 +175,40 @@ completely analogous way to selecting a column in a regular DataFrame:
See :ref:`Cross-section with hierarchical index <advanced.xs>` for how to select
on a deeper level.
-.. note::
+.. _advanced.shown_levels:
+
+Defined Levels
+~~~~~~~~~~~~~~
+
+The repr of a ``MultiIndex`` shows ALL the defined levels of an index, even
+if the they are not actually used. When slicing an index, you may notice this.
+For example:
- The repr of a ``MultiIndex`` shows ALL the defined levels of an index, even
- if the they are not actually used. When slicing an index, you may notice this.
- For example:
+.. ipython:: python
- .. ipython:: python
+ # original multi-index
+ df.columns
- # original multi-index
- df.columns
+ # sliced
+ df[['foo','qux']].columns
- # sliced
- df[['foo','qux']].columns
+This is done to avoid a recomputation of the levels in order to make slicing
+highly performant. If you want to see the actual used levels.
- This is done to avoid a recomputation of the levels in order to make slicing
- highly performant. If you want to see the actual used levels.
+.. ipython:: python
- .. ipython:: python
+ df[['foo','qux']].columns.values
- df[['foo','qux']].columns.values
+ # for a specific level
+ df[['foo','qux']].columns.get_level_values(0)
- # for a specific level
- df[['foo','qux']].columns.get_level_values(0)
+To reconstruct the multiindex with only the used levels
- To reconstruct the multiindex with only the used levels
+.. versionadded:: 0.20.0
- .. ipython:: python
+.. ipython:: python
- pd.MultiIndex.from_tuples(df[['foo','qux']].columns.values)
+ df[['foo','qux']].columns.remove_unused_levels()
Data alignment and using ``reindex``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -288,7 +293,7 @@ As usual, **both sides** of the slicers are included as this is label indexing.
.. code-block:: python
- df.loc[(slice('A1','A3'),.....),:]
+ df.loc[(slice('A1','A3'),.....), :]
rather than this:
@@ -317,43 +322,43 @@ Basic multi-index slicing using slices, lists, and labels.
.. ipython:: python
- dfmi.loc[(slice('A1','A3'),slice(None), ['C1','C3']),:]
+ dfmi.loc[(slice('A1','A3'), slice(None), ['C1', 'C3']), :]
You can use a ``pd.IndexSlice`` to have a more natural syntax using ``:`` rather than using ``slice(None)``
.. ipython:: python
idx = pd.IndexSlice
- dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
+ dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']]
It is possible to perform quite complicated selections using this method on multiple
axes at the same time.
.. ipython:: python
- dfmi.loc['A1',(slice(None),'foo')]
- dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
+ dfmi.loc['A1', (slice(None), 'foo')]
+ dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']]
Using a boolean indexer you can provide selection related to the *values*.
.. ipython:: python
- mask = dfmi[('a','foo')]>200
- dfmi.loc[idx[mask,:,['C1','C3']],idx[:,'foo']]
+ mask = dfmi[('a', 'foo')] > 200
+ dfmi.loc[idx[mask, :, ['C1', 'C3']], idx[:, 'foo']]
You can also specify the ``axis`` argument to ``.loc`` to interpret the passed
slicers on a single axis.
.. ipython:: python
- dfmi.loc(axis=0)[:,:,['C1','C3']]
+ dfmi.loc(axis=0)[:, :, ['C1', 'C3']]
Furthermore you can *set* the values using these methods
.. ipython:: python
df2 = dfmi.copy()
- df2.loc(axis=0)[:,:,['C1','C3']] = -10
+ df2.loc(axis=0)[:, :, ['C1', 'C3']] = -10
df2
You can use a right-hand-side of an alignable object as well.
@@ -361,7 +366,7 @@ You can use a right-hand-side of an alignable object as well.
.. ipython:: python
df2 = dfmi.copy()
- df2.loc[idx[:,:,['C1','C3']],:] = df2*1000
+ df2.loc[idx[:, :, ['C1', 'C3']], :] = df2 * 1000
df2
.. _advanced.xs:
diff --git a/doc/source/api.rst b/doc/source/api.rst
index 24bad7d515305..336b0b9b14c6c 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1432,6 +1432,7 @@ MultiIndex Components
MultiIndex.droplevel
MultiIndex.swaplevel
MultiIndex.reorder_levels
+ MultiIndex.remove_unused_levels
.. _api.datetimeindex:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index cb9e2496757ef..21b259e7663ba 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -366,6 +366,8 @@ Other Enhancements
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
- ``pd.read_csv()`` will now raise a ``csv.Error`` error whenever an end-of-file character is encountered in the middle of a data row (:issue:`15913`)
+- A new function has been added to a ``MultiIndex`` to facilitate :ref:`Removing Unused Levels <advanced.shown_levels>`. (:issue:`15694`)
+- :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels <advanced.shown_levels>`. (:issue:`15694`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
@@ -714,6 +716,72 @@ If indicated, a deprecation warning will be issued if you reference that module.
"pandas._hash", "pandas.tools.libhash", ""
"pandas._window", "pandas.core.libwindow", ""
+.. _whatsnew_0200.api_breaking.sort_index:
+
+DataFrame.sort_index changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In certain cases, calling ``.sort_index()`` on a MultiIndexed DataFrame would return the *same* DataFrame without seeming to sort.
+This would happen with a ``lexsorted``, but non-monotonic levels. (:issue:`15622`, :issue:`15687`, :issue:`14015`, :issue:`13431`)
+
+This is UNCHANGED between versions, but showing for illustration purposes:
+
+.. ipython:: python
+
+ df = DataFrame(np.arange(6), columns=['value'], index=MultiIndex.from_product([list('BA'), range(3)]))
+ df
+
+.. ipython:: python
+
+ df.index.is_lexsorted()
+ df.index.is_monotonic
+
+Sorting works as expected
+
+.. ipython:: python
+
+ df.sort_index()
+
+.. ipython:: python
+
+ df.sort_index().index.is_lexsorted()
+ df.sort_index().index.is_monotonic
+
+However, this example, which has a non-monotonic 2nd level,
+doesn't behave as desired.
+
+.. ipython:: python
+ df = pd.DataFrame(
+ {'value': [1, 2, 3, 4]},
+ index=pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+
+Previous Behavior:
+
+.. ipython:: python
+
+ In [11]: df.sort_index()
+ Out[11]:
+ value
+ a bb 1
+ aa 2
+ b bb 3
+ aa 4
+
+ In [14]: df.sort_index().index.is_lexsorted()
+ Out[14]: True
+
+ In [15]: df.sort_index().index.is_monotonic
+ Out[15]: False
+
+New Behavior:
+
+.. ipython:: python
+
+ df.sort_index()
+ df.sort_index().index.is_lexsorted()
+ df.sort_index().index.is_monotonic
+
.. _whatsnew_0200.api_breaking.groupby_describe:
@@ -965,7 +1033,7 @@ Performance Improvements
- Improve performance of ``pd.core.groupby.GroupBy.apply`` when the applied
function used the ``.name`` attribute of the group DataFrame (:issue:`15062`).
- Improved performance of ``iloc`` indexing with a list or array (:issue:`15504`).
-
+- Improved performance of ``Series.sort_index()`` with a monotonic index (:issue:`15694`)
.. _whatsnew_0200.bug_fixes:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f6199be2d1fc9..c8c21b0c5fd7d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3322,6 +3322,10 @@ def trans(v):
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
+
+ # TODO: this can be combined with Series.sort_index impl as
+ # almost identical
+
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
@@ -3335,8 +3339,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
- # sort by the index
- if level is not None:
+ if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
@@ -3346,17 +3349,14 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
- if not labels.is_lexsorted():
- labels = MultiIndex.from_tuples(labels.values)
-
+ labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
- # GH11080 - Check monotonic-ness before sort an index
- # if monotonic (already sorted), return None or copy() according
- # to 'inplace'
+ # Check monotonic-ness before sort an index
+ # GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
@@ -3367,8 +3367,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
+ baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
- axis=self._get_block_manager_axis(axis),
+ axis=baxis,
convert=False, verify=False)
if inplace:
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index fe764a099bb63..add2987b8f452 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1882,6 +1882,13 @@ def get_group_levels(self):
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
+ def _is_builtin_func(self, arg):
+ """
+ if we define an builtin function for this argument, return it,
+ otherwise return the arg
+ """
+ return SelectionMixin._builtin_table.get(arg, arg)
+
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
@@ -2107,7 +2114,7 @@ def _aggregate_series_fast(self, obj, func):
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
- obj = obj.take(indexer, convert=False)
+ obj = obj.take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index c7e06d63fbda9..b03c3d77928c7 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -22,8 +22,8 @@
from pandas.sparse.libsparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
-from pandas.core.sorting import (get_group_index, compress_group_index,
- decons_obs_group_ids)
+from pandas.core.sorting import (get_group_index, get_compressed_ids,
+ compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
@@ -496,11 +496,6 @@ def _unstack_frame(obj, level, fill_value=None):
return unstacker.get_result()
-def get_compressed_ids(labels, sizes):
- ids = get_group_index(labels, sizes, sort=True, xnull=False)
- return compress_group_index(ids, sort=True)
-
-
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d6a1a9d98faf4..760abc20351cf 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1751,17 +1751,31 @@ def _try_kind_sort(arr):
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
+ # TODO: this can be combined with DataFrame.sort_index impl as
+ # almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
index = self.index
- if level is not None:
+
+ if level:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
- indexer = lexsort_indexer(index.labels, orders=ascending)
+ labels = index._sort_levels_monotonic()
+ indexer = lexsort_indexer(labels.labels, orders=ascending)
else:
from pandas.core.sorting import nargsort
+
+ # Check monotonic-ness before sort an index
+ # GH11080
+ if ((ascending and index.is_monotonic_increasing) or
+ (not ascending and index.is_monotonic_decreasing)):
+ if inplace:
+ return
+ else:
+ return self.copy()
+
indexer = nargsort(index, kind=kind, ascending=ascending,
na_position=na_position)
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 205d0d94d2ec3..e56a4f50de134 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -93,6 +93,27 @@ def maybe_lift(lab, size): # pormote nan values
return loop(list(labels), list(shape))
+def get_compressed_ids(labels, sizes):
+ """
+
+ Group_index is offsets into cartesian product of all possible labels. This
+ space can be huge, so this function compresses it, by computing offsets
+ (comp_ids) into the list of unique labels (obs_group_ids).
+
+ Parameters
+ ----------
+ labels : list of label arrays
+ sizes : list of size of the levels
+
+ Returns
+ -------
+ tuple of (comp_ids, obs_group_ids)
+
+ """
+ ids = get_group_index(labels, sizes, sort=True, xnull=False)
+ return compress_group_index(ids, sort=True)
+
+
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index f12b10ae682fa..96e0effbd7608 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1171,9 +1171,142 @@ def from_product(cls, iterables, sortorder=None, names=None):
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
+ return MultiIndex(levels, labels, sortorder=sortorder, names=names)
- return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
- names=names)
+ def _sort_levels_monotonic(self):
+ """
+ .. versionadded:: 0.20.0
+
+ This is an *internal* function.
+
+ create a new MultiIndex from the current to monotonically sorted
+ items IN the levels. This does not actually make the entire MultiIndex
+ monotonic, JUST the levels.
+
+ The resulting MultiIndex will have the same outward
+ appearance, meaning the same .values and ordering. It will also
+ be .equals() to the original.
+
+ Returns
+ -------
+ MultiIndex
+
+ Examples
+ --------
+
+ >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ >>> i
+ MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+
+ >>> i.sort_monotonic()
+ MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
+ labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
+
+ """
+
+ if self.is_lexsorted() and self.is_monotonic:
+ return self
+
+ new_levels = []
+ new_labels = []
+
+ for lev, lab in zip(self.levels, self.labels):
+
+ if lev.is_monotonic:
+ new_levels.append(lev)
+ new_labels.append(lab)
+ continue
+
+ # indexer to reorder the levels
+ indexer = lev.argsort()
+ lev = lev.take(indexer)
+
+ # indexer to reorder the labels
+ ri = lib.get_reverse_indexer(indexer, len(indexer))
+ lab = algos.take_1d(ri, lab)
+
+ new_levels.append(lev)
+ new_labels.append(lab)
+
+ return MultiIndex(new_levels, new_labels,
+ names=self.names, sortorder=self.sortorder,
+ verify_integrity=False)
+
+ def remove_unused_levels(self):
+ """
+ create a new MultiIndex from the current that removing
+ unused levels, meaning that they are not expressed in the labels
+
+ The resulting MultiIndex will have the same outward
+ appearance, meaning the same .values and ordering. It will also
+ be .equals() to the original.
+
+ .. versionadded:: 0.20.0
+
+ Returns
+ -------
+ MultiIndex
+
+ Examples
+ --------
+ >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
+ MultiIndex(levels=[[0, 1], ['a', 'b']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+
+
+ >>> i[2:]
+ MultiIndex(levels=[[0, 1], ['a', 'b']],
+ labels=[[1, 1], [0, 1]])
+
+ # the 0 from the first level is not represented
+ # and can be removed
+ >>> i[2:].remove_unused_levels()
+ MultiIndex(levels=[[1], ['a', 'b']],
+ labels=[[0, 0], [0, 1]])
+
+ """
+
+ new_levels = []
+ new_labels = []
+
+ changed = np.ones(self.nlevels, dtype=bool)
+ for i, (lev, lab) in enumerate(zip(self.levels, self.labels)):
+
+ uniques = algos.unique(lab)
+
+ # nothing unused
+ if len(uniques) == len(lev):
+ new_levels.append(lev)
+ new_labels.append(lab)
+ changed[i] = False
+ continue
+
+ # set difference, then reverse sort
+ diff = Index(np.arange(len(lev))).difference(uniques)
+ unused = diff.sort_values(ascending=False)
+
+ # new levels are simple
+ lev = lev.take(uniques)
+
+ # new labels, we remove the unsued
+ # by decrementing the labels for that value
+ # prob a better way
+ for u in unused:
+
+ lab = np.where(lab > u, lab - 1, lab)
+
+ new_levels.append(lev)
+ new_labels.append(lab)
+
+ # nothing changed
+ if not changed.any():
+ return self
+
+ return MultiIndex(new_levels, new_labels,
+ names=self.names, sortorder=self.sortorder,
+ verify_integrity=False)
@property
def nlevels(self):
@@ -1744,9 +1877,10 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
- raise KeyError('Key length (%d) was greater than MultiIndex'
- ' lexsort depth (%d)' %
- (len(tup), self.lexsort_depth))
+ raise UnsortedIndexError(
+ 'Key length (%d) was greater than MultiIndex'
+ ' lexsort depth (%d)' %
+ (len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 470526043234f..e93319a30d5d8 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -2411,6 +2411,80 @@ def test_is_monotonic(self):
self.assertFalse(i.is_monotonic)
+ def test_reconstruct_sort(self):
+
+ # starts off lexsorted & monotonic
+ mi = MultiIndex.from_arrays([
+ ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
+ ])
+ assert mi.is_lexsorted()
+ assert mi.is_monotonic
+
+ recons = mi._sort_levels_monotonic()
+ assert recons.is_lexsorted()
+ assert recons.is_monotonic
+ assert mi is recons
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # cannot convert to lexsorted
+ mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
+ ('x', 'b'), ('y', 'a'), ('z', 'b')],
+ names=['one', 'two'])
+ assert not mi.is_lexsorted()
+ assert not mi.is_monotonic
+
+ recons = mi._sort_levels_monotonic()
+ assert not recons.is_lexsorted()
+ assert not recons.is_monotonic
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # cannot convert to lexsorted
+ mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
+ labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
+ names=['col1', 'col2'])
+ assert not mi.is_lexsorted()
+ assert not mi.is_monotonic
+
+ recons = mi._sort_levels_monotonic()
+ assert not recons.is_lexsorted()
+ assert not recons.is_monotonic
+
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ def test_reconstruct_remove_unused(self):
+ # xref to GH 2770
+ df = DataFrame([['deleteMe', 1, 9],
+ ['keepMe', 2, 9],
+ ['keepMeToo', 3, 9]],
+ columns=['first', 'second', 'third'])
+ df2 = df.set_index(['first', 'second'], drop=False)
+ df2 = df2[df2['first'] != 'deleteMe']
+
+ # removed levels are there
+ expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],
+ [1, 2, 3]],
+ labels=[[1, 2], [1, 2]],
+ names=['first', 'second'])
+ result = df2.index
+ tm.assert_index_equal(result, expected)
+
+ expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],
+ [2, 3]],
+ labels=[[0, 1], [0, 1]],
+ names=['first', 'second'])
+ result = df2.index.remove_unused_levels()
+ tm.assert_index_equal(result, expected)
+
+ # idempotent
+ result2 = result.remove_unused_levels()
+ tm.assert_index_equal(result2, expected)
+ assert result2 is result
+
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
@@ -2699,6 +2773,30 @@ def test_unsortedindex(self):
with assertRaises(KeyError):
df.loc(axis=0)['q', :]
+ def test_unsortedindex_doc_examples(self):
+ # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa
+ dfm = DataFrame({'jim': [0, 0, 1, 1],
+ 'joe': ['x', 'x', 'z', 'y'],
+ 'jolie': np.random.rand(4)})
+
+ dfm = dfm.set_index(['jim', 'joe'])
+ with tm.assert_produces_warning(PerformanceWarning):
+ dfm.loc[(1, 'z')]
+
+ with pytest.raises(UnsortedIndexError):
+ dfm.loc[(0, 'y'):(1, 'z')]
+
+ assert not dfm.index.is_lexsorted()
+ assert dfm.index.lexsort_depth == 1
+
+ # sort it
+ dfm = dfm.sort_index()
+ dfm.loc[(1, 'z')]
+ dfm.loc[(0, 'y'):(1, 'z')]
+
+ assert dfm.index.is_lexsorted()
+ assert dfm.index.lexsort_depth == 2
+
def test_tuples_with_name_string(self):
# GH 15110 and GH 14848
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 732142f1bce9a..a682e8643d251 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1526,7 +1526,7 @@ def test_unstack(self):
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
- unstacked = s.unstack(0)
+ unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 5584c1ac6a239..914d26fcafb4a 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -11,6 +11,7 @@
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
+from pandas.core.common import UnsortedIndexError
from pandas.types.common import is_float_dtype, is_integer_dtype
import pandas.core.common as com
import pandas.util.testing as tm
@@ -2438,6 +2439,30 @@ def test_getitem_slice_not_sorted(self):
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
+ def test_frame_getitem_not_sorted2(self):
+ # 13431
+ df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
+ 'col2': [3, 1, 1, 2],
+ 'data': ['one', 'two', 'three', 'four']})
+
+ df2 = df.set_index(['col1', 'col2'])
+ df2_original = df2.copy()
+
+ df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
+ df2.index.set_labels([0, 1, 0, 2], level='col1', inplace=True)
+ assert not df2.index.is_lexsorted()
+ assert not df2.index.is_monotonic
+
+ assert df2_original.index.equals(df2.index)
+ expected = df2.sort_index()
+ assert not expected.index.is_lexsorted()
+ assert expected.index.is_monotonic
+
+ result = df2.sort_index(level=0)
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+ tm.assert_frame_equal(result, expected)
+
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
@@ -2474,3 +2499,137 @@ def test_series_getitem_not_sorted(self):
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
+
+ def test_sort_index_and_reconstruction(self):
+
+ # 15622
+ # lexsortedness should be identical
+ # across MultiIndex consruction methods
+
+ df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
+ expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex.from_tuples([(0.5, 'a'),
+ (0.5, 'b'),
+ (0.8, 'a'),
+ (0.8, 'b')]))
+ assert expected.index.is_lexsorted()
+
+ result = DataFrame(
+ [[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
+ result = result.sort_index()
+ assert result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(
+ [[1, 1], [2, 2], [1, 1], [2, 2]],
+ index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ result = result.sort_index()
+ assert result.index.is_lexsorted()
+
+ tm.assert_frame_equal(result, expected)
+
+ concatted = pd.concat([df, df], keys=[0.8, 0.5])
+ result = concatted.sort_index()
+
+ # this will be monotonic, but not lexsorted!
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ # 14015
+ df = DataFrame([[1, 2], [6, 7]],
+ columns=MultiIndex.from_tuples(
+ [(0, '20160811 12:00:00'),
+ (0, '20160809 12:00:00')],
+ names=['l1', 'Date']))
+
+ df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
+ level=1,
+ inplace=True)
+ assert not df.columns.is_lexsorted()
+ assert not df.columns.is_monotonic
+ result = df.sort_index(axis=1)
+ assert result.columns.is_lexsorted()
+ assert result.columns.is_monotonic
+ result = df.sort_index(axis=1, level=1)
+ assert result.columns.is_lexsorted()
+ assert result.columns.is_monotonic
+
+ def test_sort_index_and_reconstruction_doc_example(self):
+ # doc example
+ df = DataFrame({'value': [1, 2, 3, 4]},
+ index=MultiIndex(
+ levels=[['a', 'b'], ['bb', 'aa']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ assert df.index.is_lexsorted()
+ assert not df.index.is_monotonic
+
+ # sort it
+ expected = DataFrame({'value': [2, 1, 4, 3]},
+ index=MultiIndex(
+ levels=[['a', 'b'], ['aa', 'bb']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
+ result = df.sort_index()
+ assert not result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ # reconstruct
+ result = df.sort_index().copy()
+ result.index = result.index._sort_levels_monotonic()
+ assert result.index.is_lexsorted()
+ assert result.index.is_monotonic
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index_reorder_on_ops(self):
+ # 15687
+ df = pd.DataFrame(
+ np.random.randn(8, 2),
+ index=MultiIndex.from_product(
+ [['a', 'b'],
+ ['big', 'small'],
+ ['red', 'blu']],
+ names=['letter', 'size', 'color']),
+ columns=['near', 'far'])
+ df = df.sort_index()
+
+ def my_func(group):
+ group.index = ['newz', 'newa']
+ return group
+
+ result = df.groupby(level=['letter', 'size']).apply(
+ my_func).sort_index()
+ expected = MultiIndex.from_product(
+ [['a', 'b'],
+ ['big', 'small'],
+ ['newa', 'newz']],
+ names=['letter', 'size', None])
+
+ tm.assert_index_equal(result.index, expected)
+
+ def test_sort_non_lexsorted(self):
+ # degenerate case where we sort but don't
+ # have a satisfying result :<
+
+ idx = MultiIndex([['A', 'B', 'C'],
+ ['c', 'b', 'a']],
+ [[0, 1, 2, 0, 1, 2],
+ [0, 2, 1, 1, 0, 2]])
+
+ df = DataFrame({'col': range(len(idx))}, index=idx)
+ assert df.index.is_lexsorted() is False
+ assert df.index.is_monotonic is False
+
+ result = df.sort_index()
+ assert result.index.is_lexsorted() is False
+ assert result.index.is_monotonic is True
+
+ with pytest.raises(UnsortedIndexError):
+ result.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
diff --git a/pandas/tests/tools/test_hashing.py b/pandas/tests/tools/test_hashing.py
index 9bed0d428bc41..864b5018abc75 100644
--- a/pandas/tests/tools/test_hashing.py
+++ b/pandas/tests/tools/test_hashing.py
@@ -87,6 +87,35 @@ def test_multiindex_unique(self):
result = hash_pandas_object(mi)
self.assertTrue(result.is_unique)
+ def test_multiindex_objects(self):
+ mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
+ labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
+ names=['col1', 'col2'])
+ recons = mi._sort_levels_monotonic()
+
+ # these are equal
+ assert mi.equals(recons)
+ assert Index(mi.values).equals(Index(recons.values))
+
+ # _hashed_values and hash_pandas_object(..., index=False)
+ # equivalency
+ expected = hash_pandas_object(
+ mi, index=False).values
+ result = mi._hashed_values
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = hash_pandas_object(
+ recons, index=False).values
+ result = recons._hashed_values
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = mi._hashed_values
+ result = recons._hashed_values
+
+ # values should match, but in different order
+ tm.assert_numpy_array_equal(np.sort(result),
+ np.sort(expected))
+
def test_hash_pandas_object(self):
for obj in [Series([1, 2, 3]),
diff --git a/pandas/tests/tools/test_pivot.py b/pandas/tests/tools/test_pivot.py
index 4502f232c6d9c..c8dfaf5e29bc6 100644
--- a/pandas/tests/tools/test_pivot.py
+++ b/pandas/tests/tools/test_pivot.py
@@ -2,6 +2,7 @@
import numpy as np
+from collections import OrderedDict
import pandas as pd
from pandas import (DataFrame, Series, Index, MultiIndex,
Grouper, date_range, concat)
@@ -513,7 +514,7 @@ def test_pivot_columns_lexsorted(self):
self.assertTrue(pivoted.columns.is_monotonic)
def test_pivot_complex_aggfunc(self):
- f = {'D': ['std'], 'E': ['sum']}
+ f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
| closes #15622
closes #15687
closes #14015
closes #13431
nice bump on Series.sort_index for monotonic
```
before after ratio
[37e5f78b] [a6f352c0]
- 1.86ms 100.07μs 0.05 timeseries.TimeSeries.time_sort_index_monotonic
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/15694 | 2017-03-15T23:09:07Z | 2017-04-07T15:18:25Z | null | 2017-04-07T15:19:55Z |
ENH: use constant f32 eps, not np.finfo() during import | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 9c6f5d3e0596d..f0bfd6a86abc3 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -920,3 +920,4 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+- Use of ``np.finfo()`` during `import pandas` removed to mitigate deadlock on Python GIL misuse (:issue:`14641`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 19b7771251da3..c80e8c34aa88f 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1852,7 +1852,7 @@ def _convert_key(self, key, is_setter=False):
# 32-bit floating point machine epsilon
-_eps = np.finfo('f4').eps
+_eps = 1.1920929e-07
def length_of_indexer(indexer, target=None):
| NumPy docs for `np.finfo()` say not to call it during import (at module scope).
It's a relatively expensive call, and it modifies the GIL state.
Now we just hard-code it, because it is always the value anyway.
This avoids touching the GIL at import, which helps avoid deadlocks in practice.
- [x] closes #14641
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15691 | 2017-03-15T12:52:54Z | 2017-03-15T16:05:50Z | null | 2017-03-15T16:17:44Z |
compatibility with scipy 0.19 | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 9c6f5d3e0596d..c3fd484d2bc9c 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -308,6 +308,7 @@ Other enhancements
- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. (:issue:`15536`)
+- ``pd.test()`` will now pass with SciPy 0.19.0. (:issue:`15662`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 6fda60c449f42..9c9f861451309 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -544,7 +544,8 @@ def _pop_args(win_type, arg_names, kwargs):
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
- return sig.get_window(win_type, window).astype(float)
+ # GH #15662. `False` makes symmetric window, rather than periodic.
+ return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, how=None, **kwargs):
"""
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 923ed2e7c3444..93c3ba78a0abf 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -19,6 +19,13 @@
from pandas.tests.frame.common import TestData, _check_mixed_float
+try:
+ import scipy
+ _is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
+except:
+ _is_scipy_ge_0190 = False
+
+
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
@@ -548,7 +555,7 @@ def test_interp_nan_idx(self):
df.interpolate(method='values')
def test_interp_various(self):
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
@@ -561,8 +568,15 @@ def test_interp_various(self):
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
- expected.A.loc[3] = 2.81621174
- expected.A.loc[13] = 5.64146581
+ # GH #15662.
+ # new cubic and quadratic interpolation algorithms from scipy 0.19.0.
+ # previously `splmake` was used. See scipy/scipy#6710
+ if _is_scipy_ge_0190:
+ expected.A.loc[3] = 2.81547781
+ expected.A.loc[13] = 5.52964175
+ else:
+ expected.A.loc[3] = 2.81621174
+ expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
@@ -571,8 +585,12 @@ def test_interp_various(self):
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
- expected.A.loc[3] = 2.82533638
- expected.A.loc[13] = 6.02817974
+ if _is_scipy_ge_0190:
+ expected.A.loc[3] = 2.82150771
+ expected.A.loc[13] = 6.12648668
+ else:
+ expected.A.loc[3] = 2.82533638
+ expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
@@ -585,11 +603,6 @@ def test_interp_various(self):
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
- result = df.interpolate(method='quadratic')
- expected.A.loc[3] = 2.82533638
- expected.A.loc[13] = 6.02817974
- assert_frame_equal(result, expected)
-
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 9e997da517bf6..7174283494fe7 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -4,6 +4,7 @@
import pytz
from datetime import timedelta, datetime
+from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
@@ -17,6 +18,12 @@
from .common import TestData
+try:
+ import scipy
+ _is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
+except:
+ _is_scipy_ge_0190 = False
+
def _skip_if_no_pchip():
try:
@@ -827,7 +834,7 @@ def test_interp_quad(self):
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
@@ -852,7 +859,13 @@ def test_interp_scipy_basic(self):
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
- expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
+ # GH #15662.
+ # new cubic and quadratic interpolation algorithms from scipy 0.19.0.
+ # previously `splmake` was used. See scipy/scipy#6710
+ if _is_scipy_ge_0190:
+ expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
+ else:
+ expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index b7164d31b2a5e..3f2973a9834ca 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -905,7 +905,7 @@ def test_cmov_window_na_min_periods(self):
def test_cmov_window_regular(self):
# GH 8238
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -938,7 +938,7 @@ def test_cmov_window_regular(self):
def test_cmov_window_regular_linear_range(self):
# GH 8238
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -955,7 +955,7 @@ def test_cmov_window_regular_linear_range(self):
def test_cmov_window_regular_missing_data(self):
# GH 8238
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -988,7 +988,7 @@ def test_cmov_window_regular_missing_data(self):
def test_cmov_window_special(self):
# GH 8238
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
@@ -1015,7 +1015,7 @@ def test_cmov_window_special(self):
def test_cmov_window_special_linear_range(self):
# GH 8238
- tm.skip_if_no_package('scipy', max_version='0.19.0')
+ tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
| fix #15662 | https://api.github.com/repos/pandas-dev/pandas/pulls/15689 | 2017-03-14T18:52:27Z | 2017-03-15T13:30:35Z | null | 2017-03-16T16:17:05Z |
BUG: Group-by numeric type-coercion with datetime | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 8a4f2f47b9853..ce5111e2725e2 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -886,3 +886,4 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+- Bug in ``groupby.apply()`` coercing ``object`` series to numeric types, when not all values were numeric (:issue:`15680`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index a10be078a8f96..7a017ffae284c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -10,6 +10,7 @@
zip, range, lzip,
callable, map
)
+
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat.numpy import _np_version_under1p8
@@ -3424,6 +3425,7 @@ def _decide_output_index(self, output, labels):
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
+ from pandas.tools.util import to_numeric
if len(keys) == 0:
return DataFrame(index=keys)
@@ -3566,7 +3568,8 @@ def first_non_None_value(values):
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
- result = result._convert(numeric=True)
+ result = result.apply(
+ lambda x: to_numeric(x, errors='ignore'))
date_cols = self._selected_obj.select_dtypes(
include=['datetime', 'timedelta']).columns
date_cols = date_cols.intersection(result.columns)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index d7fa3beda0abf..c25974c94bfd1 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -4314,6 +4314,54 @@ def test_cummin_cummax(self):
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
+ def test_apply_numeric_coercion_when_datetime(self):
+ # In the past, group-by/apply operations have been over-eager
+ # in converting dtypes to numeric, in the presence of datetime
+ # columns. Various GH issues were filed, the reproductions
+ # for which are here.
+
+ # GH 15670
+ df = pd.DataFrame({'Number': [1, 2],
+ 'Date': ["2017-03-02"] * 2,
+ 'Str': ["foo", "inf"]})
+ expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
+ df.Date = pd.to_datetime(df.Date)
+ result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
+ tm.assert_series_equal(result['Str'], expected['Str'])
+
+ # GH 15421
+ df = pd.DataFrame({'A': [10, 20, 30],
+ 'B': ['foo', '3', '4'],
+ 'T': [pd.Timestamp("12:31:22")] * 3})
+
+ def get_B(g):
+ return g.iloc[0][['B']]
+ result = df.groupby('A').apply(get_B)['B']
+ expected = df.B
+ expected.index = df.A
+ tm.assert_series_equal(result, expected)
+
+ # GH 14423
+ def predictions(tool):
+ out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
+ if 'step1' in list(tool.State):
+ out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
+ if 'step2' in list(tool.State):
+ out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
+ out['useTime'] = str(
+ tool[tool.State == 'step2'].oTime.values[0])
+ return out
+ df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
+ 'State': ['step1', 'step2', 'step1', 'step2'],
+ 'oTime': ['', '2016-09-19 05:24:33',
+ '', '2016-09-19 23:59:04'],
+ 'Machine': ['23', '36L', '36R', '36R']})
+ df2 = df1.copy()
+ df2.oTime = pd.to_datetime(df2.oTime)
+ expected = df1.groupby('Key').apply(predictions).p1
+ result = df2.groupby('Key').apply(predictions).p1
+ tm.assert_series_equal(expected, result)
+
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
| closes #14423
closes #15421
closes #15670
During a group-by/apply on a DataFrame, in the presence of one or more
DateTime-like columns, Pandas would incorrectly coerce the type of all
other columns to numeric. E.g. a String column would be coerced to
numeric, producing NaNs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15680 | 2017-03-14T10:57:12Z | 2017-03-16T12:08:59Z | null | 2017-03-16T12:08:59Z |
BUG: Allow multiple 'by' parameters in merge_asof() when DataFrames are indexed (#15676) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 8a4f2f47b9853..10106845b08a9 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -851,6 +851,7 @@ Bug Fixes
- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
+- Bug in ``pd.merge_asof()`` where ``left_index`` or ``right_index`` caused a failure when multiple ``by`` was specified (:issue:`15676`)
- Bug in ``pd.merge_asof()`` where ``left_index``/``right_index`` together caused a failure when ``tolerance`` was specified (:issue:`15135`)
- Bug in ``DataFrame.pivot_table()`` where ``dropna=True`` would not drop all-NaN columns when the columns was a ``category`` dtype (:issue:`15193`)
diff --git a/pandas/tests/tools/test_merge_asof.py b/pandas/tests/tools/test_merge_asof.py
index cdff8f0349c15..c9460cc74c94a 100644
--- a/pandas/tests/tools/test_merge_asof.py
+++ b/pandas/tests/tools/test_merge_asof.py
@@ -368,6 +368,41 @@ def test_multiby_heterogeneous_types(self):
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
+ def test_multiby_indexed(self):
+ # GH15676
+ left = pd.DataFrame([
+ [pd.to_datetime('20160602'), 1, 'a'],
+ [pd.to_datetime('20160602'), 2, 'a'],
+ [pd.to_datetime('20160603'), 1, 'b'],
+ [pd.to_datetime('20160603'), 2, 'b']],
+ columns=['time', 'k1', 'k2']).set_index('time')
+
+ right = pd.DataFrame([
+ [pd.to_datetime('20160502'), 1, 'a', 1.0],
+ [pd.to_datetime('20160502'), 2, 'a', 2.0],
+ [pd.to_datetime('20160503'), 1, 'b', 3.0],
+ [pd.to_datetime('20160503'), 2, 'b', 4.0]],
+ columns=['time', 'k1', 'k2', 'value']).set_index('time')
+
+ expected = pd.DataFrame([
+ [pd.to_datetime('20160602'), 1, 'a', 1.0],
+ [pd.to_datetime('20160602'), 2, 'a', 2.0],
+ [pd.to_datetime('20160603'), 1, 'b', 3.0],
+ [pd.to_datetime('20160603'), 2, 'b', 4.0]],
+ columns=['time', 'k1', 'k2', 'value']).set_index('time')
+
+ result = pd.merge_asof(left,
+ right,
+ left_index=True,
+ right_index=True,
+ by=['k1', 'k2'])
+
+ assert_frame_equal(expected, result)
+
+ with self.assertRaises(MergeError):
+ pd.merge_asof(left, right, left_index=True, right_index=True,
+ left_by=['k1', 'k2'], right_by=['k1'])
+
def test_basic2(self):
expected = self.read_data('asof2.csv')
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index d02f4c5b26c86..261884bba54bd 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1165,7 +1165,7 @@ def _validate_specification(self):
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
- # add by to our key-list so we can have it in the
+ # add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
@@ -1173,6 +1173,9 @@ def _validate_specification(self):
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
+ if len(self.left_by) != len(self.right_by):
+ raise MergeError('left_by and right_by must be same length')
+
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
@@ -1264,13 +1267,21 @@ def flip(xs):
# a "by" parameter requires special handling
if self.left_by is not None:
- if len(self.left_join_keys) > 2:
- # get tuple representation of values if more than one
- left_by_values = flip(self.left_join_keys[0:-1])
- right_by_values = flip(self.right_join_keys[0:-1])
+ # remove 'on' parameter from values if one existed
+ if self.left_index and self.right_index:
+ left_by_values = self.left_join_keys
+ right_by_values = self.right_join_keys
+ else:
+ left_by_values = self.left_join_keys[0:-1]
+ right_by_values = self.right_join_keys[0:-1]
+
+ # get tuple representation of values if more than one
+ if len(left_by_values) == 1:
+ left_by_values = left_by_values[0]
+ right_by_values = right_by_values[0]
else:
- left_by_values = self.left_join_keys[0]
- right_by_values = self.right_join_keys[0]
+ left_by_values = flip(left_by_values)
+ right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
| - [x] closes #15676
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15679 | 2017-03-14T02:28:02Z | 2017-03-14T14:06:40Z | null | 2017-03-14T14:11:13Z |
CLN: push key coercion to the indexes with Index._convert_list_indexer | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 546cbd8337e7e..19b7771251da3 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -7,7 +7,6 @@
from pandas.types.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.types.common import (is_integer_dtype,
is_integer, is_float,
- is_categorical_dtype,
is_list_like,
is_sequence,
is_iterator,
@@ -1087,51 +1086,24 @@ def _getitem_iterable(self, key, axis=0):
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
- if isinstance(key, Index):
- keyarr = labels._convert_index_indexer(key)
- else:
- keyarr = _asarray_tuplesafe(key)
- keyarr = labels._convert_arr_indexer(keyarr)
-
- if is_categorical_dtype(labels):
- keyarr = labels._shallow_copy(keyarr)
-
- # have the index handle the indexer and possibly return
- # an indexer or raising
- indexer = labels._convert_list_indexer(keyarr, kind=self.name)
+ # Have the index compute an indexer or return None
+ # if it cannot handle
+ indexer, keyarr = labels._convert_listlike_indexer(
+ key, kind=self.name)
if indexer is not None:
return self.obj.take(indexer, axis=axis)
- # this is not the most robust, but...
- if (isinstance(labels, MultiIndex) and len(keyarr) and
- not isinstance(keyarr[0], tuple)):
- level = 0
- else:
- level = None
-
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
- result = self.obj.reindex_axis(keyarr, axis=axis,
- level=level)
-
- # this is an error as we are trying to find
- # keys in a multi-index that don't exist
- if isinstance(labels, MultiIndex) and level is not None:
- if (hasattr(result, 'ndim') and
- not np.prod(result.shape) and len(keyarr)):
- raise KeyError("cannot index a multi-index axis "
- "with these keys")
-
- return result
-
+ return self.obj.reindex_axis(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
- return self.obj.reindex(keyarr, level=level)
+ return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
@@ -1225,49 +1197,33 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False):
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
+
elif is_list_like_indexer(obj):
+
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
- if isinstance(obj, Index):
- # want Index objects to pass through untouched
- objarr = obj
- else:
- objarr = _asarray_tuplesafe(obj)
- # The index may want to handle a list indexer differently
- # by returning an indexer or raising
- indexer = labels._convert_list_indexer(objarr, kind=self.name)
+ # Have the index compute an indexer or return None
+ # if it cannot handle
+ indexer, objarr = labels._convert_listlike_indexer(
+ obj, kind=self.name)
if indexer is not None:
return indexer
- # this is not the most robust, but...
- if (isinstance(labels, MultiIndex) and
- not isinstance(objarr[0], tuple)):
- level = 0
- _, indexer = labels.reindex(objarr, level=level)
+ # unique index
+ if labels.is_unique:
+ indexer = check = labels.get_indexer(objarr)
- # take all
- if indexer is None:
- indexer = np.arange(len(labels))
-
- check = labels.levels[0].get_indexer(objarr)
+ # non-unique (dups)
else:
- level = None
-
- # unique index
- if labels.is_unique:
- indexer = check = labels.get_indexer(objarr)
-
- # non-unique (dups)
- else:
- (indexer,
- missing) = labels.get_indexer_non_unique(objarr)
- # 'indexer' has dupes, create 'check' using 'missing'
- check = np.zeros_like(objarr)
- check[missing] = -1
+ (indexer,
+ missing) = labels.get_indexer_non_unique(objarr)
+ # 'indexer' has dupes, create 'check' using 'missing'
+ check = np.zeros_like(objarr)
+ check[missing] = -1
mask = check == -1
if mask.any():
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 7f46f437489a1..5b942e2565c29 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1339,6 +1339,27 @@ def is_int(v):
return indexer
+ def _convert_listlike_indexer(self, keyarr, kind=None):
+ """
+ Parameters
+ ----------
+ keyarr : list-like
+ Indexer to convert.
+
+ Returns
+ -------
+ tuple (indexer, keyarr)
+ indexer is an ndarray or None if cannot convert
+ keyarr are tuple-safe keys
+ """
+ if isinstance(keyarr, Index):
+ keyarr = self._convert_index_indexer(keyarr)
+ else:
+ keyarr = self._convert_arr_indexer(keyarr)
+
+ indexer = self._convert_list_indexer(keyarr, kind=kind)
+ return indexer, keyarr
+
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
@@ -1354,6 +1375,7 @@ def is_int(v):
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
+ keyarr = _asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
@@ -1373,6 +1395,21 @@ def _convert_arr_indexer(self, keyarr):
def _convert_index_indexer(self, keyarr):
return keyarr
+ _index_shared_docs['_convert_list_indexer'] = """
+ Convert a list-like indexer to the appropriate dtype.
+
+ Parameters
+ ----------
+ keyarr : Index (or sub-class)
+ Indexer to convert.
+ kind : iloc, ix, loc, optional
+
+ Returns
+ -------
+ positional indexer or None
+ """
+
+ @Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
"""
passed a key that is tuplesafe that is integer based
diff --git a/pandas/indexes/category.py b/pandas/indexes/category.py
index 3d8f76fc56b01..923dd4ec785c5 100644
--- a/pandas/indexes/category.py
+++ b/pandas/indexes/category.py
@@ -18,6 +18,8 @@
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.indexes.base as ibase
+from pandas.core.common import _asarray_tuplesafe
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
@@ -458,12 +460,10 @@ def get_indexer_non_unique(self, target):
codes = self.categories.get_indexer(target)
return self._engine.get_indexer_non_unique(codes)
+ @Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
- """
- we are passed a list indexer.
- Return our indexer or raise if all of the values are not included in
- the categories
- """
+ # Return our indexer or raise if all of the values are not included in
+ # the categories
codes = self.categories.get_indexer(keyarr)
if (codes == -1).any():
raise KeyError("a list-indexer must only include values that are "
@@ -471,6 +471,15 @@ def _convert_list_indexer(self, keyarr, kind=None):
return None
+ @Appender(_index_shared_docs['_convert_arr_indexer'])
+ def _convert_arr_indexer(self, keyarr):
+ keyarr = _asarray_tuplesafe(keyarr)
+ return self._shallow_copy(keyarr)
+
+ @Appender(_index_shared_docs['_convert_index_indexer'])
+ def _convert_index_indexer(self, keyarr):
+ return self._shallow_copy(keyarr)
+
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index bca1db83b6645..1c1609fed1dd1 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1568,6 +1568,39 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
return new_index, indexer
+ def _convert_listlike_indexer(self, keyarr, kind=None):
+ """
+ Parameters
+ ----------
+ keyarr : list-like
+ Indexer to convert.
+
+ Returns
+ -------
+ tuple (indexer, keyarr)
+ indexer is an ndarray or None if cannot convert
+ keyarr are tuple-safe keys
+ """
+ indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
+ keyarr, kind=kind)
+
+ # are we indexing a specific level
+ if indexer is None and len(keyarr) and not isinstance(keyarr[0],
+ tuple):
+ level = 0
+ _, indexer = self.reindex(keyarr, level=level)
+
+ # take all
+ if indexer is None:
+ indexer = np.arange(len(self))
+
+ check = self.levels[0].get_indexer(keyarr)
+ mask = check == -1
+ if mask.any():
+ raise KeyError('%s not in index' % keyarr[mask])
+
+ return indexer, keyarr
+
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py
index 9bb70feb2501f..2f897c81975c2 100644
--- a/pandas/indexes/numeric.py
+++ b/pandas/indexes/numeric.py
@@ -203,6 +203,7 @@ def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
+ keyarr = _asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return _asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
| CLN: push key coercion to the indexes themselves to simplify a bit
| https://api.github.com/repos/pandas-dev/pandas/pulls/15678 | 2017-03-13T22:21:37Z | 2017-03-13T23:49:43Z | 2017-03-13T23:49:43Z | 2017-03-13T23:50:39Z |
API: df.rolling(..).corr()/cov() when pairwise=True to return MI DataFrame | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 57480a244f308..315dd122b96cc 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -505,13 +505,18 @@ two ``Series`` or any combination of ``DataFrame/Series`` or
- ``DataFrame/DataFrame``: by default compute the statistic for matching column
names, returning a DataFrame. If the keyword argument ``pairwise=True`` is
passed then computes the statistic for each pair of columns, returning a
- ``Panel`` whose ``items`` are the dates in question (see :ref:`the next section
+ ``MultiIndexed DataFrame`` whose ``index`` are the dates in question (see :ref:`the next section
<stats.moments.corr_pairwise>`).
For example:
.. ipython:: python
+ df = pd.DataFrame(np.random.randn(1000, 4),
+ index=pd.date_range('1/1/2000', periods=1000),
+ columns=['A', 'B', 'C', 'D'])
+ df = df.cumsum()
+
df2 = df[:20]
df2.rolling(window=5).corr(df2['B'])
@@ -520,11 +525,16 @@ For example:
Computing rolling pairwise covariances and correlations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ Prior to version 0.20.0 if ``pairwise=True`` was passed, a ``Panel`` would be returned.
+ This will now return a 2-level MultiIndexed DataFrame, see the whatsnew :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>`
+
In financial data analysis and other fields it's common to compute covariance
and correlation matrices for a collection of time series. Often one is also
interested in moving-window covariance and correlation matrices. This can be
done by passing the ``pairwise`` keyword argument, which in the case of
-``DataFrame`` inputs will yield a ``Panel`` whose ``items`` are the dates in
+``DataFrame`` inputs will yield a ``MultiIndexed DataFrame`` whose ``index`` are the dates in
question. In the case of a single DataFrame argument the ``pairwise`` argument
can even be omitted:
@@ -539,12 +549,12 @@ can even be omitted:
.. ipython:: python
covs = df[['B','C','D']].rolling(window=50).cov(df[['A','B','C']], pairwise=True)
- covs[df.index[-50]]
+ covs.unstack(-1).iloc[-50]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls[df.index[-50]]
+ correls.unstack(-1).iloc[-50]
You can efficiently retrieve the time series of correlations between two
columns using ``.loc`` indexing:
@@ -557,7 +567,7 @@ columns using ``.loc`` indexing:
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.loc[:, 'A', 'C'].plot()
+ correls.unstack(-1).[('A', 'C')].plot()
.. _stats.aggregate:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 74fe7916523c5..2b344087bdbea 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -12,11 +12,13 @@ Highlights include:
- The ``.ix`` indexer has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_ix>`
- Improved user API when accessing levels in ``.groupby()``, see :ref:`here <whatsnew_0200.enhancements.groupby_access>`
- Improved support for UInt64 dtypes, see :ref:`here <whatsnew_0200.enhancements.uint64_support>`
+- Window Binary Corr/Cov operations return a MultiIndex DataFrame rather than a Panel, see :ref:`here <whhatsnew_0200.api_breaking.rolling_pairwise>`
- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref:`here <whatsnew_0200.enhancements.table_schema>`
- Support for S3 handling now uses ``s3fs``, see :ref:`here <whatsnew_0200.api_breaking.s3>`
- Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here <whatsnew_0200.api_breaking.gbq>`
- Switched the test framework to use `pytest <http://doc.pytest.org/en/latest>`__ (:issue:`13097`)
+
Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating.
.. contents:: What's new in v0.20.0
@@ -766,6 +768,50 @@ New Behavior:
df.groupby('A').agg([np.mean, np.std, np.min, np.max])
+.. _whatsnew_0200.api_breaking.rolling_pairwise:
+
+Window Binary Corr/Cov operations return a MultiIndex DataFrame
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A binary window operation, like ``.corr()`` or ``.cov()``, when operating on a ``.rolling(..)``, ``.expanding(..)``, or ``.ewm(..)`` object,
+will now return a 2-level ``MultiIndexed DataFrame`` rather than a ``Panel``. These are equivalent in function,
+but MultiIndexed DataFrames enjoy more support in pandas.
+See the section on :ref:`Windowed Binary Operations <stats.moments.binary>` for more information. (:issue:`15677`)
+
+.. ipython:: python
+
+ np.random.seed(1234)
+ df = pd.DataFrame(np.random.rand(100, 2),
+ columns=pd.Index(['A', 'B'], name='bar'),
+ index=pd.date_range('20160101',
+ periods=100, freq='D', name='foo'))
+ df
+
+Old Behavior:
+
+.. code-block:: ipython
+
+ In [2]: df.rolling(12).corr()
+ Out[2]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 100 (items) x 2 (major_axis) x 2 (minor_axis)
+ Items axis: 2016-01-01 00:00:00 to 2016-04-09 00:00:00
+ Major_axis axis: A to B
+ Minor_axis axis: A to B
+
+New Behavior:
+
+.. ipython:: python
+
+ res = df.rolling(12).corr()
+ res
+
+Retrieving a correlation matrix for a cross-section
+
+.. ipython:: python
+
+ df.rolling(12).corr().loc['2016-04-07']
+
.. _whatsnew_0200.api_breaking.hdfstore_where:
HDFStore where string comparison
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 9c9f861451309..a61d5b6d90dae 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -1652,7 +1652,8 @@ def _cov(x, y):
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
- from pandas import Series, DataFrame, Panel
+ from pandas import Series, DataFrame
+
if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and
isinstance(arg2, (np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
@@ -1703,12 +1704,36 @@ def dataframe_from_int_dict(data, frame_template):
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
+
+ # TODO: not the most efficient (perf-wise)
+ # though not bad code-wise
+ from pandas import Panel, MultiIndex, Index
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
- return p
+
+ if len(p.items):
+ result = pd.concat(
+ [p.iloc[i].T for i in range(len(p.items))],
+ keys=p.items)
+ else:
+
+ result = DataFrame(
+ index=MultiIndex(levels=[arg1.index, arg1.columns],
+ labels=[[], []]),
+ columns=arg2.columns,
+ dtype='float64')
+
+ # reset our names to arg1 names
+ # careful not to mutate the original names
+ result.columns = Index(result.columns).set_names(None)
+ result.index = result.index.set_names(
+ [arg1.index.name, arg1.columns.name])
+
+ return result
+
else:
raise ValueError("'pairwise' is not True/False")
else:
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index f12b10ae682fa..9e1ccde9ddc23 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -2069,20 +2069,14 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
else:
loc = level_index.get_loc(key)
- if level > 0 or self.lexsort_depth == 0:
+ if isinstance(loc, slice):
+ return loc
+ elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
- else:
- # sorted, so can return slice object -> view
- try:
- loc = labels.dtype.type(loc)
- except TypeError:
- # this occurs when loc is a slice (partial string indexing)
- # but the TypeError raised by searchsorted in this case
- # is catched in Index._has_valid_type()
- pass
- i = labels.searchsorted(loc, side='left')
- j = labels.searchsorted(loc, side='right')
- return slice(i, j)
+
+ i = labels.searchsorted(loc, side='left')
+ j = labels.searchsorted(loc, side='right')
+ return slice(i, j)
def get_locs(self, tup):
"""
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index ceb12c6c03074..c75731882e231 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -10,8 +10,8 @@
from distutils.version import LooseVersion
import pandas as pd
-from pandas import (Series, DataFrame, Panel, bdate_range, isnull,
- notnull, concat, Timestamp)
+from pandas import (Series, DataFrame, bdate_range, isnull,
+ notnull, concat, Timestamp, Index)
import pandas.stats.moments as mom
import pandas.core.window as rwindow
import pandas.tseries.offsets as offsets
@@ -172,7 +172,7 @@ def test_agg_consistency(self):
tm.assert_index_equal(result, expected)
result = r['A'].agg([np.sum, np.mean]).columns
- expected = pd.Index(['sum', 'mean'])
+ expected = Index(['sum', 'mean'])
tm.assert_index_equal(result, expected)
result = r.agg({'A': [np.sum, np.mean]}).columns
@@ -1688,6 +1688,162 @@ def _check_ew_structures(self, func, name):
self.assertEqual(type(frame_result), DataFrame)
+class TestPairwise(object):
+
+ # GH 7738
+ df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
+ columns=['C', 'C']),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
+ DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
+ columns=[1, 0.]),
+ DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
+ columns=[0, 1.]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
+ columns=[1., 'X']), ]
+ df2 = DataFrame([[None, 1, 1], [None, 1, 2],
+ [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
+ s = Series([1, 1, 3, 8])
+
+ def compare(self, result, expected):
+
+ # since we have sorted the results
+ # we can only compare non-nans
+ result = result.dropna().values
+ expected = expected.dropna().values
+
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
+ def test_no_flex(self, f):
+
+ # DataFrame methods (which do not call _flex_binary_moment())
+
+ with warnings.catch_warnings(record=True):
+
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.columns)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x: x.expanding().cov(pairwise=True),
+ lambda x: x.expanding().corr(pairwise=True),
+ lambda x: x.rolling(window=3).cov(pairwise=True),
+ lambda x: x.rolling(window=3).corr(pairwise=True),
+ lambda x: x.ewm(com=3).cov(pairwise=True),
+ lambda x: x.ewm(com=3).corr(pairwise=True)])
+ def test_pairwise_with_self(self, f):
+
+ # DataFrame with itself, pairwise=True
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index.levels[0],
+ df.index,
+ check_names=False)
+ tm.assert_index_equal(result.index.levels[1],
+ df.columns,
+ check_names=False)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x: x.expanding().cov(pairwise=False),
+ lambda x: x.expanding().corr(pairwise=False),
+ lambda x: x.rolling(window=3).cov(pairwise=False),
+ lambda x: x.rolling(window=3).corr(pairwise=False),
+ lambda x: x.ewm(com=3).cov(pairwise=False),
+ lambda x: x.ewm(com=3).corr(pairwise=False), ])
+ def test_no_pairwise_with_self(self, f):
+
+ # DataFrame with itself, pairwise=False
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.index)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y, pairwise=True),
+ lambda x, y: x.expanding().corr(y, pairwise=True),
+ lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
+ lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
+ lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
+ lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])
+ def test_pairwise_with_other(self, f):
+
+ # DataFrame with another DataFrame, pairwise=True
+ results = [f(df, self.df2) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index.levels[0],
+ df.index,
+ check_names=False)
+ tm.assert_index_equal(result.index.levels[1],
+ self.df2.columns,
+ check_names=False)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y, pairwise=False),
+ lambda x, y: x.expanding().corr(y, pairwise=False),
+ lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
+ lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
+ lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
+ lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])
+ def test_no_pairwise_with_other(self, f):
+
+ with warnings.catch_warnings(record=True):
+
+ # DataFrame with another DataFrame, pairwise=False
+ results = [f(df, self.df2) if df.columns.is_unique else None
+ for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ if result is not None:
+ expected_index = df.index.union(self.df2.index)
+ expected_columns = df.columns.union(self.df2.columns)
+ tm.assert_index_equal(result.index, expected_index)
+ tm.assert_index_equal(result.columns, expected_columns)
+ else:
+ tm.assertRaisesRegexp(
+ ValueError, "'arg1' columns are not unique", f, df,
+ self.df2)
+ tm.assertRaisesRegexp(
+ ValueError, "'arg2' columns are not unique", f,
+ self.df2, df)
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y),
+ lambda x, y: x.expanding().corr(y),
+ lambda x, y: x.rolling(window=3).cov(y),
+ lambda x, y: x.rolling(window=3).corr(y),
+ lambda x, y: x.ewm(com=3).cov(y),
+ lambda x, y: x.ewm(com=3).corr(y), ])
+ def test_pairwise_with_series(self, f):
+
+ # DataFrame with a Series
+ results = ([f(df, self.s) for df in self.df1s] +
+ [f(self.s, df) for df in self.df1s])
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.index)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
@@ -2083,21 +2239,6 @@ def test_expanding_consistency(self):
assert_equal(expanding_f_result,
expanding_apply_f_result)
- if (name in ['cov', 'corr']) and isinstance(x,
- DataFrame):
- # test pairwise=True
- expanding_f_result = expanding_f(x, pairwise=True)
- expected = Panel(items=x.index,
- major_axis=x.columns,
- minor_axis=x.columns)
- for i, _ in enumerate(x.columns):
- for j, _ in enumerate(x.columns):
- expected.iloc[:, i, j] = getattr(
- x.iloc[:, i].expanding(
- min_periods=min_periods),
- name)(x.iloc[:, j])
- tm.assert_panel_equal(expanding_f_result, expected)
-
@tm.slow
def test_rolling_consistency(self):
@@ -2203,25 +2344,6 @@ def cases():
assert_equal(rolling_f_result,
rolling_apply_f_result)
- if (name in ['cov', 'corr']) and isinstance(
- x, DataFrame):
- # test pairwise=True
- rolling_f_result = rolling_f(x,
- pairwise=True)
- expected = Panel(items=x.index,
- major_axis=x.columns,
- minor_axis=x.columns)
- for i, _ in enumerate(x.columns):
- for j, _ in enumerate(x.columns):
- expected.iloc[:, i, j] = (
- getattr(
- x.iloc[:, i]
- .rolling(window=window,
- min_periods=min_periods,
- center=center),
- name)(x.iloc[:, j]))
- tm.assert_panel_equal(rolling_f_result, expected)
-
# binary moments
def test_rolling_cov(self):
A = self.series
@@ -2257,11 +2379,11 @@ def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
- panel = get_result(self.frame)
- actual = panel.loc[:, 1, 5]
+ result = get_result(self.frame)
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
expected = get_result(self.frame[1], self.frame[5])
- tm.assert_series_equal(actual, expected, check_names=False)
- self.assertEqual(actual.name, 5)
+ tm.assert_series_equal(result, expected, check_names=False)
def test_flex_binary_moment(self):
# GH3155
@@ -2429,17 +2551,14 @@ def test_expanding_cov_pairwise(self):
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
- for i in result.items:
- tm.assert_almost_equal(result[i], rolling_result[i])
+ tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
-
- for i in result.items:
- tm.assert_almost_equal(result[i], rolling_result[i])
+ tm.assert_frame_equal(result, rolling_result)
def test_expanding_cov_diff_index(self):
# GH 7512
@@ -2507,8 +2626,6 @@ def test_rolling_functions_window_non_shrinkage(self):
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
- df_expected_panel = Panel(items=df.index, major_axis=df.columns,
- minor_axis=df.columns)
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=False)),
@@ -2540,13 +2657,24 @@ def test_rolling_functions_window_non_shrinkage(self):
# scipy needed for rolling_window
continue
+ def test_rolling_functions_window_non_shrinkage_binary(self):
+
+ # corr/cov return a MI DataFrame
+ df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],
+ columns=Index(['A', 'B'], name='foo'),
+ index=Index(range(4), name='bar'))
+ df_expected = DataFrame(
+ columns=Index(['A', 'B']),
+ index=pd.MultiIndex.from_product([df.index, df.columns],
+ names=['bar', 'foo']),
+ dtype='float64')
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True))]
for f in functions:
- df_result_panel = f(df)
- tm.assert_panel_equal(df_result_panel, df_expected_panel)
+ df_result = f(df)
+ tm.assert_frame_equal(df_result, df_expected)
def test_moment_functions_zero_length(self):
# GH 8056
@@ -2554,13 +2682,9 @@ def test_moment_functions_zero_length(self):
s_expected = s
df1 = DataFrame()
df1_expected = df1
- df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns,
- minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
- df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns,
- minor_axis=df2.columns)
functions = [lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(
@@ -2613,6 +2737,23 @@ def test_moment_functions_zero_length(self):
# scipy needed for rolling_window
continue
+ def test_moment_functions_zero_length_pairwise(self):
+
+ df1 = DataFrame()
+ df1_expected = df1
+ df2 = DataFrame(columns=Index(['a'], name='foo'),
+ index=Index([], name='bar'))
+ df2['a'] = df2['a'].astype('float64')
+
+ df1_expected = DataFrame(
+ index=pd.MultiIndex.from_product([df1.index, df1.columns]),
+ columns=Index([]))
+ df2_expected = DataFrame(
+ index=pd.MultiIndex.from_product([df2.index, df2.columns],
+ names=['bar', 'foo']),
+ columns=Index(['a']),
+ dtype='float64')
+
functions = [lambda x: (x.expanding(min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5)
@@ -2623,24 +2764,33 @@ def test_moment_functions_zero_length(self):
.corr(x, pairwise=True)),
]
for f in functions:
- df1_result_panel = f(df1)
- tm.assert_panel_equal(df1_result_panel, df1_expected_panel)
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
- df2_result_panel = f(df2)
- tm.assert_panel_equal(df2_result_panel, df2_expected_panel)
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
- df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=['A', 'B'])
- df1a = DataFrame([[1, 5], [3, 9]], index=[0, 2], columns=['A', 'B'])
- df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
- df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
- result1 = df1.expanding().cov(df2a, pairwise=True)[2]
- result2 = df1.expanding().cov(df2a, pairwise=True)[2]
- result3 = df1a.expanding().cov(df2, pairwise=True)[2]
- result4 = df1a.expanding().cov(df2a, pairwise=True)[2]
- expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A', 'B'],
- columns=['X', 'Y'])
+ df1 = DataFrame([[1, 5], [3, 2], [3, 9]],
+ columns=Index(['A', 'B'], name='foo'))
+ df1a = DataFrame([[1, 5], [3, 9]],
+ index=[0, 2],
+ columns=Index(['A', 'B'], name='foo'))
+ df2 = DataFrame([[5, 6], [None, None], [2, 1]],
+ columns=Index(['X', 'Y'], name='foo'))
+ df2a = DataFrame([[5, 6], [2, 1]],
+ index=[0, 2],
+ columns=Index(['X', 'Y'], name='foo'))
+ # TODO: xref gh-15826
+ # .loc is not preserving the names
+ result1 = df1.expanding().cov(df2a, pairwise=True).loc[2]
+ result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
+ expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],
+ columns=['A', 'B'],
+ index=Index(['X', 'Y'], name='foo'))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
@@ -2648,149 +2798,30 @@ def test_expanding_cov_pairwise_diff_length(self):
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
- df1 = DataFrame([[1, 2], [3, 2], [3, 4]], columns=['A', 'B'])
- df1a = DataFrame([[1, 2], [3, 4]], index=[0, 2], columns=['A', 'B'])
- df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
- df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
- result1 = df1.expanding().corr(df2, pairwise=True)[2]
- result2 = df1.expanding().corr(df2a, pairwise=True)[2]
- result3 = df1a.expanding().corr(df2, pairwise=True)[2]
- result4 = df1a.expanding().corr(df2a, pairwise=True)[2]
- expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A', 'B'],
- columns=['X', 'Y'])
+ df1 = DataFrame([[1, 2], [3, 2], [3, 4]],
+ columns=['A', 'B'],
+ index=Index(range(3), name='bar'))
+ df1a = DataFrame([[1, 2], [3, 4]],
+ index=Index([0, 2], name='bar'),
+ columns=['A', 'B'])
+ df2 = DataFrame([[5, 6], [None, None], [2, 1]],
+ columns=['X', 'Y'],
+ index=Index(range(3), name='bar'))
+ df2a = DataFrame([[5, 6], [2, 1]],
+ index=Index([0, 2], name='bar'),
+ columns=['X', 'Y'])
+ result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
+ result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
+ expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
+ columns=['A', 'B'],
+ index=Index(['X', 'Y']))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
- def test_pairwise_stats_column_names_order(self):
- # GH 7738
- df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
- columns=['C', 'C']),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
- DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
- columns=[1, 0.]),
- DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
- columns=[0, 1.]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
- columns=[1., 'X']), ]
- df2 = DataFrame([[None, 1, 1], [None, 1, 2],
- [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
- s = Series([1, 1, 3, 8])
-
- # suppress warnings about incomparable objects, as we are deliberately
- # testing with such column labels
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore",
- message=".*incomparable objects.*",
- category=RuntimeWarning)
-
- # DataFrame methods (which do not call _flex_binary_moment())
- for f in [lambda x: x.cov(), lambda x: x.corr(), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.columns)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- # compare internal values, as columns can be different
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with itself, pairwise=True
- for f in [lambda x: x.expanding().cov(pairwise=True),
- lambda x: x.expanding().corr(pairwise=True),
- lambda x: x.rolling(window=3).cov(pairwise=True),
- lambda x: x.rolling(window=3).corr(pairwise=True),
- lambda x: x.ewm(com=3).cov(pairwise=True),
- lambda x: x.ewm(com=3).corr(pairwise=True), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.items, df.index)
- tm.assert_index_equal(result.major_axis, df.columns)
- tm.assert_index_equal(result.minor_axis, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with itself, pairwise=False
- for f in [lambda x: x.expanding().cov(pairwise=False),
- lambda x: x.expanding().corr(pairwise=False),
- lambda x: x.rolling(window=3).cov(pairwise=False),
- lambda x: x.rolling(window=3).corr(pairwise=False),
- lambda x: x.ewm(com=3).cov(pairwise=False),
- lambda x: x.ewm(com=3).corr(pairwise=False), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.index)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with another DataFrame, pairwise=True
- for f in [lambda x, y: x.expanding().cov(y, pairwise=True),
- lambda x, y: x.expanding().corr(y, pairwise=True),
- lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
- lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
- lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
- lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ]:
- results = [f(df, df2) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.items, df.index)
- tm.assert_index_equal(result.major_axis, df.columns)
- tm.assert_index_equal(result.minor_axis, df2.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with another DataFrame, pairwise=False
- for f in [lambda x, y: x.expanding().cov(y, pairwise=False),
- lambda x, y: x.expanding().corr(y, pairwise=False),
- lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
- lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
- lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
- lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ]:
- results = [f(df, df2) if df.columns.is_unique else None
- for df in df1s]
- for (df, result) in zip(df1s, results):
- if result is not None:
- expected_index = df.index.union(df2.index)
- expected_columns = df.columns.union(df2.columns)
- tm.assert_index_equal(result.index, expected_index)
- tm.assert_index_equal(result.columns, expected_columns)
- else:
- tm.assertRaisesRegexp(
- ValueError, "'arg1' columns are not unique", f, df,
- df2)
- tm.assertRaisesRegexp(
- ValueError, "'arg2' columns are not unique", f,
- df2, df)
-
- # DataFrame with a Series
- for f in [lambda x, y: x.expanding().cov(y),
- lambda x, y: x.expanding().corr(y),
- lambda x, y: x.rolling(window=3).cov(y),
- lambda x, y: x.rolling(window=3).corr(y),
- lambda x, y: x.ewm(com=3).cov(y),
- lambda x, y: x.ewm(com=3).corr(y), ]:
- results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.index)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
| from https://github.com/pandas-dev/pandas/pull/15601#issuecomment-284927333.
Unfortunately I don't see an easy way to even deprecate this and we simply have to switch. Good news is this will simply fail fast in accessing, as the ``Panels`` have a different access pattern (names of indices and indexing) that MI DataFrames (and another reason to remove them :>). | https://api.github.com/repos/pandas-dev/pandas/pulls/15677 | 2017-03-13T20:35:42Z | 2017-04-07T00:36:04Z | null | 2017-06-24T13:18:21Z |
TST: fix errant tight_layout test | diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 92e2dc7b5d934..c31d8b539ae6f 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -53,6 +53,7 @@ def setUp(self):
self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()
self.mpl_ge_2_0_0 = plotting._mpl_ge_2_0_0()
+ self.mpl_ge_2_0_1 = plotting._mpl_ge_2_0_1()
if self.mpl_ge_1_4_0:
self.bp_n_objects = 7
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 22de7055e3cea..380bdc12abce4 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -241,8 +241,8 @@ def test_hist_layout(self):
@slow
# GH 9351
def test_tight_layout(self):
- if self.mpl_ge_2_0_0:
- df = DataFrame(randn(100, 2))
+ if self.mpl_ge_2_0_1:
+ df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
self.plt.tight_layout()
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index d46c38c117445..d311b0e6d83eb 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -150,6 +150,14 @@ def _mpl_ge_2_0_0():
return False
+def _mpl_ge_2_0_1():
+ try:
+ import matplotlib
+ return matplotlib.__version__ >= LooseVersion('2.0.1')
+ except ImportError:
+ return False
+
+
if _mpl_ge_1_5_0():
# Compat with mp 1.5, which uses cycler.
import cycler
| closes #9351 | https://api.github.com/repos/pandas-dev/pandas/pulls/15671 | 2017-03-13T15:16:04Z | 2017-03-13T23:04:39Z | 2017-03-13T23:04:39Z | 2017-03-14T18:25:26Z |
CI: use conda-forge on 3.6 build | diff --git a/.travis.yml b/.travis.yml
index 97bf881f3b6fc..b0331941e2a1e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -86,6 +86,7 @@ matrix:
- JOB_NAME: "36"
- TEST_ARGS="--skip-slow --skip-network"
- PANDAS_TESTING_MODE="deprecate"
+ - CONDA_FORGE=true
addons:
apt:
packages:
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index b337f6e443be2..12202b4ceee70 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -53,14 +53,17 @@ conda config --set ssl_verify false || exit 1
conda config --set always_yes true --set changeps1 false || exit 1
conda update -q conda
+echo "[add channels]"
# add the pandas channel to take priority
# to add extra packages
-echo "[add channels]"
conda config --add channels pandas || exit 1
conda config --remove channels defaults || exit 1
conda config --add channels defaults || exit 1
-conda install anaconda-client
+if [ "$CONDA_FORGE" ]; then
+ # add conda-forge channel as priority
+ conda config --add channels conda-forge || exit 1
+fi
# Useful for debugging any issues with conda
conda info -a || exit 1
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run
index 9a6c1c7edbc5e..41c9680ce1b7e 100644
--- a/ci/requirements-3.6.run
+++ b/ci/requirements-3.6.run
@@ -14,6 +14,7 @@ html5lib
jinja2
sqlalchemy
pymysql
+feather-format
# psycopg2 (not avail on defaults ATM)
beautifulsoup4
s3fs
diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh
deleted file mode 100644
index 7d88ede751ec8..0000000000000
--- a/ci/requirements-3.6.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install 36"
-
-conda install -n pandas -c conda-forge feather-format
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 80ea01d3a05aa..923ed2e7c3444 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -548,7 +548,8 @@ def test_interp_nan_idx(self):
df.interpolate(method='values')
def test_interp_various(self):
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
+
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 151a89888c329..b115218d76958 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -193,7 +193,8 @@ def test_rank_axis(self):
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_rank_methods_frame(self):
- tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
+ tm.skip_if_no_package('scipy', min_version='0.13',
+ app='scipy.stats.rankdata')
import scipy
from scipy.stats import rankdata
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 87cfcf32229b4..9e997da517bf6 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -827,7 +827,8 @@ def test_interp_quad(self):
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
+
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
@@ -1027,8 +1028,8 @@ def test_spline(self):
def test_spline_extrapolate(self):
tm.skip_if_no_package(
- 'scipy', '0.15',
- 'setting ext on scipy.interpolate.UnivariateSpline')
+ 'scipy', min_version='0.15',
+ app='setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index 99257b343310f..f47eae3adc3ae 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -246,7 +246,8 @@ def _check(s, expected, method='average'):
_check(series, results[method], method=method)
def test_rank_methods_series(self):
- tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
+ tm.skip_if_no_package('scipy', min_version='0.13',
+ app='scipy.stats.rankdata')
import scipy
from scipy.stats import rankdata
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index 4cd5a643ce4be..c0c678c184ee8 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -1129,10 +1129,10 @@ def test_isnotnull(self):
@pytest.mark.parametrize('index', [None, list('ab')]) # noqa: F811
@pytest.mark.parametrize('columns', [None, list('cd')])
@pytest.mark.parametrize('fill_value', [None, 0, np.nan])
-@pytest.mark.parametrize('dtype', [object, bool, int, float, np.uint16])
+@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16])
def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
# GH 4343
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy')
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
@@ -1180,6 +1180,51 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
tm.assert_equal(sdf.to_coo().dtype, np.object_)
+@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811
+def test_from_to_scipy_object(spmatrix, fill_value):
+ # GH 4343
+ dtype = object
+ columns = list('cd')
+ index = list('ab')
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
+
+ # Make one ndarray and from it one sparse matrix, both to be used for
+ # constructing frames and comparing results
+ arr = np.eye(2, dtype=dtype)
+ try:
+ spm = spmatrix(arr)
+ assert spm.dtype == arr.dtype
+ except (TypeError, AssertionError):
+ # If conversion to sparse fails for this spmatrix type and arr.dtype,
+ # then the combination is not currently supported in NumPy, so we
+ # can just skip testing it thoroughly
+ return
+
+ sdf = pd.SparseDataFrame(spm, index=index, columns=columns,
+ default_fill_value=fill_value)
+
+ # Expected result construction is kind of tricky for all
+ # dtype-fill_value combinations; easiest to cast to something generic
+ # and except later on
+ rarr = arr.astype(object)
+ rarr[arr == 0] = np.nan
+ expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna(
+ fill_value if fill_value is not None else np.nan)
+
+ # Assert frame is as expected
+ sdf_obj = sdf.astype(object)
+ tm.assert_sp_frame_equal(sdf_obj, expected)
+ tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
+
+ # Assert spmatrices equal
+ tm.assert_equal(dict(sdf.to_coo().todok()), dict(spm.todok()))
+
+ # Ensure dtype is preserved if possible
+ res_dtype = object
+ tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)})
+ tm.assert_equal(sdf.to_coo().dtype, res_dtype)
+
+
class TestSparseDataFrameArithmetic(tm.TestCase):
def test_numeric_op_scalar(self):
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 937c20d009b6b..75a7555d58ca5 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -5,7 +5,7 @@
import warnings
import numpy as np
-from pandas import Series, isnull
+from pandas import Series, isnull, _np_version_under1p9
from pandas.types.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
@@ -338,8 +338,7 @@ def test_nanmean_overflow(self):
# is now consistent with numpy
# numpy < 1.9.0 is not computing this correctly
- from distutils.version import LooseVersion
- if LooseVersion(np.__version__) >= '1.9.0':
+ if not _np_version_under1p9:
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
@@ -388,8 +387,7 @@ def test_nanstd(self):
allow_tdelta=True, allow_obj='convert')
def test_nansem(self):
- tm.skip_if_no_package('scipy.stats')
- tm._skip_if_scipy_0_17()
+ tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import sem
self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
@@ -448,16 +446,14 @@ def _skew_kurt_wrap(self, values, axis=None, func=None):
return result
def test_nanskew(self):
- tm.skip_if_no_package('scipy.stats')
- tm._skip_if_scipy_0_17()
+ tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
- tm.skip_if_no_package('scipy.stats')
- tm._skip_if_scipy_0_17()
+ tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 3f2973a9834ca..b7164d31b2a5e 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -905,7 +905,7 @@ def test_cmov_window_na_min_periods(self):
def test_cmov_window_regular(self):
# GH 8238
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -938,7 +938,7 @@ def test_cmov_window_regular(self):
def test_cmov_window_regular_linear_range(self):
# GH 8238
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -955,7 +955,7 @@ def test_cmov_window_regular_linear_range(self):
def test_cmov_window_regular_missing_data(self):
# GH 8238
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -988,7 +988,7 @@ def test_cmov_window_regular_missing_data(self):
def test_cmov_window_special(self):
# GH 8238
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
@@ -1015,7 +1015,7 @@ def test_cmov_window_special(self):
def test_cmov_window_special_linear_range(self):
# GH 8238
- tm._skip_if_no_scipy()
+ tm.skip_if_no_package('scipy', max_version='0.19.0')
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 74ff480a9c198..529ecef3e2d6a 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -304,14 +304,6 @@ def _skip_if_no_scipy():
pytest.skip('scipy.sparse missing')
-def _skip_if_scipy_0_17():
- import scipy
- v = scipy.__version__
- if v >= LooseVersion("0.17.0"):
- import pytest
- pytest.skip("scipy 0.17")
-
-
def _check_if_lzma():
try:
return compat.import_lzma()
@@ -2020,15 +2012,18 @@ def __init__(self, *args, **kwargs):
# Dependency checks. Copied this from Nipy/Nipype (Copyright of
# respective developers, license: BSD-3)
-def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion):
- """Check that the minimal version of the required package is installed.
+def package_check(pkg_name, min_version=None, max_version=None, app='pandas',
+ checker=LooseVersion):
+ """Check that the min/max version of the required package is installed.
Parameters
----------
pkg_name : string
Name of the required package.
- version : string, optional
+ min_version : string, optional
Minimal version number for required package.
+ max_version : string, optional
+ Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
@@ -2040,7 +2035,6 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion):
Examples
--------
package_check('numpy', '1.3')
- package_check('networkx', '1.0', 'tutorial1')
"""
@@ -2049,8 +2043,10 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion):
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
- if version:
- msg += ' with version >= %s' % (version,)
+ if min_version:
+ msg += ' with version >= %s' % (min_version,)
+ if max_version:
+ msg += ' with version < %s' % (max_version,)
try:
mod = __import__(pkg_name)
except ImportError:
@@ -2059,7 +2055,9 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion):
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for %s' % pkg_name)
- if version and checker(have_version) < checker(version):
+ if min_version and checker(have_version) < checker(min_version):
+ pytest.skip(msg)
+ if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
| https://api.github.com/repos/pandas-dev/pandas/pulls/15668 | 2017-03-12T22:37:15Z | 2017-03-13T00:53:20Z | null | 2017-03-13T00:53:20Z | |
Fix another typo in the timeseries documentation | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index c0c178ad2fb49..7136b15a7633a 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -607,7 +607,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
dayofyear,"The ordinal day of year"
weekofyear,"The week ordinal of the year"
week,"The week ordinal of the year"
- dayofweek,"The numer of the day of the week with Monday=0, Sunday=6"
+ dayofweek,"The number of the day of the week with Monday=0, Sunday=6"
weekday,"The number of the day of the week with Monday=0, Sunday=6"
weekday_name,"The name of the day in a week (ex: Friday)"
quarter,"Quarter of the date: Jan-Mar = 1, Apr-Jun = 2, etc."
| Just noticed this right after #15666 was merged. | https://api.github.com/repos/pandas-dev/pandas/pulls/15667 | 2017-03-12T20:59:36Z | 2017-03-12T22:40:06Z | 2017-03-12T22:40:05Z | 2017-03-12T22:40:09Z |
Fix typo in timeseries documentation | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index e09d240ed91b7..c0c178ad2fb49 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -610,7 +610,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
dayofweek,"The numer of the day of the week with Monday=0, Sunday=6"
weekday,"The number of the day of the week with Monday=0, Sunday=6"
weekday_name,"The name of the day in a week (ex: Friday)"
- quarter,"Quarter of the date: Jan=Mar = 1, Apr-Jun = 2, etc."
+ quarter,"Quarter of the date: Jan-Mar = 1, Apr-Jun = 2, etc."
days_in_month,"The number of days in the month of the datetime"
is_month_start,"Logical indicating if first day of month (defined by frequency)"
is_month_end,"Logical indicating if last day of month (defined by frequency)"
| https://api.github.com/repos/pandas-dev/pandas/pulls/15666 | 2017-03-12T20:41:09Z | 2017-03-12T20:55:33Z | 2017-03-12T20:55:33Z | 2017-03-12T21:09:39Z | |
COMPAT: free parser memory at close() for non-refcnt gc | diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 916f06d357473..6b0775e54da0c 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -162,6 +162,7 @@ int parser_cleanup(parser_t *self) {
if (self->cb_cleanup(self->source) < 0) {
status = -1;
}
+ self->cb_cleanup = NULL;
}
return status;
@@ -239,6 +240,9 @@ int parser_init(parser_t *self) {
void parser_free(parser_t *self) {
// opposite of parser_init
parser_cleanup(self);
+}
+
+void parser_del(parser_t *self) {
free(self);
}
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index 9853b5149bee3..b4344e8a6c070 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -243,6 +243,8 @@ int parser_set_skipfirstnrows(parser_t *self, int64_t nrows);
void parser_free(parser_t *self);
+void parser_del(parser_t *self);
+
void parser_set_default_options(parser_t *self);
void debug_print_parser(parser_t *self);
diff --git a/pandas/io/parsers.pyx b/pandas/io/parsers.pyx
index a5858accbb6f5..3728cda559050 100644
--- a/pandas/io/parsers.pyx
+++ b/pandas/io/parsers.pyx
@@ -214,6 +214,7 @@ cdef extern from "parser/tokenizer.h":
int parser_init(parser_t *self) nogil
void parser_free(parser_t *self) nogil
+ void parser_del(parser_t *self) nogil
int parser_add_skiprow(parser_t *self, int64_t row)
int parser_set_skipfirstnrows(parser_t *self, int64_t nrows)
@@ -573,8 +574,13 @@ cdef class TextReader:
def __dealloc__(self):
parser_free(self.parser)
- kh_destroy_str(self.true_set)
- kh_destroy_str(self.false_set)
+ if self.true_set:
+ kh_destroy_str(self.true_set)
+ self.true_set = NULL
+ if self.false_set:
+ kh_destroy_str(self.false_set)
+ self.false_set = NULL
+ parser_del(self.parser)
def close(self):
# we need to properly close an open derived
@@ -584,6 +590,14 @@ cdef class TextReader:
self.handle.close()
except:
pass
+ # also preemptively free all allocated memory
+ parser_free(self.parser)
+ if self.true_set:
+ kh_destroy_str(self.true_set)
+ self.true_set = NULL
+ if self.false_set:
+ kh_destroy_str(self.false_set)
+ self.false_set = NULL
def set_error_bad_lines(self, int status):
self.parser.error_bad_lines = status
| relying on ``__dealloc__`` to clean up ``malloc()``ed memory can lead to a perceived "leak" on PyPy since the garbage collector will not necessarily collect the object as soon as its ``refcnt`` reaches 0.
Instead, pre-emptively release memory when ``close()`` is called
The code still maintains backward compatibility for the case where ``close()`` is never called | https://api.github.com/repos/pandas-dev/pandas/pulls/15665 | 2017-03-12T18:20:08Z | 2017-03-13T00:56:17Z | null | 2017-03-13T00:56:21Z |
Merge pull request #1 from pydata/master | 11/24/2015
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15659 | 2017-03-12T03:53:34Z | 2017-03-12T03:54:48Z | null | 2017-03-12T03:55:13Z | |
CLN: Cleanup tests for .rank() | diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 4758ee1323ca0..6c917444f9f43 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -2,7 +2,7 @@
from __future__ import print_function
-from datetime import timedelta, datetime
+from datetime import timedelta
from distutils.version import LooseVersion
import sys
import pytest
@@ -642,173 +642,6 @@ def test_cumprod(self):
df.cumprod(0)
df.cumprod(1)
- def test_rank(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
-
- self.frame['A'][::2] = np.nan
- self.frame['B'][::3] = np.nan
- self.frame['C'][::4] = np.nan
- self.frame['D'][::5] = np.nan
-
- ranks0 = self.frame.rank()
- ranks1 = self.frame.rank(1)
- mask = np.isnan(self.frame.values)
-
- fvals = self.frame.fillna(np.inf).values
-
- exp0 = np.apply_along_axis(rankdata, 0, fvals)
- exp0[mask] = np.nan
-
- exp1 = np.apply_along_axis(rankdata, 1, fvals)
- exp1[mask] = np.nan
-
- tm.assert_almost_equal(ranks0.values, exp0)
- tm.assert_almost_equal(ranks1.values, exp1)
-
- # integers
- df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
-
- result = df.rank()
- exp = df.astype(float).rank()
- tm.assert_frame_equal(result, exp)
-
- result = df.rank(1)
- exp = df.astype(float).rank(1)
- tm.assert_frame_equal(result, exp)
-
- def test_rank2(self):
- df = DataFrame([[1, 3, 2], [1, 2, 3]])
- expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
- result = df.rank(1, pct=True)
- tm.assert_frame_equal(result, expected)
-
- df = DataFrame([[1, 3, 2], [1, 2, 3]])
- expected = df.rank(0) / 2.0
- result = df.rank(0, pct=True)
- tm.assert_frame_equal(result, expected)
-
- df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
- expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
- result = df.rank(1, numeric_only=False)
- tm.assert_frame_equal(result, expected)
-
- expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
- result = df.rank(0, numeric_only=False)
- tm.assert_frame_equal(result, expected)
-
- df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
- expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
- result = df.rank(1, numeric_only=False)
- tm.assert_frame_equal(result, expected)
-
- expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
- result = df.rank(0, numeric_only=False)
- tm.assert_frame_equal(result, expected)
-
- # f7u12, this does not work without extensive workaround
- data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
- [datetime(2000, 1, 2), datetime(2000, 1, 3),
- datetime(2000, 1, 1)]]
- df = DataFrame(data)
-
- # check the rank
- expected = DataFrame([[2., nan, 1.],
- [2., 3., 1.]])
- result = df.rank(1, numeric_only=False, ascending=True)
- tm.assert_frame_equal(result, expected)
-
- expected = DataFrame([[1., nan, 2.],
- [2., 1., 3.]])
- result = df.rank(1, numeric_only=False, ascending=False)
- tm.assert_frame_equal(result, expected)
-
- # mixed-type frames
- self.mixed_frame['datetime'] = datetime.now()
- self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
-
- result = self.mixed_frame.rank(1)
- expected = self.mixed_frame.rank(1, numeric_only=True)
- tm.assert_frame_equal(result, expected)
-
- df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
- 1e60, 1e80, 1e-30]})
- exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
- tm.assert_frame_equal(df.rank(), exp)
-
- def test_rank_na_option(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
-
- self.frame['A'][::2] = np.nan
- self.frame['B'][::3] = np.nan
- self.frame['C'][::4] = np.nan
- self.frame['D'][::5] = np.nan
-
- # bottom
- ranks0 = self.frame.rank(na_option='bottom')
- ranks1 = self.frame.rank(1, na_option='bottom')
-
- fvals = self.frame.fillna(np.inf).values
-
- exp0 = np.apply_along_axis(rankdata, 0, fvals)
- exp1 = np.apply_along_axis(rankdata, 1, fvals)
-
- tm.assert_almost_equal(ranks0.values, exp0)
- tm.assert_almost_equal(ranks1.values, exp1)
-
- # top
- ranks0 = self.frame.rank(na_option='top')
- ranks1 = self.frame.rank(1, na_option='top')
-
- fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
- fval1 = self.frame.T
- fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
- fval1 = fval1.fillna(np.inf).values
-
- exp0 = np.apply_along_axis(rankdata, 0, fval0)
- exp1 = np.apply_along_axis(rankdata, 1, fval1)
-
- tm.assert_almost_equal(ranks0.values, exp0)
- tm.assert_almost_equal(ranks1.values, exp1)
-
- # descending
-
- # bottom
- ranks0 = self.frame.rank(na_option='top', ascending=False)
- ranks1 = self.frame.rank(1, na_option='top', ascending=False)
-
- fvals = self.frame.fillna(np.inf).values
-
- exp0 = np.apply_along_axis(rankdata, 0, -fvals)
- exp1 = np.apply_along_axis(rankdata, 1, -fvals)
-
- tm.assert_almost_equal(ranks0.values, exp0)
- tm.assert_almost_equal(ranks1.values, exp1)
-
- # descending
-
- # top
- ranks0 = self.frame.rank(na_option='bottom', ascending=False)
- ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
-
- fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
- fval1 = self.frame.T
- fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
- fval1 = fval1.fillna(np.inf).values
-
- exp0 = np.apply_along_axis(rankdata, 0, -fval0)
- exp1 = np.apply_along_axis(rankdata, 1, -fval1)
-
- tm.assert_numpy_array_equal(ranks0.values, exp0)
- tm.assert_numpy_array_equal(ranks1.values, exp1)
-
- def test_rank_axis(self):
- # check if using axes' names gives the same result
- df = pd.DataFrame([[2, 1], [4, 3]])
- tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
- tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
-
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
new file mode 100644
index 0000000000000..151a89888c329
--- /dev/null
+++ b/pandas/tests/frame/test_rank.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+from datetime import timedelta, datetime
+from distutils.version import LooseVersion
+from numpy import nan
+import numpy as np
+
+from pandas import Series, DataFrame
+
+from pandas.compat import product
+from pandas.util.testing import assert_frame_equal
+import pandas.util.testing as tm
+from pandas.tests.frame.common import TestData
+
+
+class TestRank(tm.TestCase, TestData):
+ s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
+ df = DataFrame({'A': s, 'B': s})
+
+ results = {
+ 'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
+ 3.5, 1.5, 8.0, nan, 5.5]),
+ 'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
+ 'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
+ 'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
+ 'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
+ }
+
+ def test_rank(self):
+ tm._skip_if_no_scipy()
+ from scipy.stats import rankdata
+
+ self.frame['A'][::2] = np.nan
+ self.frame['B'][::3] = np.nan
+ self.frame['C'][::4] = np.nan
+ self.frame['D'][::5] = np.nan
+
+ ranks0 = self.frame.rank()
+ ranks1 = self.frame.rank(1)
+ mask = np.isnan(self.frame.values)
+
+ fvals = self.frame.fillna(np.inf).values
+
+ exp0 = np.apply_along_axis(rankdata, 0, fvals)
+ exp0[mask] = np.nan
+
+ exp1 = np.apply_along_axis(rankdata, 1, fvals)
+ exp1[mask] = np.nan
+
+ tm.assert_almost_equal(ranks0.values, exp0)
+ tm.assert_almost_equal(ranks1.values, exp1)
+
+ # integers
+ df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
+
+ result = df.rank()
+ exp = df.astype(float).rank()
+ tm.assert_frame_equal(result, exp)
+
+ result = df.rank(1)
+ exp = df.astype(float).rank(1)
+ tm.assert_frame_equal(result, exp)
+
+ def test_rank2(self):
+ df = DataFrame([[1, 3, 2], [1, 2, 3]])
+ expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
+ result = df.rank(1, pct=True)
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame([[1, 3, 2], [1, 2, 3]])
+ expected = df.rank(0) / 2.0
+ result = df.rank(0, pct=True)
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
+ expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
+ result = df.rank(1, numeric_only=False)
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
+ result = df.rank(0, numeric_only=False)
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
+ expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
+ result = df.rank(1, numeric_only=False)
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
+ result = df.rank(0, numeric_only=False)
+ tm.assert_frame_equal(result, expected)
+
+ # f7u12, this does not work without extensive workaround
+ data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
+ [datetime(2000, 1, 2), datetime(2000, 1, 3),
+ datetime(2000, 1, 1)]]
+ df = DataFrame(data)
+
+ # check the rank
+ expected = DataFrame([[2., nan, 1.],
+ [2., 3., 1.]])
+ result = df.rank(1, numeric_only=False, ascending=True)
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([[1., nan, 2.],
+ [2., 1., 3.]])
+ result = df.rank(1, numeric_only=False, ascending=False)
+ tm.assert_frame_equal(result, expected)
+
+ # mixed-type frames
+ self.mixed_frame['datetime'] = datetime.now()
+ self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
+
+ result = self.mixed_frame.rank(1)
+ expected = self.mixed_frame.rank(1, numeric_only=True)
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
+ 1e60, 1e80, 1e-30]})
+ exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
+ tm.assert_frame_equal(df.rank(), exp)
+
+ def test_rank_na_option(self):
+ tm._skip_if_no_scipy()
+ from scipy.stats import rankdata
+
+ self.frame['A'][::2] = np.nan
+ self.frame['B'][::3] = np.nan
+ self.frame['C'][::4] = np.nan
+ self.frame['D'][::5] = np.nan
+
+ # bottom
+ ranks0 = self.frame.rank(na_option='bottom')
+ ranks1 = self.frame.rank(1, na_option='bottom')
+
+ fvals = self.frame.fillna(np.inf).values
+
+ exp0 = np.apply_along_axis(rankdata, 0, fvals)
+ exp1 = np.apply_along_axis(rankdata, 1, fvals)
+
+ tm.assert_almost_equal(ranks0.values, exp0)
+ tm.assert_almost_equal(ranks1.values, exp1)
+
+ # top
+ ranks0 = self.frame.rank(na_option='top')
+ ranks1 = self.frame.rank(1, na_option='top')
+
+ fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
+ fval1 = self.frame.T
+ fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
+ fval1 = fval1.fillna(np.inf).values
+
+ exp0 = np.apply_along_axis(rankdata, 0, fval0)
+ exp1 = np.apply_along_axis(rankdata, 1, fval1)
+
+ tm.assert_almost_equal(ranks0.values, exp0)
+ tm.assert_almost_equal(ranks1.values, exp1)
+
+ # descending
+
+ # bottom
+ ranks0 = self.frame.rank(na_option='top', ascending=False)
+ ranks1 = self.frame.rank(1, na_option='top', ascending=False)
+
+ fvals = self.frame.fillna(np.inf).values
+
+ exp0 = np.apply_along_axis(rankdata, 0, -fvals)
+ exp1 = np.apply_along_axis(rankdata, 1, -fvals)
+
+ tm.assert_almost_equal(ranks0.values, exp0)
+ tm.assert_almost_equal(ranks1.values, exp1)
+
+ # descending
+
+ # top
+ ranks0 = self.frame.rank(na_option='bottom', ascending=False)
+ ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
+
+ fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
+ fval1 = self.frame.T
+ fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
+ fval1 = fval1.fillna(np.inf).values
+
+ exp0 = np.apply_along_axis(rankdata, 0, -fval0)
+ exp1 = np.apply_along_axis(rankdata, 1, -fval1)
+
+ tm.assert_numpy_array_equal(ranks0.values, exp0)
+ tm.assert_numpy_array_equal(ranks1.values, exp1)
+
+ def test_rank_axis(self):
+ # check if using axes' names gives the same result
+ df = DataFrame([[2, 1], [4, 3]])
+ tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
+ tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
+
+ def test_rank_methods_frame(self):
+ tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
+ import scipy
+ from scipy.stats import rankdata
+
+ xs = np.random.randint(0, 21, (100, 26))
+ xs = (xs - 10.0) / 10.0
+ cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
+
+ for vals in [xs, xs + 1e6, xs * 1e-6]:
+ df = DataFrame(vals, columns=cols)
+
+ for ax in [0, 1]:
+ for m in ['average', 'min', 'max', 'first', 'dense']:
+ result = df.rank(axis=ax, method=m)
+ sprank = np.apply_along_axis(
+ rankdata, ax, vals,
+ m if m != 'first' else 'ordinal')
+ sprank = sprank.astype(np.float64)
+ expected = DataFrame(sprank, columns=cols)
+
+ if LooseVersion(scipy.__version__) >= '0.17.0':
+ expected = expected.astype('float64')
+ tm.assert_frame_equal(result, expected)
+
+ def test_rank_descending(self):
+ dtypes = ['O', 'f8', 'i8']
+
+ for dtype, method in product(dtypes, self.results):
+ if 'i' in dtype:
+ df = self.df.dropna()
+ else:
+ df = self.df.astype(dtype)
+
+ res = df.rank(ascending=False)
+ expected = (df.max() - df).rank()
+ assert_frame_equal(res, expected)
+
+ if method == 'first' and dtype == 'O':
+ continue
+
+ expected = (df.max() - df).rank(method=method)
+
+ if dtype != 'O':
+ res2 = df.rank(method=method, ascending=False,
+ numeric_only=True)
+ assert_frame_equal(res2, expected)
+
+ res3 = df.rank(method=method, ascending=False,
+ numeric_only=False)
+ assert_frame_equal(res3, expected)
+
+ def test_rank_2d_tie_methods(self):
+ df = self.df
+
+ def _check2d(df, expected, method='average', axis=0):
+ exp_df = DataFrame({'A': expected, 'B': expected})
+
+ if axis == 1:
+ df = df.T
+ exp_df = exp_df.T
+
+ result = df.rank(method=method, axis=axis)
+ assert_frame_equal(result, exp_df)
+
+ dtypes = [None, object]
+ disabled = set([(object, 'first')])
+ results = self.results
+
+ for method, axis, dtype in product(results, [0, 1], dtypes):
+ if (dtype, method) in disabled:
+ continue
+ frame = df if dtype is None else df.astype(dtype)
+ _check2d(frame, results[method], method=method, axis=axis)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index b6985abb64e40..c2543581dca50 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -969,207 +969,6 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
- def test_rank(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
-
- self.ts[::2] = np.nan
- self.ts[:10][::3] = 4.
-
- ranks = self.ts.rank()
- oranks = self.ts.astype('O').rank()
-
- assert_series_equal(ranks, oranks)
-
- mask = np.isnan(self.ts)
- filled = self.ts.fillna(np.inf)
-
- # rankdata returns a ndarray
- exp = Series(rankdata(filled), index=filled.index, name='ts')
- exp[mask] = np.nan
-
- tm.assert_series_equal(ranks, exp)
-
- iseries = Series(np.arange(5).repeat(2))
-
- iranks = iseries.rank()
- exp = iseries.astype(float).rank()
- assert_series_equal(iranks, exp)
- iseries = Series(np.arange(5)) + 1.0
- exp = iseries / 5.0
- iranks = iseries.rank(pct=True)
-
- assert_series_equal(iranks, exp)
-
- iseries = Series(np.repeat(1, 100))
- exp = Series(np.repeat(0.505, 100))
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- iseries[1] = np.nan
- exp = Series(np.repeat(50.0 / 99.0, 100))
- exp[1] = np.nan
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- iseries = Series(np.arange(5)) + 1.0
- iseries[4] = np.nan
- exp = iseries / 4.0
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- iseries = Series(np.repeat(np.nan, 100))
- exp = iseries.copy()
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- iseries = Series(np.arange(5)) + 1
- iseries[4] = np.nan
- exp = iseries / 4.0
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- rng = date_range('1/1/1990', periods=5)
- iseries = Series(np.arange(5), rng) + 1
- iseries.iloc[4] = np.nan
- exp = iseries / 4.0
- iranks = iseries.rank(pct=True)
- assert_series_equal(iranks, exp)
-
- iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
- exp = Series([2, 1, 3, 5, 4, 6.0])
- iranks = iseries.rank()
- assert_series_equal(iranks, exp)
-
- # GH 5968
- iseries = Series(['3 day', '1 day 10m', '-2 day', pd.NaT],
- dtype='m8[ns]')
- exp = Series([3, 2, 1, np.nan])
- iranks = iseries.rank()
- assert_series_equal(iranks, exp)
-
- values = np.array(
- [-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
- ], dtype='float64')
- random_order = np.random.permutation(len(values))
- iseries = Series(values[random_order])
- exp = Series(random_order + 1.0, dtype='float64')
- iranks = iseries.rank()
- assert_series_equal(iranks, exp)
-
- def test_rank_categorical(self):
- # GH issue #15420 rank incorrectly orders ordered categories
-
- # Test ascending/descending ranking for ordered categoricals
- exp = pd.Series([1., 2., 3., 4., 5., 6.])
- exp_desc = pd.Series([6., 5., 4., 3., 2., 1.])
- ordered = pd.Series(
- ['first', 'second', 'third', 'fourth', 'fifth', 'sixth']
- ).astype(
- 'category',
- categories=['first', 'second', 'third',
- 'fourth', 'fifth', 'sixth'],
- ordered=True
- )
- assert_series_equal(ordered.rank(), exp)
- assert_series_equal(ordered.rank(ascending=False), exp_desc)
-
- # Unordered categoricals should be ranked as objects
- unordered = pd.Series(
- ['first', 'second', 'third', 'fourth', 'fifth', 'sixth'],
- ).astype(
- 'category',
- categories=['first', 'second', 'third',
- 'fourth', 'fifth', 'sixth'],
- ordered=False
- )
- exp_unordered = pd.Series([2., 4., 6., 3., 1., 5.])
- res = unordered.rank()
- assert_series_equal(res, exp_unordered)
-
- unordered1 = pd.Series(
- [1, 2, 3, 4, 5, 6],
- ).astype(
- 'category',
- categories=[1, 2, 3, 4, 5, 6],
- ordered=False
- )
- exp_unordered1 = pd.Series([1., 2., 3., 4., 5., 6.])
- res1 = unordered1.rank()
- assert_series_equal(res1, exp_unordered1)
-
- # Test na_option for rank data
- na_ser = pd.Series(
- ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN]
- ).astype(
- 'category',
- categories=[
- 'first', 'second', 'third', 'fourth',
- 'fifth', 'sixth', 'seventh'
- ],
- ordered=True
- )
-
- exp_top = pd.Series([2., 3., 4., 5., 6., 7., 1.])
- exp_bot = pd.Series([1., 2., 3., 4., 5., 6., 7.])
- exp_keep = pd.Series([1., 2., 3., 4., 5., 6., np.NaN])
-
- assert_series_equal(na_ser.rank(na_option='top'), exp_top)
- assert_series_equal(na_ser.rank(na_option='bottom'), exp_bot)
- assert_series_equal(na_ser.rank(na_option='keep'), exp_keep)
-
- # Test na_option for rank data with ascending False
- exp_top = pd.Series([7., 6., 5., 4., 3., 2., 1.])
- exp_bot = pd.Series([6., 5., 4., 3., 2., 1., 7.])
- exp_keep = pd.Series([6., 5., 4., 3., 2., 1., np.NaN])
-
- assert_series_equal(
- na_ser.rank(na_option='top', ascending=False),
- exp_top
- )
- assert_series_equal(
- na_ser.rank(na_option='bottom', ascending=False),
- exp_bot
- )
- assert_series_equal(
- na_ser.rank(na_option='keep', ascending=False),
- exp_keep
- )
-
- # Test with pct=True
- na_ser = pd.Series(
- ['first', 'second', 'third', 'fourth', np.NaN],
- ).astype(
- 'category',
- categories=['first', 'second', 'third', 'fourth'],
- ordered=True
- )
- exp_top = pd.Series([0.4, 0.6, 0.8, 1., 0.2])
- exp_bot = pd.Series([0.2, 0.4, 0.6, 0.8, 1.])
- exp_keep = pd.Series([0.25, 0.5, 0.75, 1., np.NaN])
-
- assert_series_equal(na_ser.rank(na_option='top', pct=True), exp_top)
- assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)
- assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)
-
- def test_rank_signature(self):
- s = Series([0, 1])
- s.rank(method='average')
- self.assertRaises(ValueError, s.rank, 'average')
-
- def test_rank_inf(self):
- pytest.skip('DataFrame.rank does not currently rank '
- 'np.inf and -np.inf properly')
-
- values = np.array(
- [-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
- 2, 40, np.inf], dtype='float64')
- random_order = np.random.permutation(len(values))
- iseries = Series(values[random_order])
- exp = Series(random_order + 1.0, dtype='float64')
- iranks = iseries.rank()
- assert_series_equal(iranks, exp)
-
def test_clip(self):
val = self.ts.median()
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
new file mode 100644
index 0000000000000..99257b343310f
--- /dev/null
+++ b/pandas/tests/series/test_rank.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+from pandas import compat
+
+import pytest
+
+from distutils.version import LooseVersion
+from numpy import nan
+import numpy as np
+
+from pandas import (Series, date_range, NaT)
+
+from pandas.compat import product
+from pandas.util.testing import assert_series_equal
+import pandas.util.testing as tm
+from pandas.tests.series.common import TestData
+
+
+class TestSeriesRank(tm.TestCase, TestData):
+ s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
+
+ results = {
+ 'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
+ 3.5, 1.5, 8.0, nan, 5.5]),
+ 'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
+ 'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
+ 'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
+ 'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
+ }
+
+ def test_rank(self):
+ tm._skip_if_no_scipy()
+ from scipy.stats import rankdata
+
+ self.ts[::2] = np.nan
+ self.ts[:10][::3] = 4.
+
+ ranks = self.ts.rank()
+ oranks = self.ts.astype('O').rank()
+
+ assert_series_equal(ranks, oranks)
+
+ mask = np.isnan(self.ts)
+ filled = self.ts.fillna(np.inf)
+
+ # rankdata returns a ndarray
+ exp = Series(rankdata(filled), index=filled.index, name='ts')
+ exp[mask] = np.nan
+
+ tm.assert_series_equal(ranks, exp)
+
+ iseries = Series(np.arange(5).repeat(2))
+
+ iranks = iseries.rank()
+ exp = iseries.astype(float).rank()
+ assert_series_equal(iranks, exp)
+ iseries = Series(np.arange(5)) + 1.0
+ exp = iseries / 5.0
+ iranks = iseries.rank(pct=True)
+
+ assert_series_equal(iranks, exp)
+
+ iseries = Series(np.repeat(1, 100))
+ exp = Series(np.repeat(0.505, 100))
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ iseries[1] = np.nan
+ exp = Series(np.repeat(50.0 / 99.0, 100))
+ exp[1] = np.nan
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ iseries = Series(np.arange(5)) + 1.0
+ iseries[4] = np.nan
+ exp = iseries / 4.0
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ iseries = Series(np.repeat(np.nan, 100))
+ exp = iseries.copy()
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ iseries = Series(np.arange(5)) + 1
+ iseries[4] = np.nan
+ exp = iseries / 4.0
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ rng = date_range('1/1/1990', periods=5)
+ iseries = Series(np.arange(5), rng) + 1
+ iseries.iloc[4] = np.nan
+ exp = iseries / 4.0
+ iranks = iseries.rank(pct=True)
+ assert_series_equal(iranks, exp)
+
+ iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
+ exp = Series([2, 1, 3, 5, 4, 6.0])
+ iranks = iseries.rank()
+ assert_series_equal(iranks, exp)
+
+ # GH 5968
+ iseries = Series(['3 day', '1 day 10m', '-2 day', NaT],
+ dtype='m8[ns]')
+ exp = Series([3, 2, 1, np.nan])
+ iranks = iseries.rank()
+ assert_series_equal(iranks, exp)
+
+ values = np.array(
+ [-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
+ ], dtype='float64')
+ random_order = np.random.permutation(len(values))
+ iseries = Series(values[random_order])
+ exp = Series(random_order + 1.0, dtype='float64')
+ iranks = iseries.rank()
+ assert_series_equal(iranks, exp)
+
+ def test_rank_categorical(self):
+ # GH issue #15420 rank incorrectly orders ordered categories
+
+ # Test ascending/descending ranking for ordered categoricals
+ exp = Series([1., 2., 3., 4., 5., 6.])
+ exp_desc = Series([6., 5., 4., 3., 2., 1.])
+ ordered = Series(
+ ['first', 'second', 'third', 'fourth', 'fifth', 'sixth']
+ ).astype(
+ 'category',
+ categories=['first', 'second', 'third',
+ 'fourth', 'fifth', 'sixth'],
+ ordered=True
+ )
+ assert_series_equal(ordered.rank(), exp)
+ assert_series_equal(ordered.rank(ascending=False), exp_desc)
+
+ # Unordered categoricals should be ranked as objects
+ unordered = Series(
+ ['first', 'second', 'third', 'fourth', 'fifth', 'sixth'],
+ ).astype(
+ 'category',
+ categories=['first', 'second', 'third',
+ 'fourth', 'fifth', 'sixth'],
+ ordered=False
+ )
+ exp_unordered = Series([2., 4., 6., 3., 1., 5.])
+ res = unordered.rank()
+ assert_series_equal(res, exp_unordered)
+
+ unordered1 = Series(
+ [1, 2, 3, 4, 5, 6],
+ ).astype(
+ 'category',
+ categories=[1, 2, 3, 4, 5, 6],
+ ordered=False
+ )
+ exp_unordered1 = Series([1., 2., 3., 4., 5., 6.])
+ res1 = unordered1.rank()
+ assert_series_equal(res1, exp_unordered1)
+
+ # Test na_option for rank data
+ na_ser = Series(
+ ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN]
+ ).astype(
+ 'category',
+ categories=[
+ 'first', 'second', 'third', 'fourth',
+ 'fifth', 'sixth', 'seventh'
+ ],
+ ordered=True
+ )
+
+ exp_top = Series([2., 3., 4., 5., 6., 7., 1.])
+ exp_bot = Series([1., 2., 3., 4., 5., 6., 7.])
+ exp_keep = Series([1., 2., 3., 4., 5., 6., np.NaN])
+
+ assert_series_equal(na_ser.rank(na_option='top'), exp_top)
+ assert_series_equal(na_ser.rank(na_option='bottom'), exp_bot)
+ assert_series_equal(na_ser.rank(na_option='keep'), exp_keep)
+
+ # Test na_option for rank data with ascending False
+ exp_top = Series([7., 6., 5., 4., 3., 2., 1.])
+ exp_bot = Series([6., 5., 4., 3., 2., 1., 7.])
+ exp_keep = Series([6., 5., 4., 3., 2., 1., np.NaN])
+
+ assert_series_equal(
+ na_ser.rank(na_option='top', ascending=False),
+ exp_top
+ )
+ assert_series_equal(
+ na_ser.rank(na_option='bottom', ascending=False),
+ exp_bot
+ )
+ assert_series_equal(
+ na_ser.rank(na_option='keep', ascending=False),
+ exp_keep
+ )
+
+ # Test with pct=True
+ na_ser = Series(
+ ['first', 'second', 'third', 'fourth', np.NaN],
+ ).astype(
+ 'category',
+ categories=['first', 'second', 'third', 'fourth'],
+ ordered=True
+ )
+ exp_top = Series([0.4, 0.6, 0.8, 1., 0.2])
+ exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.])
+ exp_keep = Series([0.25, 0.5, 0.75, 1., np.NaN])
+
+ assert_series_equal(na_ser.rank(na_option='top', pct=True), exp_top)
+ assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)
+ assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)
+
+ def test_rank_signature(self):
+ s = Series([0, 1])
+ s.rank(method='average')
+ self.assertRaises(ValueError, s.rank, 'average')
+
+ def test_rank_inf(self):
+ pytest.skip('DataFrame.rank does not currently rank '
+ 'np.inf and -np.inf properly')
+
+ values = np.array(
+ [-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
+ 2, 40, np.inf], dtype='float64')
+ random_order = np.random.permutation(len(values))
+ iseries = Series(values[random_order])
+ exp = Series(random_order + 1.0, dtype='float64')
+ iranks = iseries.rank()
+ assert_series_equal(iranks, exp)
+
+ def test_rank_tie_methods(self):
+ s = self.s
+
+ def _check(s, expected, method='average'):
+ result = s.rank(method=method)
+ tm.assert_series_equal(result, Series(expected))
+
+ dtypes = [None, object]
+ disabled = set([(object, 'first')])
+ results = self.results
+
+ for method, dtype in product(results, dtypes):
+ if (dtype, method) in disabled:
+ continue
+ series = s if dtype is None else s.astype(dtype)
+ _check(series, results[method], method=method)
+
+ def test_rank_methods_series(self):
+ tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
+ import scipy
+ from scipy.stats import rankdata
+
+ xs = np.random.randn(9)
+ xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
+ np.random.shuffle(xs)
+
+ index = [chr(ord('a') + i) for i in range(len(xs))]
+
+ for vals in [xs, xs + 1e6, xs * 1e-6]:
+ ts = Series(vals, index=index)
+
+ for m in ['average', 'min', 'max', 'first', 'dense']:
+ result = ts.rank(method=m)
+ sprank = rankdata(vals, m if m != 'first' else 'ordinal')
+ expected = Series(sprank, index=index)
+
+ if LooseVersion(scipy.__version__) >= '0.17.0':
+ expected = expected.astype('float64')
+ tm.assert_series_equal(result, expected)
+
+ def test_rank_dense_method(self):
+ dtypes = ['O', 'f8', 'i8']
+ in_out = [([1], [1]),
+ ([2], [1]),
+ ([0], [1]),
+ ([2, 2], [1, 1]),
+ ([1, 2, 3], [1, 2, 3]),
+ ([4, 2, 1], [3, 2, 1],),
+ ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
+ ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5])]
+
+ for ser, exp in in_out:
+ for dtype in dtypes:
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='dense')
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
+
+ def test_rank_descending(self):
+ dtypes = ['O', 'f8', 'i8']
+
+ for dtype, method in product(dtypes, self.results):
+ if 'i' in dtype:
+ s = self.s.dropna()
+ else:
+ s = self.s.astype(dtype)
+
+ res = s.rank(ascending=False)
+ expected = (s.max() - s).rank()
+ assert_series_equal(res, expected)
+
+ if method == 'first' and dtype == 'O':
+ continue
+
+ expected = (s.max() - s).rank(method=method)
+ res2 = s.rank(method=method, ascending=False)
+ assert_series_equal(res2, expected)
+
+ def test_rank_int(self):
+ s = self.s.dropna().astype('i8')
+
+ for method, res in compat.iteritems(self.results):
+ result = s.rank(method=method)
+ expected = Series(res).dropna()
+ expected.index = result.index
+ assert_series_equal(result, expected)
+
+ def test_rank_object_bug(self):
+ # GH 13445
+
+ # smoke tests
+ Series([np.nan] * 32).astype(object).rank(ascending=True)
+ Series([np.nan] * 32).astype(object).rank(ascending=False)
diff --git a/pandas/tests/test_stats.py b/pandas/tests/test_stats.py
deleted file mode 100644
index 118c4147a2019..0000000000000
--- a/pandas/tests/test_stats.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-from pandas import compat
-
-from distutils.version import LooseVersion
-from numpy import nan
-import numpy as np
-
-from pandas import Series, DataFrame
-
-from pandas.compat import product
-from pandas.util.testing import (assert_frame_equal, assert_series_equal)
-import pandas.util.testing as tm
-
-
-class TestRank(tm.TestCase):
- s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
- df = DataFrame({'A': s, 'B': s})
-
- results = {
- 'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
- 3.5, 1.5, 8.0, nan, 5.5]),
- 'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
- 'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
- 'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
- 'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
- }
-
- def test_rank_tie_methods(self):
- s = self.s
-
- def _check(s, expected, method='average'):
- result = s.rank(method=method)
- tm.assert_series_equal(result, Series(expected))
-
- dtypes = [None, object]
- disabled = set([(object, 'first')])
- results = self.results
-
- for method, dtype in product(results, dtypes):
- if (dtype, method) in disabled:
- continue
- series = s if dtype is None else s.astype(dtype)
- _check(series, results[method], method=method)
-
- def test_rank_methods_series(self):
- tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
- import scipy
- from scipy.stats import rankdata
-
- xs = np.random.randn(9)
- xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
- np.random.shuffle(xs)
-
- index = [chr(ord('a') + i) for i in range(len(xs))]
-
- for vals in [xs, xs + 1e6, xs * 1e-6]:
- ts = Series(vals, index=index)
-
- for m in ['average', 'min', 'max', 'first', 'dense']:
- result = ts.rank(method=m)
- sprank = rankdata(vals, m if m != 'first' else 'ordinal')
- expected = Series(sprank, index=index)
-
- if LooseVersion(scipy.__version__) >= '0.17.0':
- expected = expected.astype('float64')
- tm.assert_series_equal(result, expected)
-
- def test_rank_methods_frame(self):
- tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
- import scipy
- from scipy.stats import rankdata
-
- xs = np.random.randint(0, 21, (100, 26))
- xs = (xs - 10.0) / 10.0
- cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
-
- for vals in [xs, xs + 1e6, xs * 1e-6]:
- df = DataFrame(vals, columns=cols)
-
- for ax in [0, 1]:
- for m in ['average', 'min', 'max', 'first', 'dense']:
- result = df.rank(axis=ax, method=m)
- sprank = np.apply_along_axis(
- rankdata, ax, vals,
- m if m != 'first' else 'ordinal')
- sprank = sprank.astype(np.float64)
- expected = DataFrame(sprank, columns=cols)
-
- if LooseVersion(scipy.__version__) >= '0.17.0':
- expected = expected.astype('float64')
- tm.assert_frame_equal(result, expected)
-
- def test_rank_dense_method(self):
- dtypes = ['O', 'f8', 'i8']
- in_out = [([1], [1]),
- ([2], [1]),
- ([0], [1]),
- ([2, 2], [1, 1]),
- ([1, 2, 3], [1, 2, 3]),
- ([4, 2, 1], [3, 2, 1],),
- ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
- ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5])]
-
- for ser, exp in in_out:
- for dtype in dtypes:
- s = Series(ser).astype(dtype)
- result = s.rank(method='dense')
- expected = Series(exp).astype(result.dtype)
- assert_series_equal(result, expected)
-
- def test_rank_descending(self):
- dtypes = ['O', 'f8', 'i8']
-
- for dtype, method in product(dtypes, self.results):
- if 'i' in dtype:
- s = self.s.dropna()
- df = self.df.dropna()
- else:
- s = self.s.astype(dtype)
- df = self.df.astype(dtype)
-
- res = s.rank(ascending=False)
- expected = (s.max() - s).rank()
- assert_series_equal(res, expected)
-
- res = df.rank(ascending=False)
- expected = (df.max() - df).rank()
- assert_frame_equal(res, expected)
-
- if method == 'first' and dtype == 'O':
- continue
-
- expected = (s.max() - s).rank(method=method)
- res2 = s.rank(method=method, ascending=False)
- assert_series_equal(res2, expected)
-
- expected = (df.max() - df).rank(method=method)
-
- if dtype != 'O':
- res2 = df.rank(method=method, ascending=False,
- numeric_only=True)
- assert_frame_equal(res2, expected)
-
- res3 = df.rank(method=method, ascending=False,
- numeric_only=False)
- assert_frame_equal(res3, expected)
-
- def test_rank_2d_tie_methods(self):
- df = self.df
-
- def _check2d(df, expected, method='average', axis=0):
- exp_df = DataFrame({'A': expected, 'B': expected})
-
- if axis == 1:
- df = df.T
- exp_df = exp_df.T
-
- result = df.rank(method=method, axis=axis)
- assert_frame_equal(result, exp_df)
-
- dtypes = [None, object]
- disabled = set([(object, 'first')])
- results = self.results
-
- for method, axis, dtype in product(results, [0, 1], dtypes):
- if (dtype, method) in disabled:
- continue
- frame = df if dtype is None else df.astype(dtype)
- _check2d(frame, results[method], method=method, axis=axis)
-
- def test_rank_int(self):
- s = self.s.dropna().astype('i8')
-
- for method, res in compat.iteritems(self.results):
- result = s.rank(method=method)
- expected = Series(res).dropna()
- expected.index = result.index
- assert_series_equal(result, expected)
-
- def test_rank_object_bug(self):
- # GH 13445
-
- # smoke tests
- Series([np.nan] * 32).astype(object).rank(ascending=True)
- Series([np.nan] * 32).astype(object).rank(ascending=False)
| Moves `rank` tests for `DataFrame` and `Series` to respective test suites.
1. Created new `pandas/tests/series/test_rank` and `pandas/tests/frame/test_rank`
2. Moved tests from `pandas/tests/test_stats` to `series` and `frame` version of `test_rank`
3. Moved `rank` tests from `frame/test_analytics` to `frame/test_rank`
4. Moved `rank` tests from `series/test_analytics` to `series/test_rank`
- [X] closes #15640
- [x] tests added / passed
- [X] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15658 | 2017-03-12T01:37:02Z | 2017-03-12T15:16:30Z | null | 2017-03-12T15:16:30Z |
Subclassed reshape clean | diff --git a/doc/source/internals.rst b/doc/source/internals.rst
index 3d96b93de4cc9..6a3a0e7c766b7 100644
--- a/doc/source/internals.rst
+++ b/doc/source/internals.rst
@@ -9,6 +9,7 @@
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
import pandas as pd
+ from pandas import Series, DataFrame
pd.options.display.max_rows = 15
*********
@@ -110,15 +111,15 @@ This section describes how to subclass ``pandas`` data structures to meet more s
Override Constructor Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Each data structure has constructor properties to specifying data constructors. By overriding these properties, you can retain defined-classes through ``pandas`` data manipulations.
+Each data structure has constructor properties for specifying data constructors. By overriding these properties, you can retain defined subclass families through ``pandas`` data manipulations.
There are 3 constructors to be defined:
- ``_constructor``: Used when a manipulation result has the same dimesions as the original.
- ``_constructor_sliced``: Used when a manipulation result has one lower dimension(s) as the original, such as ``DataFrame`` single columns slicing.
-- ``_constructor_expanddim``: Used when a manipulation result has one higher dimension as the original, such as ``Series.to_frame()`` and ``DataFrame.to_panel()``.
+- ``_constructor_expanddim``: Used when a manipulation result has one higher dimension as the original, such as ``Series.to_frame()``.
-Following table shows how ``pandas`` data structures define constructor properties by default.
+The following table shows how ``pandas`` data structures define constructor properties by default.
=========================== ======================= =================== =======================
Property Attributes ``Series`` ``DataFrame`` ``Panel``
@@ -128,68 +129,153 @@ Property Attributes ``Series`` ``DataFrame`` ``Panel
``_constructor_expanddim`` ``DataFrame`` ``Panel`` ``NotImplementedError``
=========================== ======================= =================== =======================
-Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame`` overriding constructor properties.
+The below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame`` classes, overriding the default constructor properties.
-.. code-block:: python
+.. ipython:: python
+
+ In [1]: class SubclassedSeries(Series):
+ ...:
+ ...: @property
+ ...: def _constructor(self):
+ ...: return SubclassedSeries
+ ...:
+ ...: @property
+ ...: def _constructor_expanddim(self):
+ ...: return SubclassedDataFrame
+ ...:
+
+ In [1]: class SubclassedDataFrame(DataFrame):
+ ...:
+ ...: @property
+ ...: def _constructor(self):
+ ...: return SubclassedDataFrame
+ ...:
+ ...: @property
+ ...: def _constructor_sliced(self):
+ ...: return SubclassedSeries
+ ...:
+
+Overriding constructor properties allows subclass families to be preserved across slice and reshape operations:
+
+.. ipython:: python
- class SubclassedSeries(Series):
+ In [1]: ser = SubclassedSeries([1, 2, 3])
+ In [1]: ser
+ Out[1]:
+ 0 1
+ 1 2
+ 2 3
+ dtype: int64
+ In [1]: type(ser)
+ Out[1]:
+ <class '__main__.SubclassedSeries'>
- @property
- def _constructor(self):
- return SubclassedSeries
+ In [1]: to_framed = s.to_frame()
+ In [1]: type(to_framed)
+ Out[1]:
+ <class '__main__.SubclassedDataFrame'>
- @property
- def _constructor_expanddim(self):
- return SubclassedDataFrame
+ In [1]: df = SubclassedDataFrame({
+ ...: 'A': ['a', 'a', 'b', 'b'],
+ ...: 'B': ['x', 'y', 'x', 'y'],
+ ...: 'C': [1, 2, 3, 4]})
+ In [1]: df
+ Out[1]:
+ A B C
+ 0 a x 0
+ 1 a y 1
+ 2 b x 2
+ 3 b y 3
- class SubclassedDataFrame(DataFrame):
+ In [1]: type(df)
+ Out[1]:
+ <class '__main__.SubclassedDataFrame'>
- @property
- def _constructor(self):
- return SubclassedDataFrame
+ In [1]: sliced1 = df[['A', 'B']]
+ In [1]: sliced1
+ Out[1]:
+ A B
+ 0 a x
+ 1 a y
+ 2 b x
+ 3 b y
+ In [1]: type(sliced1)
+ Out[1]:
+ <class '__main__.SubclassedDataFrame'>
- @property
- def _constructor_sliced(self):
- return SubclassedSeries
+ In [1]: sliced2 = df['C']
+ In [1]: sliced2
+ Out[1]:
+ 0 0
+ 1 1
+ 2 2
+ 3 3
+ Name: A, dtype: int64
-.. code-block:: python
+ In [1]: type(sliced2)
+ Out[1]:
+ <class '__main__.SubclassedSeries'>
- >>> s = SubclassedSeries([1, 2, 3])
- >>> type(s)
+ In [1]: stacked = df.stack()
+ In [1]: stacked
+ Out[1]:
+ 0 A a
+ B x
+ C 1
+ 1 A a
+ B y
+ C 2
+ 2 A b
+ B x
+ C 3
+ 3 A b
+ B y
+ C 4
+ dtype: object
+ In [1]: type(stacked)
+ Out[1]:
<class '__main__.SubclassedSeries'>
- >>> to_framed = s.to_frame()
- >>> type(to_framed)
+ In [1]: pivoted = pd.pivot(index='A', columns='B', values='C')
+ In [1]: pivoted
+ Out[1]:
+ B x y
+ A
+ a 1 2
+ b 3 4
+ In [1]: type(pivoted)
+ Out[1]:
<class '__main__.SubclassedDataFrame'>
- >>> df = SubclassedDataFrame({'A', [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
- >>> df
- A B C
- 0 1 4 7
- 1 2 5 8
- 2 3 6 9
+Most data operations also preserve the class:
- >>> type(df)
- <class '__main__.SubclassedDataFrame'>
+.. ipython:: python
- >>> sliced1 = df[['A', 'B']]
- >>> sliced1
- A B
- 0 1 4
- 1 2 5
- 2 3 6
- >>> type(sliced1)
+ In [1]: squared = pivoted**2
+ In [1]: squared
+ Out[1]:
+ B x y
+ A
+ a 1 4
+ b 9 16
+ In [1]: type(pivoted)
+ Out[1]:
<class '__main__.SubclassedDataFrame'>
- >>> sliced2 = df['A']
- >>> sliced2
- 0 1
- 1 2
- 2 3
- Name: A, dtype: int64
- >>> type(sliced2)
+ In [1]: interped = ser.loc[[0, 0.5, 1, 1.5, 2]].interpolate()
+ In [1]: interped
+ Out[1]:
+ 0.0 1.0
+ 0.5 1.5
+ 1.0 2.0
+ 1.5 2.5
+ 2.0 3.0
+ dtype: float64
+ In [1]: type(interped)
+ Out[1]:
<class '__main__.SubclassedSeries'>
+
Define Original Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -200,42 +286,48 @@ To let original data structures have additional properties, you should let ``pan
Below is an example to define 2 original properties, "internal_cache" as a temporary property and "added_property" as a normal property
-.. code-block:: python
-
- class SubclassedDataFrame2(DataFrame):
-
- # temporary properties
- _internal_names = pd.DataFrame._internal_names + ['internal_cache']
- _internal_names_set = set(_internal_names)
-
- # normal properties
- _metadata = ['added_property']
+.. ipython:: python
- @property
- def _constructor(self):
- return SubclassedDataFrame2
+ In [1]: class SubclassedDataFrame2(DataFrame):
+ ...:
+ ...: # temporary properties
+ ...: _internal_names = DataFrame._internal_names + ['internal_cache']
+ ...: _internal_names_set = set(_internal_names)
+ ...:
+ ...: # normal properties
+ ...: _metadata = ['added_property']
+ ...:
+ ...: @property
+ ...: def _constructor(self):
+ ...: return SubclassedDataFrame2
-.. code-block:: python
+.. ipython:: python
- >>> df = SubclassedDataFrame2({'A', [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
- >>> df
+ In [1]: df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ In [1]: df
+ Out[1]:
A B C
0 1 4 7
1 2 5 8
2 3 6 9
- >>> df.internal_cache = 'cached'
- >>> df.added_property = 'property'
+ In [1]: df.internal_cache = 'cached'
+ In [1]: df.added_property = 'property'
+ Out[1]:
- >>> df.internal_cache
+ In [1]: df.internal_cache
+ Out[1]:
cached
- >>> df.added_property
+ In [1]: df.added_property
+ Out[1]:
property
# properties defined in _internal_names is reset after manipulation
- >>> df[['A', 'B']].internal_cache
+ In [1]: df[['A', 'B']].internal_cache
+ Out[1]:
AttributeError: 'SubclassedDataFrame2' object has no attribute 'internal_cache'
# properties defined in _metadata are retained
- >>> df[['A', 'B']].added_property
+ In [1]: df[['A', 'B']].added_property
+ Out[1]:
property
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a0bf2f9b3758a..f6d5e3df814fc 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1,39 +1,23 @@
.. _whatsnew_0200:
-v0.20.1 (May 5, 2017)
----------------------
+v0.20.0 (????, 2017)
+--------------------
-This is a major release from 0.19.2 and includes a number of API changes, deprecations, new features,
+This is a major release from 0.19 and includes a small number of API changes, several new features,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
Highlights include:
-- New ``.agg()`` API for Series/DataFrame similar to the groupby-rolling-resample API's, see :ref:`here <whatsnew_0200.enhancements.agg>`
-- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
+- Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`)
- The ``.ix`` indexer has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_ix>`
-- ``Panel`` has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_panel>`
-- Addition of an ``IntervalIndex`` and ``Interval`` scalar type, see :ref:`here <whatsnew_0200.enhancements.intervalindex>`
-- Improved user API when grouping by index levels in ``.groupby()``, see :ref:`here <whatsnew_0200.enhancements.groupby_access>`
-- Improved support for ``UInt64`` dtypes, see :ref:`here <whatsnew_0200.enhancements.uint64_support>`
-- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec and that gives the possibility for a more interactive repr in the Jupyter Notebook, see :ref:`here <whatsnew_0200.enhancements.table_schema>`
-- Experimental support for exporting styled DataFrames (``DataFrame.style``) to Excel, see :ref:`here <whatsnew_0200.enhancements.style_excel>`
-- Window binary corr/cov operations now return a MultiIndexed ``DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated, see :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>`
-- Support for S3 handling now uses ``s3fs``, see :ref:`here <whatsnew_0200.api_breaking.s3>`
-- Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here <whatsnew_0200.api_breaking.gbq>`
+- Switched the test framework to `pytest`_ (:issue:`13097`)
+- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref:`here <whatsnew_0200.enhancements.table_schema>`
-.. warning::
-
- Pandas has changed the internal structure and layout of the codebase.
- This can affect imports that are not from the top-level ``pandas.*`` namespace, please see the changes :ref:`here <whatsnew_0200.privacy>`.
+.. _pytest: http://doc.pytest.org/en/latest/
Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating.
-.. note::
-
- This is a combined release for 0.20.0 and and 0.20.1.
- Version 0.20.1 contains one additional change for backwards-compatibility with downstream projects using pandas' ``utils`` routines. (:issue:`16250`)
-
.. contents:: What's new in v0.20.0
:local:
:backlinks: none
@@ -43,81 +27,22 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations
New features
~~~~~~~~~~~~
-.. _whatsnew_0200.enhancements.agg:
-
-``agg`` API for DataFrame/Series
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Series & DataFrame have been enhanced to support the aggregation API. This is a familiar API
-from groupby, window operations, and resampling. This allows aggregation operations in a concise way
-by using :meth:`~DataFrame.agg` and :meth:`~DataFrame.transform`. The full documentation
-is :ref:`here <basics.aggregate>` (:issue:`1623`).
-
-Here is a sample
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
- index=pd.date_range('1/1/2000', periods=10))
- df.iloc[3:7] = np.nan
- df
-
-One can operate using string function names, callables, lists, or dictionaries of these.
-
-Using a single function is equivalent to ``.apply``.
-
-.. ipython:: python
-
- df.agg('sum')
-
-Multiple aggregations with a list of functions.
-
-.. ipython:: python
-
- df.agg(['sum', 'min'])
-
-Using a dict provides the ability to apply specific aggregations per column.
-You will get a matrix-like output of all of the aggregators. The output has one column
-per unique function. Those functions applied to a particular column will be ``NaN``:
-
-.. ipython:: python
-
- df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
-
-The API also supports a ``.transform()`` function for broadcasting results.
-
-.. ipython:: python
- :okwarning:
-
- df.transform(['abs', lambda x: x - x.min()])
-
-When presented with mixed dtypes that cannot be aggregated, ``.agg()`` will only take the valid
-aggregations. This is similiar to how groupby ``.agg()`` works. (:issue:`15015`)
-
-.. ipython:: python
-
- df = pd.DataFrame({'A': [1, 2, 3],
- 'B': [1., 2., 3.],
- 'C': ['foo', 'bar', 'baz'],
- 'D': pd.date_range('20130101', periods=3)})
- df.dtypes
-
-.. ipython:: python
-
- df.agg(['min', 'sum'])
.. _whatsnew_0200.enhancements.dataio_dtype:
``dtype`` keyword for data IO
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``'python'`` engine for :func:`read_csv`, as well as the :func:`read_fwf` function for parsing
-fixed-width text files and :func:`read_excel` for parsing Excel files, now accept the ``dtype`` keyword argument for specifying the types of specific columns (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information.
+The ``dtype`` keyword argument in the :func:`read_csv` function for specifying the types of parsed columns is now supported with the ``'python'`` engine (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information.
.. ipython:: python
- :suppress:
- from pandas.compat import StringIO
+ data = "a,b\n1,2\n3,4"
+ pd.read_csv(StringIO(data), engine='python').dtypes
+ pd.read_csv(StringIO(data), engine='python', dtype={'a':'float64', 'b':'object'}).dtypes
+
+The ``dtype`` keyword argument is also now supported in the :func:`read_fwf` function for parsing
+fixed-width text files, and :func:`read_excel` for parsing Excel files.
.. ipython:: python
@@ -125,34 +50,12 @@ fixed-width text files and :func:`read_excel` for parsing Excel files, now accep
pd.read_fwf(StringIO(data)).dtypes
pd.read_fwf(StringIO(data), dtype={'a':'float64', 'b':'object'}).dtypes
-.. _whatsnew_0120.enhancements.datetime_origin:
-
-``.to_datetime()`` has gained an ``origin`` parameter
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-:func:`to_datetime` has gained a new parameter, ``origin``, to define a reference date
-from where to compute the resulting timestamps when parsing numerical values with a specific ``unit`` specified. (:issue:`11276`, :issue:`11745`)
-
-For example, with 1960-01-01 as the starting date:
-
-.. ipython:: python
-
- pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
-
-The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``, which is
-commonly called 'unix epoch' or POSIX time. This was the previous default, so this is a backward compatible change.
-
-.. ipython:: python
-
- pd.to_datetime([1, 2, 3], unit='D')
-
-
.. _whatsnew_0200.enhancements.groupby_access:
Groupby Enhancements
^^^^^^^^^^^^^^^^^^^^
-Strings passed to ``DataFrame.groupby()`` as the ``by`` parameter may now reference either column names or index level names. Previously, only column names could be referenced. This allows to easily group by a column and index level at the same time. (:issue:`5677`)
+Strings passed to ``DataFrame.groupby()`` as the ``by`` parameter may now reference either column names or index level names (:issue:`5677`)
.. ipython:: python
@@ -168,7 +71,6 @@ Strings passed to ``DataFrame.groupby()`` as the ``by`` parameter may now refere
df.groupby(['second', 'A']).sum()
-
.. _whatsnew_0200.enhancements.compressed_urls:
Better support for compressed URLs in ``read_csv``
@@ -178,8 +80,8 @@ The compression code was refactored (:issue:`12688`). As a result, reading
dataframes from URLs in :func:`read_csv` or :func:`read_table` now supports
additional compression methods: ``xz``, ``bz2``, and ``zip`` (:issue:`14570`).
Previously, only ``gzip`` compression was supported. By default, compression of
-URLs and paths are now inferred using their file extensions. Additionally,
-support for bz2 compression in the python 2 C-engine improved (:issue:`14874`).
+URLs and paths are now both inferred using their file extensions. Additionally,
+support for bz2 compression in the python 2 c-engine improved (:issue:`14874`).
.. ipython:: python
@@ -197,10 +99,10 @@ support for bz2 compression in the python 2 C-engine improved (:issue:`14874`).
Pickle file I/O now supports compression
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-:func:`read_pickle`, :meth:`DataFrame.to_pickle` and :meth:`Series.to_pickle`
+:func:`read_pickle`, :meth:`DataFame.to_pickle` and :meth:`Series.to_pickle`
can now read from and write to compressed pickle files. Compression methods
can be an explicit parameter or be inferred from the file extension.
-See :ref:`the docs here. <io.pickle.compression>`
+See :ref:`the docs here <io.pickle.compression>`
.. ipython:: python
@@ -215,24 +117,33 @@ Using an explicit compression type
df.to_pickle("data.pkl.compress", compression="gzip")
rt = pd.read_pickle("data.pkl.compress", compression="gzip")
- rt.head()
+ rt
+
+Inferring compression type from the extension
+
+.. ipython:: python
+
+ df.to_pickle("data.pkl.xz", compression="infer")
+ rt = pd.read_pickle("data.pkl.xz", compression="infer")
+ rt
-The default is to infer the compression type from the extension (``compression='infer'``):
+The default is to 'infer
.. ipython:: python
df.to_pickle("data.pkl.gz")
rt = pd.read_pickle("data.pkl.gz")
- rt.head()
+ rt
df["A"].to_pickle("s1.pkl.bz2")
rt = pd.read_pickle("s1.pkl.bz2")
- rt.head()
+ rt
.. ipython:: python
:suppress:
import os
os.remove("data.pkl.compress")
+ os.remove("data.pkl.xz")
os.remove("data.pkl.gz")
os.remove("s1.pkl.bz2")
@@ -278,7 +189,7 @@ In previous versions, ``.groupby(..., sort=False)`` would fail with a ``ValueErr
ordered=True)})
df
-**Previous Behavior**:
+Previous Behavior:
.. code-block:: ipython
@@ -286,7 +197,7 @@ In previous versions, ``.groupby(..., sort=False)`` would fail with a ``ValueErr
---------------------------------------------------------------------------
ValueError: items in new_categories are not the same as in old categories
-**New Behavior**:
+New Behavior:
.. ipython:: python
@@ -312,7 +223,7 @@ the data.
df.to_json(orient='table')
-See :ref:`IO: Table Schema for more information <io.table_schema>`.
+See :ref:`IO: Table Schema for more<io.table_schema>`.
Additionally, the repr for ``DataFrame`` and ``Series`` can now publish
this JSON Table schema representation of the Series or DataFrame if you are
@@ -321,7 +232,7 @@ protocol).
This gives frontends like the Jupyter notebook and `nteract`_
more flexiblity in how they display pandas objects, since they have
more information about the data.
-You must enable this by setting the ``display.html.table_schema`` option to ``True``.
+You must enable this by setting the ``display.html.table_schema`` option to True.
.. _Table Schema: http://specs.frictionlessdata.io/json-table-schema/
.. _nteract: http://nteract.io/
@@ -352,184 +263,113 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
sdf.to_coo()
-.. _whatsnew_0200.enhancements.style_excel:
-
-Excel output for styled DataFrames
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Experimental support has been added to export ``DataFrame.style`` formats to Excel using the ``openpyxl`` engine. (:issue:`15530`)
+.. _whatsnew_0200.enhancements.other:
-For example, after running the following, ``styled.xlsx`` renders as below:
+Other enhancements
+^^^^^^^^^^^^^^^^^^
-.. ipython:: python
- :okwarning:
-
- np.random.seed(24)
- df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
- df = pd.concat([df, pd.DataFrame(np.random.RandomState(24).randn(10, 4),
- columns=list('BCDE'))],
- axis=1)
- df.iloc[0, 2] = np.nan
- df
- styled = df.style.\
- applymap(lambda val: 'color: %s' % 'red' if val < 0 else 'black').\
- highlight_max()
- styled.to_excel('styled.xlsx', engine='openpyxl')
+- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
+- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`)
+- ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`)
-.. image:: _static/style-excel.png
-.. ipython:: python
- :suppress:
+- ``Series.sort_index`` accepts parameters ``kind`` and ``na_position`` (:issue:`13589`, :issue:`14444`)
- import os
- os.remove('styled.xlsx')
+- ``DataFrame`` has gained a ``nunique()`` method to count the distinct values over an axis (:issue:`14336`).
+- ``DataFrame.groupby()`` has gained a ``.nunique()`` method to count the distinct values for all columns within each group (:issue:`14336`, :issue:`15197`).
-See the :ref:`Style documentation <style.ipynb#Export-to-Excel>` for more detail.
+- ``pd.read_excel`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`)
+- Multiple offset aliases with decimal points are now supported (e.g. '0.5min' is parsed as '30s') (:issue:`8419`)
+- ``.isnull()`` and ``.notnull()`` have been added to ``Index`` object to make them more consistent with the ``Series`` API (:issue:`15300`)
-.. _whatsnew_0200.enhancements.intervalindex:
+- New ``UnsortedIndexError`` (subclass of ``KeyError``) raised when indexing/slicing into an
+ unsorted MultiIndex (:issue:`11897`). This allows differentiation between errors due to lack
+ of sorting or an incorrect key. See :ref:`here <advanced.unsorted>`
+- ``MultiIndex`` has gained a ``.to_frame()`` method to convert to a ``DataFrame`` (:issue:`12397`)
+- ``pd.cut`` and ``pd.qcut`` now support datetime64 and timedelta64 dtypes (:issue:`14714`, :issue:`14798`)
+- ``pd.qcut`` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`7751`)
+- ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`)
+- The ``usecols`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`14154`)
+- The ``skiprows`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`10882`)
+- ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`)
+- ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`)
+- ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`)
+- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
+- ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`)
+- The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements
+- ``pd.merge_asof()`` gained the option ``direction='backward'|'forward'|'nearest'`` (:issue:`14887`)
+- ``Series/DataFrame.asfreq()`` have gained a ``fill_value`` parameter, to fill missing values (:issue:`3715`).
+- ``Series/DataFrame.resample.asfreq`` have gained a ``fill_value`` parameter, to fill missing values during resampling (:issue:`3715`).
+- ``pandas.tools.hashing`` has gained a ``hash_tuples`` routine, and ``hash_pandas_object`` has gained the ability to hash a ``MultiIndex`` (:issue:`15224`)
+- ``Series/DataFrame.squeeze()`` have gained the ``axis`` parameter. (:issue:`15339`)
+- ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`)
+- HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`)
+- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`)
+- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
+- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
+- ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. (:issue:`15536`)
-IntervalIndex
-^^^^^^^^^^^^^
+.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
-pandas has gained an ``IntervalIndex`` with its own dtype, ``interval`` as well as the ``Interval`` scalar type. These allow first-class support for interval
-notation, specifically as a return type for the categories in :func:`cut` and :func:`qcut`. The ``IntervalIndex`` allows some unique indexing, see the
-:ref:`docs <indexing.intervallindex>`. (:issue:`7640`, :issue:`8625`)
-.. warning::
+.. _whatsnew_0200.api_breaking:
- These indexing behaviors of the IntervalIndex are provisional and may change in a future version of pandas. Feedback on usage is welcome.
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _whatsnew_0200.api_breaking.deprecate_ix:
-Previous behavior:
+Deprecate .ix
+^^^^^^^^^^^^^
-The returned categories were strings, representing Intervals
+The ``.ix`` indexer is deprecated, in favor of the more strict ``.iloc`` and ``.loc`` indexers. ``.ix`` offers a lot of magic on the inference of what the user wants to do. To wit, ``.ix`` can decide to index *positionally* OR via *labels*, depending on the data type of the index. This has caused quite a bit of user confusion over the years. The full indexing documentation are :ref:`here <indexing>`. (:issue:`14218`)
-.. code-block:: ipython
- In [1]: c = pd.cut(range(4), bins=2)
+The recommended methods of indexing are:
- In [2]: c
- Out[2]:
- [(-0.003, 1.5], (-0.003, 1.5], (1.5, 3], (1.5, 3]]
- Categories (2, object): [(-0.003, 1.5] < (1.5, 3]]
+- ``.loc`` if you want to *label* index
+- ``.iloc`` if you want to *positionally* index.
- In [3]: c.categories
- Out[3]: Index(['(-0.003, 1.5]', '(1.5, 3]'], dtype='object')
+Using ``.ix`` will now show a ``DeprecationWarning`` with a link to some examples of how to convert code :ref:`here <indexing.deprecate_ix>`.
-New behavior:
.. ipython:: python
- c = pd.cut(range(4), bins=2)
- c
- c.categories
-
-Furthermore, this allows one to bin *other* data with these same bins, with ``NaN`` representing a missing
-value similar to other dtypes.
-
-.. ipython:: python
+ df = pd.DataFrame({'A': [1, 2, 3],
+ 'B': [4, 5, 6]},
+ index=list('abc'))
- pd.cut([0, 3, 5, 1], bins=c.categories)
+ df
-An ``IntervalIndex`` can also be used in ``Series`` and ``DataFrame`` as the index.
+Previous Behavior, where you wish to get the 0th and the 2nd elements from the index in the 'A' column.
-.. ipython:: python
+.. code-block:: ipython
- df = pd.DataFrame({'A': range(4),
- 'B': pd.cut([0, 3, 1, 1], bins=c.categories)}
- ).set_index('B')
- df
+ In [3]: df.ix[[0, 2], 'A']
+ Out[3]:
+ a 1
+ c 3
+ Name: A, dtype: int64
-Selecting via a specific interval:
+Using ``.loc``. Here we will select the appropriate indexes from the index, then use *label* indexing.
.. ipython:: python
- df.loc[pd.Interval(1.5, 3.0)]
+ df.loc[df.index[[0, 2]], 'A']
-Selecting via a scalar value that is contained *in* the intervals.
+Using ``.iloc``. Here we will get the location of the 'A' column, then use *positional* indexing to select things.
.. ipython:: python
- df.loc[0]
-
-.. _whatsnew_0200.enhancements.other:
-
-Other Enhancements
-^^^^^^^^^^^^^^^^^^
-
-- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closedness. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`)
-- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
-- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`)
-- ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`)
-- ``Series.sort_index`` accepts parameters ``kind`` and ``na_position`` (:issue:`13589`, :issue:`14444`)
-- ``DataFrame`` and ``DataFrame.groupby()`` have gained a ``nunique()`` method to count the distinct values over an axis (:issue:`14336`, :issue:`15197`).
-- ``DataFrame`` has gained a ``melt()`` method, equivalent to ``pd.melt()``, for unpivoting from a wide to long format (:issue:`12640`).
-- ``pd.read_excel()`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`)
-- Multiple offset aliases with decimal points are now supported (e.g. ``0.5min`` is parsed as ``30s``) (:issue:`8419`)
-- ``.isnull()`` and ``.notnull()`` have been added to ``Index`` object to make them more consistent with the ``Series`` API (:issue:`15300`)
-- New ``UnsortedIndexError`` (subclass of ``KeyError``) raised when indexing/slicing into an
- unsorted MultiIndex (:issue:`11897`). This allows differentiation between errors due to lack
- of sorting or an incorrect key. See :ref:`here <advanced.unsorted>`
-- ``MultiIndex`` has gained a ``.to_frame()`` method to convert to a ``DataFrame`` (:issue:`12397`)
-- ``pd.cut`` and ``pd.qcut`` now support datetime64 and timedelta64 dtypes (:issue:`14714`, :issue:`14798`)
-- ``pd.qcut`` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`7751`)
-- ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`)
-- The ``usecols`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`14154`)
-- The ``skiprows`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`10882`)
-- The ``nrows`` and ``chunksize`` arguments in ``pd.read_csv()`` are supported if both are passed (:issue:`6774`, :issue:`15755`)
-- ``DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`)
-- ``DataFrame.plot`` can pass the matplotlib 2.0 default color cycle as a single string as color parameter, see `here <http://matplotlib.org/2.0.0/users/colors.html#cn-color-selection>`__. (:issue:`15516`)
-- ``Series.interpolate()`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`)
-- Addition of a ``level`` keyword to ``DataFrame/Series.rename`` to rename
- labels in the specified level of a MultiIndex (:issue:`4160`).
-- ``DataFrame.reset_index()`` will now interpret a tuple ``index.name`` as a key spanning across levels of ``columns``, if this is a ``MultiIndex`` (:issue:`16164`)
-- ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`)
-- ``.select_dtypes()`` now allows the string ``datetimetz`` to generically select datetimes with tz (:issue:`14910`)
-- The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements
-- ``pd.merge_asof()`` gained the option ``direction='backward'|'forward'|'nearest'`` (:issue:`14887`)
-- ``Series/DataFrame.asfreq()`` have gained a ``fill_value`` parameter, to fill missing values (:issue:`3715`).
-- ``Series/DataFrame.resample.asfreq`` have gained a ``fill_value`` parameter, to fill missing values during resampling (:issue:`3715`).
-- :func:`pandas.util.hash_pandas_object` has gained the ability to hash a ``MultiIndex`` (:issue:`15224`)
-- ``Series/DataFrame.squeeze()`` have gained the ``axis`` parameter. (:issue:`15339`)
-- ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`)
-- ``pd.read_html()`` will parse multiple header rows, creating a MutliIndex header. (:issue:`13434`).
-- HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`)
-- :class:`pandas.io.formats.style.Styler` template now has blocks for easier extension, :ref:`see the example notebook <style.ipynb#Subclassing>` (:issue:`15649`)
-- :meth:`Styler.render() <pandas.io.formats.style.Styler.render>` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`)
-- Compatibility with Jupyter notebook 5.0; MultiIndex column labels are left-aligned and MultiIndex row-labels are top-aligned (:issue:`15379`)
-- ``TimedeltaIndex`` now has a custom date-tick formatter specifically designed for nanosecond level precision (:issue:`8711`)
-- ``pd.api.types.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
-- ``DataFrame.to_latex()`` and ``DataFrame.to_string()`` now allow optional header aliases. (:issue:`15536`)
-- Re-enable the ``parse_dates`` keyword of ``pd.read_excel()`` to parse string columns as dates (:issue:`14326`)
-- Added ``.empty`` property to subclasses of ``Index``. (:issue:`15270`)
-- Enabled floor division for ``Timedelta`` and ``TimedeltaIndex`` (:issue:`15828`)
-- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
-- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
-- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
-- :meth:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels <advanced.shown_levels>`. (:issue:`15694`)
-- ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`)
-- ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`)
-- The ``display.show_dimensions`` option can now also be used to specify
- whether the length of a ``Series`` should be shown in its repr (:issue:`7117`).
-- ``parallel_coordinates()`` has gained a ``sort_labels`` keyword argument that sorts class labels and the colors assigned to them (:issue:`15908`)
-- Options added to allow one to turn on/off using ``bottleneck`` and ``numexpr``, see :ref:`here <basics.accelerate>` (:issue:`16157`)
-- ``DataFrame.style.bar()`` now accepts two more options to further customize the bar chart. Bar alignment is set with ``align='left'|'mid'|'zero'``, the default is "left", which is backward compatible; You can now pass a list of ``color=[color_negative, color_positive]``. (:issue:`14757`)
-
-
-.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
-
-
-.. _whatsnew_0200.api_breaking:
+ df.iloc[[0, 2], df.columns.get_loc('A')]
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _whatsnew.api_breaking.io_compat:
-Possible incompatibility for HDF5 formats created with pandas < 0.13.0
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Possible incompat for HDF5 formats for pandas < 0.13.0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-``pd.TimeSeries`` was deprecated officially in 0.17.0, though has already been an alias since 0.13.0. It has
+``pd.TimeSeries`` was deprecated officially in 0.17.0, though has only been an alias since 0.13.0. It has
been dropped in favor of ``pd.Series``. (:issue:`15098`).
This *may* cause HDF5 files that were created in prior versions to become unreadable if ``pd.TimeSeries``
@@ -630,115 +470,13 @@ New Behavior:
s.map(lambda x: x.hour)
-
-.. _whatsnew_0200.api_breaking.index_dt_field:
-
-Accessing datetime fields of Index now return Index
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The datetime-related attributes (see :ref:`here <timeseries.components>`
-for an overview) of ``DatetimeIndex``, ``PeriodIndex`` and ``TimedeltaIndex`` previously
-returned numpy arrays. They will now return a new ``Index`` object, except
-in the case of a boolean field, where the result will still be a boolean ndarray. (:issue:`15022`)
-
-Previous behaviour:
-
-.. code-block:: ipython
-
- In [1]: idx = pd.date_range("2015-01-01", periods=5, freq='10H')
-
- In [2]: idx.hour
- Out[2]: array([ 0, 10, 20, 6, 16], dtype=int32)
-
-New Behavior:
-
-.. ipython:: python
-
- idx = pd.date_range("2015-01-01", periods=5, freq='10H')
- idx.hour
-
-This has the advantage that specific ``Index`` methods are still available on the
-result. On the other hand, this might have backward incompatibilities: e.g.
-compared to numpy arrays, ``Index`` objects are not mutable. To get the original
-ndarray, you can always convert explicitly using ``np.asarray(idx.hour)``.
-
-.. _whatsnew_0200.api_breaking.unique:
-
-pd.unique will now be consistent with extension types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In prior versions, using :meth:`Series.unique` and :func:`pandas.unique` on ``Categorical`` and tz-aware
-data-types would yield different return types. These are now made consistent. (:issue:`15903`)
-
-- Datetime tz-aware
-
- Previous behaviour:
-
- .. code-block:: ipython
-
- # Series
- In [5]: pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]).unique()
- Out[5]: array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object)
-
- In [6]: pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]))
- Out[6]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]')
-
- # Index
- In [7]: pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]).unique()
- Out[7]: DatetimeIndex(['2016-01-01 00:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None)
-
- In [8]: pd.unique([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')])
- Out[8]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]')
-
- New Behavior:
-
- .. ipython:: python
-
- # Series, returns an array of Timestamp tz-aware
- pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]).unique()
- pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]))
-
- # Index, returns a DatetimeIndex
- pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]).unique()
- pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
- pd.Timestamp('20160101', tz='US/Eastern')]))
-
-- Categoricals
-
- Previous behaviour:
-
- .. code-block:: ipython
-
- In [1]: pd.Series(list('baabc'), dtype='category').unique()
- Out[1]:
- [b, a, c]
- Categories (3, object): [b, a, c]
-
- In [2]: pd.unique(pd.Series(list('baabc'), dtype='category'))
- Out[2]: array(['b', 'a', 'c'], dtype=object)
-
- New Behavior:
-
- .. ipython:: python
-
- # returns a Categorical
- pd.Series(list('baabc'), dtype='category').unique()
- pd.unique(pd.Series(list('baabc'), dtype='category'))
-
.. _whatsnew_0200.api_breaking.s3:
S3 File Handling
^^^^^^^^^^^^^^^^
pandas now uses `s3fs <http://s3fs.readthedocs.io/>`_ for handling S3 connections. This shouldn't break
-any code. However, since ``s3fs`` is not a required dependency, you will need to install it separately, like ``boto``
+any code. However, since s3fs is not a required dependency, you will need to install it separately, like ``boto``
in prior versions of pandas. (:issue:`11915`).
.. _whatsnew_0200.api_breaking.partial_string_indexing:
@@ -746,7 +484,7 @@ in prior versions of pandas. (:issue:`11915`).
Partial String Indexing Changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-:ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>` now works as an exact match, provided that string resolution coincides with index resolution, including a case when both are seconds (:issue:`14826`). See :ref:`Slice vs. Exact Match <timeseries.slice_vs_exact_match>` for details.
+:ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>` now works as an exact match, provided that string resolution coincides with index resolution, including a case when both are seconds (:issue:`14826`). See :ref:`Slice vs. Exact Match <timeseries.slice_vs_exact_match>` for details.
.. ipython:: python
@@ -778,45 +516,14 @@ New Behavior:
In [5]: df['a']['2011-12-31 23:59:59']
Out[5]: 1
-.. _whatsnew_0200.api_breaking.concat_dtypes:
-
-Concat of different float dtypes will not automatically upcast
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Previously, ``concat`` of multiple objects with different ``float`` dtypes would automatically upcast results to a dtype of ``float64``.
-Now the smallest acceptable dtype will be used (:issue:`13247`)
-
-.. ipython:: python
-
- df1 = pd.DataFrame(np.array([1.0], dtype=np.float32, ndmin=2))
- df1.dtypes
-
- df2 = pd.DataFrame(np.array([np.nan], dtype=np.float32, ndmin=2))
- df2.dtypes
-
-Previous Behavior:
-
-.. code-block:: ipython
-
- In [7]: pd.concat([df1, df2]).dtypes
- Out[7]:
- 0 float64
- dtype: object
-
-New Behavior:
-
-.. ipython:: python
-
- pd.concat([df1, df2]).dtypes
-
.. _whatsnew_0200.api_breaking.gbq:
Pandas Google BigQuery support has moved
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-pandas has split off Google BigQuery support into a separate package ``pandas-gbq``. You can ``conda install pandas-gbq -c conda-forge`` or
-``pip install pandas-gbq`` to get it. The functionality of :func:`read_gbq` and :meth:`DataFrame.to_gbq` remain the same with the
-currently released version of ``pandas-gbq=0.1.4``. Documentation is now hosted `here <https://pandas-gbq.readthedocs.io/>`__ (:issue:`15347`)
+pandas has split off Google BigQuery support into a separate package ``pandas-gbq``. You can ``pip install pandas-gbq`` to get it.
+The functionality of :func:`read_gbq` and :meth:`DataFrame.to_gbq` remain the same with the currently released version of ``pandas-gbq=0.1.3``.
+Documentation is now hosted `here <https://pandas-gbq.readthedocs.io/>`__ (:issue:`15347`)
.. _whatsnew_0200.api_breaking.memory_usage:
@@ -855,73 +562,33 @@ New Behavior:
In [11]: index.memory_usage(deep=True)
Out[11]: 260
-.. _whatsnew_0200.api_breaking.sort_index:
-
-DataFrame.sort_index changes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In certain cases, calling ``.sort_index()`` on a MultiIndexed DataFrame would return the *same* DataFrame without seeming to sort.
-This would happen with a ``lexsorted``, but non-monotonic levels. (:issue:`15622`, :issue:`15687`, :issue:`14015`, :issue:`13431`, :issue:`15797`)
-
-This is *unchanged* from prior versions, but shown for illustration purposes:
-
-.. ipython:: python
-
- df = DataFrame(np.arange(6), columns=['value'], index=MultiIndex.from_product([list('BA'), range(3)]))
- df
-
-.. ipython:: python
-
- df.index.is_lexsorted()
- df.index.is_monotonic
-
-Sorting works as expected
-
-.. ipython:: python
-
- df.sort_index()
-
-.. ipython:: python
-
- df.sort_index().index.is_lexsorted()
- df.sort_index().index.is_monotonic
-
-However, this example, which has a non-monotonic 2nd level,
-doesn't behave as desired.
-
-.. ipython:: python
-
- df = pd.DataFrame(
- {'value': [1, 2, 3, 4]},
- index=pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
- labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
- df
-
-Previous Behavior:
+.. _whatsnew_0200.api_breaking.extensions:
-.. code-block:: python
-
- In [11]: df.sort_index()
- Out[11]:
- value
- a bb 1
- aa 2
- b bb 3
- aa 4
-
- In [14]: df.sort_index().index.is_lexsorted()
- Out[14]: True
-
- In [15]: df.sort_index().index.is_monotonic
- Out[15]: False
+Extension Modules Moved
+^^^^^^^^^^^^^^^^^^^^^^^
-New Behavior:
+Some formerly public c/c++/cython extension modules have been moved and/or renamed. These are all removed from the public API.
+If indicated, a deprecation warning will be issued if you reference that module. (:issue:`12588`)
-.. ipython:: python
+.. csv-table::
+ :header: "Previous Location", "New Location", "Deprecated"
+ :widths: 30, 30, 4
- df.sort_index()
- df.sort_index().index.is_lexsorted()
- df.sort_index().index.is_monotonic
+ "pandas.lib", "pandas._libs.lib", "X"
+ "pandas.tslib", "pandas._libs.tslib", "X"
+ "pandas._join", "pandas._libs.join", ""
+ "pandas._period", "pandas._libs.period", ""
+ "pandas.msgpack", "pandas.io.msgpack", ""
+ "pandas.index", "pandas._libs.index", ""
+ "pandas.algos", "pandas._libs.algos", ""
+ "pandas.hashtable", "pandas._libs.hashtable", ""
+ "pandas.json", "pandas.io.json.libjson", "X"
+ "pandas.parser", "pandas.io.libparsers", "X"
+ "pandas.io.sas.saslib", "pandas.io.sas.libsas", ""
+ "pandas._testing", "pandas.util.libtesting", ""
+ "pandas._sparse", "pandas.sparse.libsparse", ""
+ "pandas._hash", "pandas.tools.libhash", ""
+ "pandas._window", "pandas.core.libwindow", ""
.. _whatsnew_0200.api_breaking.groupby_describe:
@@ -977,79 +644,27 @@ New Behavior:
df.groupby('A').agg([np.mean, np.std, np.min, np.max])
-.. _whatsnew_0200.api_breaking.rolling_pairwise:
+.. _whatsnew_0200.api_breaking.hdfstore_where:
-Window Binary Corr/Cov operations return a MultiIndex DataFrame
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A binary window operation, like ``.corr()`` or ``.cov()``, when operating on a ``.rolling(..)``, ``.expanding(..)``, or ``.ewm(..)`` object,
-will now return a 2-level ``MultiIndexed DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated,
-see :ref:`here <whatsnew_0200.api_breaking.deprecate_panel>`. These are equivalent in function,
-but a MultiIndexed ``DataFrame`` enjoys more support in pandas.
-See the section on :ref:`Windowed Binary Operations <stats.moments.binary>` for more information. (:issue:`15677`)
-
-.. ipython:: python
-
- np.random.seed(1234)
- df = pd.DataFrame(np.random.rand(100, 2),
- columns=pd.Index(['A', 'B'], name='bar'),
- index=pd.date_range('20160101',
- periods=100, freq='D', name='foo'))
- df.tail()
-
-Previous Behavior:
-
-.. code-block:: ipython
-
- In [2]: df.rolling(12).corr()
- Out[2]:
- <class 'pandas.core.panel.Panel'>
- Dimensions: 100 (items) x 2 (major_axis) x 2 (minor_axis)
- Items axis: 2016-01-01 00:00:00 to 2016-04-09 00:00:00
- Major_axis axis: A to B
- Minor_axis axis: A to B
-
-New Behavior:
-
-.. ipython:: python
-
- res = df.rolling(12).corr()
- res.tail()
-
-Retrieving a correlation matrix for a cross-section
-
-.. ipython:: python
-
- df.rolling(12).corr().loc['2016-04-07']
-
-.. _whatsnew_0200.api_breaking.hdfstore_where:
-
-HDFStore where string comparison
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+HDFStore where string comparison
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In previous versions most types could be compared to string column in a ``HDFStore``
-usually resulting in an invalid comparison, returning an empty result frame. These comparisons will now raise a
+usually resulting in an invalid comparison. These comparisons will now raise a
``TypeError`` (:issue:`15492`)
-.. ipython:: python
-
- df = pd.DataFrame({'unparsed_date': ['2014-01-01', '2014-01-01']})
- df.to_hdf('store.h5', 'key', format='table', data_columns=True)
- df.dtypes
-
-Previous Behavior:
+New Behavior:
.. code-block:: ipython
- In [4]: pd.read_hdf('store.h5', 'key', where='unparsed_date > ts')
- File "<string>", line 1
- (unparsed_date > 1970-01-01 00:00:01.388552400)
- ^
- SyntaxError: invalid token
+ In [15]: df = pd.DataFrame({'unparsed_date': ['2014-01-01', '2014-01-01']})
-New Behavior:
+ In [16]: df.dtypes
+ Out[16]:
+ unparsed_date object
+ dtype: object
-.. code-block:: ipython
+ In [17]: df.to_hdf('store.h5', 'key', format='table', data_columns=True)
In [18]: ts = pd.Timestamp('2014-01-01')
@@ -1057,100 +672,6 @@ New Behavior:
TypeError: Cannot compare 2014-01-01 00:00:00 of
type <class 'pandas.tslib.Timestamp'> to string column
-.. ipython:: python
- :suppress:
-
- import os
- os.remove('store.h5')
-
-.. _whatsnew_0200.api_breaking.index_order:
-
-Index.intersection and inner join now preserve the order of the left Index
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-:meth:`Index.intersection` now preserves the order of the calling ``Index`` (left)
-instead of the other ``Index`` (right) (:issue:`15582`). This affects inner
-joins, :meth:`DataFrame.join` and :func:`merge`, and the ``.align`` method.
-
-- ``Index.intersection``
-
- .. ipython:: python
-
- left = pd.Index([2, 1, 0])
- left
- right = pd.Index([1, 2, 3])
- right
-
- Previous Behavior:
-
- .. code-block:: ipython
-
- In [4]: left.intersection(right)
- Out[4]: Int64Index([1, 2], dtype='int64')
-
- New Behavior:
-
- .. ipython:: python
-
- left.intersection(right)
-
-- ``DataFrame.join`` and ``pd.merge``
-
- .. ipython:: python
-
- left = pd.DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
- left
- right = pd.DataFrame({'b': [100, 200, 300]}, index=[1, 2, 3])
- right
-
- Previous Behavior:
-
- .. code-block:: ipython
-
- In [4]: left.join(right, how='inner')
- Out[4]:
- a b
- 1 10 100
- 2 20 200
-
- New Behavior:
-
- .. ipython:: python
-
- left.join(right, how='inner')
-
-.. _whatsnew_0200.api_breaking.pivot_table:
-
-Pivot Table always returns a DataFrame
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The documentation for :meth:`pivot_table` states that a ``DataFrame`` is *always* returned. Here a bug
-is fixed that allowed this to return a ``Series`` under certain circumstance. (:issue:`4386`)
-
-.. ipython:: python
-
- df = DataFrame({'col1': [3, 4, 5],
- 'col2': ['C', 'D', 'E'],
- 'col3': [1, 3, 9]})
- df
-
-Previous Behavior:
-
-.. code-block:: ipython
-
- In [2]: df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
- Out[2]:
- col3 col2
- 1 C 3
- 3 D 4
- 9 E 5
- Name: col1, dtype: int64
-
-New Behavior:
-
-.. ipython:: python
-
- df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
.. _whatsnew_0200.api:
@@ -1158,10 +679,9 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- ``numexpr`` version is now required to be >= 2.4.6 and it will not be used at all if this requisite is not fulfilled (:issue:`15213`).
-- ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv()`` and will be removed in the future (:issue:`12665`)
+- ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv`` and will be removed in the future (:issue:`12665`)
- ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`)
- ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`)
-- ``Series.map()`` now respects default values of dictionary subclasses with a ``__missing__`` method, such as ``collections.Counter`` (:issue:`15999`)
- ``.loc`` has compat with ``.ix`` for accepting iterators, and NamedTuples (:issue:`15120`)
- ``interpolate()`` and ``fillna()`` will raise a ``ValueError`` if the ``limit`` keyword argument is not greater than 0. (:issue:`9217`)
- ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`)
@@ -1169,331 +689,19 @@ Other API Changes
- ``inplace`` arguments now require a boolean value, else a ``ValueError`` is thrown (:issue:`14189`)
- ``pandas.api.types.is_datetime64_ns_dtype`` will now report ``True`` on a tz-aware dtype, similar to ``pandas.api.types.is_datetime64_any_dtype``
- ``DataFrame.asof()`` will return a null filled ``Series`` instead the scalar ``NaN`` if a match is not found (:issue:`15118`)
+- Reorganization of timeseries development tests (:issue:`14854`)
- Specific support for ``copy.copy()`` and ``copy.deepcopy()`` functions on NDFrame objects (:issue:`15444`)
- ``Series.sort_values()`` accepts a one element list of bool for consistency with the behavior of ``DataFrame.sort_values()`` (:issue:`15604`)
- ``.merge()`` and ``.join()`` on ``category`` dtype columns will now preserve the category dtype when possible (:issue:`10409`)
-- ``SparseDataFrame.default_fill_value`` will be 0, previously was ``nan`` in the return from ``pd.get_dummies(..., sparse=True)`` (:issue:`15594`)
-- The default behaviour of ``Series.str.match`` has changed from extracting
- groups to matching the pattern. The extracting behaviour was deprecated
- since pandas version 0.13.0 and can be done with the ``Series.str.extract``
- method (:issue:`5224`). As a consequence, the ``as_indexer`` keyword is
- ignored (no longer needed to specify the new behaviour) and is deprecated.
-- ``NaT`` will now correctly report ``False`` for datetimelike boolean operations such as ``is_month_start`` (:issue:`15781`)
-- ``NaT`` will now correctly return ``np.nan`` for ``Timedelta`` and ``Period`` accessors such as ``days`` and ``quarter`` (:issue:`15782`)
-- ``NaT`` will now returns ``NaT`` for ``tz_localize`` and ``tz_convert``
- methods (:issue:`15830`)
-- ``DataFrame`` and ``Panel`` constructors with invalid input will now raise ``ValueError`` rather than ``PandasError``, if called with scalar inputs and not axes (:issue:`15541`)
-- ``DataFrame`` and ``Panel`` constructors with invalid input will now raise ``ValueError`` rather than ``pandas.core.common.PandasError``, if called with scalar inputs and not axes; The exception ``PandasError`` is removed as well. (:issue:`15541`)
-- The exception ``pandas.core.common.AmbiguousIndexError`` is removed as it is not referenced (:issue:`15541`)
-
-
-.. _whatsnew_0200.privacy:
-
-Reorganization of the library: Privacy Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _whatsnew_0200.privacy.extensions:
-
-Modules Privacy Has Changed
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Some formerly public python/c/c++/cython extension modules have been moved and/or renamed. These are all removed from the public API.
-Furthermore, the ``pandas.core``, ``pandas.compat``, and ``pandas.util`` top-level modules are now considered to be PRIVATE.
-If indicated, a deprecation warning will be issued if you reference these modules. (:issue:`12588`)
-
-.. csv-table::
- :header: "Previous Location", "New Location", "Deprecated"
- :widths: 30, 30, 4
-
- "pandas.lib", "pandas._libs.lib", "X"
- "pandas.tslib", "pandas._libs.tslib", "X"
- "pandas.computation", "pandas.core.computation", "X"
- "pandas.msgpack", "pandas.io.msgpack", ""
- "pandas.index", "pandas._libs.index", ""
- "pandas.algos", "pandas._libs.algos", ""
- "pandas.hashtable", "pandas._libs.hashtable", ""
- "pandas.indexes", "pandas.core.indexes", ""
- "pandas.json", "pandas._libs.json / pandas.io.json", "X"
- "pandas.parser", "pandas._libs.parsers", "X"
- "pandas.formats", "pandas.io.formats", ""
- "pandas.sparse", "pandas.core.sparse", ""
- "pandas.tools", "pandas.core.reshape", "X"
- "pandas.types", "pandas.core.dtypes", "X"
- "pandas.io.sas.saslib", "pandas.io.sas._sas", ""
- "pandas._join", "pandas._libs.join", ""
- "pandas._hash", "pandas._libs.hashing", ""
- "pandas._period", "pandas._libs.period", ""
- "pandas._sparse", "pandas._libs.sparse", ""
- "pandas._testing", "pandas._libs.testing", ""
- "pandas._window", "pandas._libs.window", ""
-
-
-Some new subpackages are created with public functionality that is not directly
-exposed in the top-level namespace: ``pandas.errors``, ``pandas.plotting`` and
-``pandas.testing`` (more details below). Together with ``pandas.api.types`` and
-certain functions in the ``pandas.io`` and ``pandas.tseries`` submodules,
-these are now the public subpackages.
-
-Further changes:
-
-- The function :func:`~pandas.api.types.union_categoricals` is now importable from ``pandas.api.types``, formerly from ``pandas.types.concat`` (:issue:`15998`)
-- The type import ``pandas.tslib.NaTType`` is deprecated and can be replaced by using ``type(pandas.NaT)`` (:issue:`16146`)
-- The public functions in ``pandas.tools.hashing`` deprecated from that locations, but are now importable from ``pandas.util`` (:issue:`16223`)
-- The modules in ``pandas.util``: ``decorators``, ``print_versions``, ``doctools``, ``validators``, ``depr_module`` are now private. Only the functions exposed in ``pandas.util`` itself are public (:issue:`16223`)
-
-.. _whatsnew_0200.privacy.errors:
-
-``pandas.errors``
-^^^^^^^^^^^^^^^^^
-
-We are adding a standard public module for all pandas exceptions & warnings ``pandas.errors``. (:issue:`14800`). Previously
-these exceptions & warnings could be imported from ``pandas.core.common`` or ``pandas.io.common``. These exceptions and warnings
-will be removed from the ``*.common`` locations in a future release. (:issue:`15541`)
-
-The following are now part of this API:
-
-.. code-block:: python
-
- ['DtypeWarning',
- 'EmptyDataError',
- 'OutOfBoundsDatetime',
- 'ParserError',
- 'ParserWarning',
- 'PerformanceWarning',
- 'UnsortedIndexError',
- 'UnsupportedFunctionCall']
-
-
-.. _whatsnew_0200.privacy.testing:
-
-``pandas.testing``
-^^^^^^^^^^^^^^^^^^
-
-We are adding a standard module that exposes the public testing functions in ``pandas.testing`` (:issue:`9895`). Those functions can be used when writing tests for functionality using pandas objects.
-
-The following testing functions are now part of this API:
-
-- :func:`testing.assert_frame_equal`
-- :func:`testing.assert_series_equal`
-- :func:`testing.assert_index_equal`
-
-
-.. _whatsnew_0200.privacy.plotting:
-
-``pandas.plotting``
-^^^^^^^^^^^^^^^^^^^
-
-A new public ``pandas.plotting`` module has been added that holds plotting functionality that was previously in either ``pandas.tools.plotting`` or in the top-level namespace. See the :ref:`deprecations sections <whatsnew_0200.privacy.deprecate_plotting>` for more details.
-
-.. _whatsnew_0200.privacy.development:
-
-Other Development Changes
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-- Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`)
-- Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`)
-- Switched the test framework to use `pytest <http://doc.pytest.org/en/latest>`__ (:issue:`13097`)
-- Reorganization of tests directory layout (:issue:`14854`, :issue:`15707`).
-
.. _whatsnew_0200.deprecations:
Deprecations
-~~~~~~~~~~~~
-
-.. _whatsnew_0200.api_breaking.deprecate_ix:
-
-Deprecate ``.ix``
-^^^^^^^^^^^^^^^^^
-
-The ``.ix`` indexer is deprecated, in favor of the more strict ``.iloc`` and ``.loc`` indexers. ``.ix`` offers a lot of magic on the inference of what the user wants to do. To wit, ``.ix`` can decide to index *positionally* OR via *labels*, depending on the data type of the index. This has caused quite a bit of user confusion over the years. The full indexing documentation is :ref:`here <indexing>`. (:issue:`14218`)
-
-The recommended methods of indexing are:
-
-- ``.loc`` if you want to *label* index
-- ``.iloc`` if you want to *positionally* index.
-
-Using ``.ix`` will now show a ``DeprecationWarning`` with a link to some examples of how to convert code :ref:`here <indexing.deprecate_ix>`.
-
-
-.. ipython:: python
-
- df = pd.DataFrame({'A': [1, 2, 3],
- 'B': [4, 5, 6]},
- index=list('abc'))
-
- df
-
-Previous Behavior, where you wish to get the 0th and the 2nd elements from the index in the 'A' column.
-
-.. code-block:: ipython
-
- In [3]: df.ix[[0, 2], 'A']
- Out[3]:
- a 1
- c 3
- Name: A, dtype: int64
-
-Using ``.loc``. Here we will select the appropriate indexes from the index, then use *label* indexing.
-
-.. ipython:: python
-
- df.loc[df.index[[0, 2]], 'A']
-
-Using ``.iloc``. Here we will get the location of the 'A' column, then use *positional* indexing to select things.
-
-.. ipython:: python
-
- df.iloc[[0, 2], df.columns.get_loc('A')]
-
-
-.. _whatsnew_0200.api_breaking.deprecate_panel:
-
-Deprecate Panel
-^^^^^^^^^^^^^^^
-
-``Panel`` is deprecated and will be removed in a future version. The recommended way to represent 3-D data are
-with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
-provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`).
-
-.. ipython:: python
- :okwarning:
-
- p = tm.makePanel()
- p
-
-Convert to a MultiIndex DataFrame
-
-.. ipython:: python
-
- p.to_frame()
-
-Convert to an xarray DataArray
-
-.. ipython:: python
-
- p.to_xarray()
-
-.. _whatsnew_0200.api_breaking.deprecate_group_agg_dict:
-
-Deprecate groupby.agg() with a dictionary when renaming
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``.groupby(..).agg(..)``, ``.rolling(..).agg(..)``, and ``.resample(..).agg(..)`` syntax can accept a variable of inputs, including scalars,
-list, and a dict of column names to scalars or lists. This provides a useful syntax for constructing multiple
-(potentially different) aggregations.
-
-However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent
-between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionality.
-
-- We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed
- one to ``rename`` the resulting aggregation, but this had a completely different
- meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations.
-- We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner.
-
-This is an illustrative example:
-
-.. ipython:: python
-
- df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
- 'B': range(5),
- 'C': range(5)})
- df
-
-Here is a typical useful syntax for computing different aggregations for different columns. This
-is a natural, and useful syntax. We aggregate from the dict-to-list by taking the specified
-columns and applying the list of functions. This returns a ``MultiIndex`` for the columns (this is *not* deprecated).
-
-.. ipython:: python
-
- df.groupby('A').agg({'B': 'sum', 'C': 'min'})
-
-Here's an example of the first deprecation, passing a dict to a grouped ``Series``. This
-is a combination aggregation & renaming:
-
-.. code-block:: ipython
-
- In [6]: df.groupby('A').B.agg({'foo': 'count'})
- FutureWarning: using a dict on a Series for aggregation
- is deprecated and will be removed in a future version
-
- Out[6]:
- foo
- A
- 1 3
- 2 2
-
-You can accomplish the same operation, more idiomatically by:
-
-.. ipython:: python
-
- df.groupby('A').B.agg(['count']).rename(columns={'count': 'foo'})
-
-
-Here's an example of the second deprecation, passing a dict-of-dict to a grouped ``DataFrame``:
-
-.. code-block:: python
-
- In [23]: (df.groupby('A')
- .agg({'B': {'foo': 'sum'}, 'C': {'bar': 'min'}})
- )
- FutureWarning: using a dict with renaming is deprecated and
- will be removed in a future version
-
- Out[23]:
- B C
- foo bar
- A
- 1 3 0
- 2 7 3
-
-
-You can accomplish nearly the same by:
-
-.. ipython:: python
-
- (df.groupby('A')
- .agg({'B': 'sum', 'C': 'min'})
- .rename(columns={'B': 'foo', 'C': 'bar'})
- )
-
-
-
-.. _whatsnew_0200.privacy.deprecate_plotting:
-
-Deprecate .plotting
-^^^^^^^^^^^^^^^^^^^
-
-The ``pandas.tools.plotting`` module has been deprecated, in favor of the top level ``pandas.plotting`` module. All the public plotting functions are now available
-from ``pandas.plotting`` (:issue:`12548`).
-
-Furthermore, the top-level ``pandas.scatter_matrix`` and ``pandas.plot_params`` are deprecated.
-Users can import these from ``pandas.plotting`` as well.
-
-Previous script:
-
-.. code-block:: python
-
- pd.tools.plotting.scatter_matrix(df)
- pd.scatter_matrix(df)
-
-Should be changed to:
-
-.. code-block:: python
-
- pd.plotting.scatter_matrix(df)
-
-
-
-.. _whatsnew_0200.deprecations.other:
-
-Other Deprecations
-^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^
- ``SparseArray.to_dense()`` has deprecated the ``fill`` parameter, as that parameter was not being respected (:issue:`14647`)
- ``SparseSeries.to_dense()`` has deprecated the ``sparse_only`` parameter (:issue:`14647`)
- ``Series.repeat()`` has deprecated the ``reps`` parameter in favor of ``repeats`` (:issue:`12662`)
-- The ``Series`` constructor and ``.astype`` method have deprecated accepting timestamp dtypes without a frequency (e.g. ``np.datetime64``) for the ``dtype`` parameter (:issue:`15524`)
- ``Index.repeat()`` and ``MultiIndex.repeat()`` have deprecated the ``n`` parameter in favor of ``repeats`` (:issue:`12662`)
- ``Categorical.searchsorted()`` and ``Series.searchsorted()`` have deprecated the ``v`` parameter in favor of ``value`` (:issue:`12662`)
- ``TimedeltaIndex.searchsorted()``, ``DatetimeIndex.searchsorted()``, and ``PeriodIndex.searchsorted()`` have deprecated the ``key`` parameter in favor of ``value`` (:issue:`12662`)
@@ -1501,21 +709,17 @@ Other Deprecations
- ``Series.sortlevel`` and ``DataFrame.sortlevel`` have been deprecated in favor of ``Series.sort_index`` and ``DataFrame.sort_index`` (:issue:`15099`)
- importing ``concat`` from ``pandas.tools.merge`` has been deprecated in favor of imports from the ``pandas`` namespace. This should only affect explict imports (:issue:`15358`)
- ``Series/DataFrame/Panel.consolidate()`` been deprecated as a public method. (:issue:`15483`)
-- The ``as_indexer`` keyword of ``Series.str.match()`` has been deprecated (ignored keyword) (:issue:`15257`).
-- The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`, :issue:`15940`)
-
+- The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`)
* ``pd.pnow()``, replaced by ``Period.now()``
* ``pd.Term``, is removed, as it is not applicable to user code. Instead use in-line string expressions in the where clause when searching in HDFStore
* ``pd.Expr``, is removed, as it is not applicable to user code.
* ``pd.match()``, is removed.
* ``pd.groupby()``, replaced by using the ``.groupby()`` method directly on a ``Series/DataFrame``
- * ``pd.get_store()``, replaced by a direct call to ``pd.HDFStore(...)``
-- ``is_any_int_dtype``, ``is_floating_dtype``, and ``is_sequence`` are deprecated from ``pandas.api.types`` (:issue:`16042`)
.. _whatsnew_0200.prior_deprecations:
Removal of prior version deprecations/changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- The ``pandas.rpy`` module is removed. Similar functionality can be accessed
through the `rpy2 <https://rpy2.readthedocs.io/>`__ project.
@@ -1531,11 +735,6 @@ Removal of prior version deprecations/changes
in favor of ``iloc`` and ``iat`` as explained :ref:`here <whatsnew_0170.deprecations>` (:issue:`10711`).
- The deprecated ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`10711`)
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
-- ``Categorical`` has dropped support for ``NaN`` categories (:issue:`10748`)
-- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
-- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
-- Where clauses in ``pytables`` are only accepted as strings and expressions types and not other data-types (:issue:`12027`)
-- ``DataFrame`` has dropped the ``combineAdd`` and ``combineMult`` methods in favor of ``add`` and ``mul`` respectively (:issue:`10735`)
.. _whatsnew_0200.performance:
@@ -1543,7 +742,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Improved performance of ``pd.wide_to_long()`` (:issue:`14779`)
-- Improved performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`, :issue:`16057`)
+- Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`)
- Improved performance of timeseries plotting with an irregular DatetimeIndex
(or with ``compat_x=True``) (:issue:`15073`).
- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`, :issue:`15109`, :issue:`15561`, :issue:`15635`)
@@ -1552,176 +751,138 @@ Performance Improvements
- Improved performance of ``.rank()`` for categorical data (:issue:`15498`)
- Improved performance when using ``.unstack()`` (:issue:`15503`)
- Improved performance of merge/join on ``category`` columns (:issue:`10409`)
-- Improved performance of ``drop_duplicates()`` on ``bool`` columns (:issue:`12963`)
-- Improve performance of ``pd.core.groupby.GroupBy.apply`` when the applied
- function used the ``.name`` attribute of the group DataFrame (:issue:`15062`).
-- Improved performance of ``iloc`` indexing with a list or array (:issue:`15504`).
-- Improved performance of ``Series.sort_index()`` with a monotonic index (:issue:`15694`)
-- Improved performance in ``pd.read_csv()`` on some platforms with buffered reads (:issue:`16039`)
+
.. _whatsnew_0200.bug_fixes:
Bug Fixes
~~~~~~~~~
-Conversion
-^^^^^^^^^^
-
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
-- Bug in ``Timestamp.replace`` with compat for passing long integers (:issue:`15030`)
-- Bug in ``Timestamp`` returning UTC based time/date attributes when a timezone was provided (:issue:`13303`, :issue:`6538`)
-- Bug in ``Timestamp`` incorrectly localizing timezones during construction (:issue:`11481`, :issue:`15777`)
+- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
-- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
- Bug in ``DatetimeIndex.round()`` and ``Timestamp.round()`` floating point accuracy when rounding by milliseconds or less (:issue:`14440`, :issue:`15578`)
- Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`)
- Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`)
- Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`)
+- Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`)
+- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`)
+- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`)
- Cleaned up ``PeriodIndex`` constructor, including raising on floats more consistently (:issue:`13277`)
+- Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`)
+- Bug in ``pd.read_fwf`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
+- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`)
+- Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`)
+- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
+- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
- Bug in using ``__deepcopy__`` on empty NDFrame objects (:issue:`15370`)
-- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
-- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
-- Bug in ``Series.replace`` which replaced a numeric by string (:issue:`15743`)
-- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
-- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
-- Bug in ``Series.dt.round()`` inconsistent behaviour on ``NaT`` 's with different arguments (:issue:`14940`)
-- Bug in ``Series`` constructor when both ``copy=True`` and ``dtype`` arguments are provided (:issue:`15125`)
-- Incorrect dtyped ``Series`` was returned by comparison methods (e.g., ``lt``, ``gt``, ...) against a constant for an empty ``DataFrame`` (:issue:`15077`)
-- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
-- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
+- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`, :issue:`15424`)
+- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a numpy array (:issue:`15434`)
+- Bug in ``Rolling.quantile`` function that caused a segmentation fault when called with a quantile value outside of the range [0, 1] (:issue:`15463`)
+- Bug in ``pd.cut()`` with a single bin on an all 0s array (:issue:`15428`)
+- Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`)
+
+
+
+- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
+
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
-- Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`)
-- Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`)
-- Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`)
-- Bug in the return type of ``pd.unique`` on a ``Categorical``, which was returning an ndarray and not a ``Categorical`` (:issue:`15903`)
-- Bug in ``Index.to_series()`` where the index was not copied (and so mutating later would change the original), (:issue:`15949`)
-- Bug in indexing with partial string indexing with a len-1 DataFrame (:issue:`16071`)
-- Bug in ``Series`` construction where passing invalid dtype didn't raise an error. (:issue:`15520`)
-Indexing
-^^^^^^^^
+- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
+- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
+
+- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
-- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
-- Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`)
-- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`)
-- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`, :issue:`15424`)
-- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a numpy array (:issue:`15434`)
-- Bug in ``Series.asof`` which raised if the series contained all ``np.nan`` (:issue:`15713`)
-- Bug in ``.at`` when selecting from a tz-aware column (:issue:`15822`)
- Bug in ``Series.where()`` and ``DataFrame.where()`` where array-like conditionals were being rejected (:issue:`15414`)
-- Bug in ``Series.where()`` where TZ-aware data was converted to float representation (:issue:`15701`)
-- Bug in ``.loc`` that would not return the correct dtype for scalar access for a DataFrame (:issue:`11617`)
+- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
+- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
- Bug in output formatting of a ``MultiIndex`` when names are integers (:issue:`12223`, :issue:`15262`)
-- Bug in ``Categorical.searchsorted()`` where alphabetical instead of the provided categorical order was used (:issue:`14522`)
-- Bug in ``Series.iloc`` where a ``Categorical`` object for list-like indexes input was returned, where a ``Series`` was expected. (:issue:`14580`)
-- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
-- Bug in ``.reset_index()`` when an all ``NaN`` level of a ``MultiIndex`` would fail (:issue:`6322`)
-- Bug in ``.reset_index()`` when raising error for index name already present in ``MultiIndex`` columns (:issue:`16120`)
-- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
-- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
-- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
-- Bug in ``pd.concat()`` where the names of ``MultiIndex`` of resulting ``DataFrame`` are not handled correctly when ``None`` is presented in the names of ``MultiIndex`` of input ``DataFrame`` (:issue:`15787`)
-- Bug in ``DataFrame.sort_index()`` and ``Series.sort_index()`` where ``na_position`` doesn't work with a ``MultiIndex`` (:issue:`14784`, :issue:`16604`)
-- Bug in in ``pd.concat()`` when combining objects with a ``CategoricalIndex`` (:issue:`16111`)
-- Bug in indexing with a scalar and a ``CategoricalIndex`` (:issue:`16123`)
-I/O
-^^^
+- Bug in compat for passing long integers to ``Timestamp.replace`` (:issue:`15030`)
+- Bug in ``.loc`` that would not return the correct dtype for scalar access for a DataFrame (:issue:`11617`)
+- Bug in ``GroupBy.get_group()`` failing with a categorical grouper (:issue:`15155`)
+- Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`)
-- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`)
-- Bug in ``pd.read_fwf()`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
-- Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`)
-- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`)
-- Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`)
-- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`)
-- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
-- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
-- Bug in ``pd.read_csv()`` when an index was specified and no values were specified as null values (:issue:`15835`)
-- Bug in ``pd.read_csv()`` in which certain invalid file objects caused the Python interpreter to crash (:issue:`15337`)
-- Bug in ``pd.read_csv()`` in which invalid values for ``nrows`` and ``chunksize`` were allowed (:issue:`15767`)
-- Bug in ``pd.read_csv()`` for the Python engine in which unhelpful error messages were being raised when parsing errors occurred (:issue:`15910`)
-- Bug in ``pd.read_csv()`` in which the ``skipfooter`` parameter was not being properly validated (:issue:`15925`)
-- Bug in ``pd.to_csv()`` in which there was numeric overflow when a timestamp index was being written (:issue:`15982`)
-- Bug in ``pd.util.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
-- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
-- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
-- Bug in ``.to_json()`` for the C engine where rollover was not correctly handled for case where frac is odd and diff is exactly 0.5 (:issue:`15716`, :issue:`15864`)
-- Bug in ``pd.read_json()`` for Python 2 where ``lines=True`` and contents contain non-ascii unicode characters (:issue:`15132`)
-- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
-- Bug in ``pd.read_msgpack()`` which did not allow loading of a dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
-- Bug in ``pd.read_msgpack()`` when deserializing a ``CategoricalIndex`` (:issue:`15487`)
- Bug in ``DataFrame.to_records()`` with converting a ``DatetimeIndex`` with a timezone (:issue:`13937`)
-- Bug in ``DataFrame.to_records()`` which failed with unicode characters in column names (:issue:`11879`)
-- Bug in ``.to_sql()`` when writing a DataFrame with numeric index names (:issue:`15404`).
-- Bug in ``DataFrame.to_html()`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`)
-- Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`)
-- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
-- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
-- Bug in the ``Series`` repr not showing the length when the output was truncated (:issue:`15962`).
-Plotting
-^^^^^^^^
-- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`)
-- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
-- Bug in the date and time converters pandas registers with matplotlib not handling multiple dimensions (:issue:`16026`)
-- Bug in ``pd.scatter_matrix()`` could accept either ``color`` or ``c``, but not both (:issue:`14855`)
+- Bug in ``.groupby(...).rolling(...)`` when ``on`` is specified and using a ``DatetimeIndex`` (:issue:`15130`)
-Groupby/Resample/Rolling
-^^^^^^^^^^^^^^^^^^^^^^^^
-- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
-- Properly set ``__name__`` and ``__qualname__`` for ``Groupby.*`` functions (:issue:`14620`)
-- Bug in ``GroupBy.get_group()`` failing with a categorical grouper (:issue:`15155`)
-- Bug in ``.groupby(...).rolling(...)`` when ``on`` is specified and using a ``DatetimeIndex`` (:issue:`15130`, :issue:`13966`)
-- Bug in groupby operations with ``timedelta64`` when passing ``numeric_only=False`` (:issue:`5724`)
-- Bug in ``groupby.apply()`` coercing ``object`` dtypes to numeric types, when not all values were numeric (:issue:`14423`, :issue:`15421`, :issue:`15670`)
-- Bug in ``resample``, where a non-string ``loffset`` argument would not be applied when resampling a timeseries (:issue:`13218`)
-- Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
-- Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)
-- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
-- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
+- Bug in ``to_sql`` when writing a DataFrame with numeric index names (:issue:`15404`).
+- Bug in ``Series.iloc`` where a ``Categorical`` object for list-like indexes input was returned, where a ``Series`` was expected. (:issue:`14580`)
+- Bug in repr-formatting a ``SparseDataFrame`` after a value was set on (a copy of) one of its series (:issue:`15488`)
+- Bug in ``SparseSeries.reindex`` on single level with list of length 1 (:issue:`15447`)
+
+
+- Bug in groupby operations with timedelta64 when passing ``numeric_only=False`` (:issue:`5724`)
+
+
+- Bug in ``DataFrame.to_html`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`)
+
+- Bug in ``Categorical.searchsorted()`` where alphabetical instead of the provided categorical order was used (:issue:`14522`)
+
+
+
+- Bug in ``resample``, where a non-string ``loffset`` argument would not be applied when resampling a timeseries (:issue:`13218`)
+
+
+
+- Bug in ``.rank()`` which incorrectly ranks ordered categories (:issue:`15420`)
+- Bug in ``.corr()`` and ``.cov()`` where the column and index were the same object (:issue:`14617`)
+
+
+- Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`)
+- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`)
+
+- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
+- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
+- Bug in ``.read_json()`` for Python 2 where ``lines=True`` and contents contain non-ascii unicode characters (:issue:`15132`)
- Bug in ``.rolling/expanding()`` functions where ``count()`` was not counting ``np.Inf``, nor handling ``object`` dtypes (:issue:`12541`)
- Bug in ``.rolling()`` where ``pd.Timedelta`` or ``datetime.timedelta`` was not accepted as a ``window`` argument (:issue:`15440`)
-- Bug in ``Rolling.quantile`` function that caused a segmentation fault when called with a quantile value outside of the range [0, 1] (:issue:`15463`)
- Bug in ``DataFrame.resample().median()`` if duplicate column names are present (:issue:`14233`)
-Sparse
-^^^^^^
+- Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
+- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
+- Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)
-- Bug in ``SparseSeries.reindex`` on single level with list of length 1 (:issue:`15447`)
-- Bug in repr-formatting a ``SparseDataFrame`` after a value was set on (a copy of) one of its series (:issue:`15488`)
-- Bug in ``SparseDataFrame`` construction with lists not coercing to dtype (:issue:`15682`)
-- Bug in sparse array indexing in which indices were not being validated (:issue:`15863`)
+- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
+- Bug in the HTML display with a ``MultiIndex`` and truncation (:issue:`14882`)
-Reshaping
-^^^^^^^^^
-- Bug in ``pd.merge_asof()`` where ``left_index`` or ``right_index`` caused a failure when multiple ``by`` was specified (:issue:`15676`)
- Bug in ``pd.merge_asof()`` where ``left_index``/``right_index`` together caused a failure when ``tolerance`` was specified (:issue:`15135`)
- Bug in ``DataFrame.pivot_table()`` where ``dropna=True`` would not drop all-NaN columns when the columns was a ``category`` dtype (:issue:`15193`)
-- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
-- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`)
-- Bug in ``pd.concat()`` in which concatenating with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
-- Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`)
-- Bug in ``DataFrame.nsmallest`` and ``DataFrame.nlargest`` where identical values resulted in duplicated rows (:issue:`15297`)
-Numeric
-^^^^^^^
-- Bug in ``.rank()`` which incorrectly ranks ordered categories (:issue:`15420`)
-- Bug in ``.corr()`` and ``.cov()`` where the column and index were the same object (:issue:`14617`)
-- Bug in ``.mode()`` where ``mode`` was not returned if was only a single value (:issue:`15714`)
-- Bug in ``pd.cut()`` with a single bin on an all 0s array (:issue:`15428`)
-- Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`)
-- Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`)
-- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
+- Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`)
+
+
+- Bug in ``Series`` constructor when both ``copy=True`` and ``dtype`` arguments are provided (:issue:`15125`)
+- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`)
+- Incorrect dtyped ``Series`` was returned by comparison methods (e.g., ``lt``, ``gt``, ...) against a constant for an empty ``DataFrame`` (:issue:`15077`)
+- Bug in ``Series.dt.round`` inconsistent behaviour on ``NaT``'s with different arguments (:issue:`14940`)
+- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
+- Bug in ``.reset_index()`` when an all ``NaN`` level of a ``MultiIndex`` would fail (:issue:`6322`)
+
+- Bug in ``pd.read_msgpack()`` when deserializing a ``CategoricalIndex`` (:issue:`15487`)
+- Bug in ``pd.DataFrame.to_records()`` which failed with unicode characters in column names (:issue:`11879`)
+
-Other
-^^^^^
+- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
-- Compat with SciPy 0.19.0 for testing on ``.interpolate()`` (:issue:`15662`)
-- Compat for 32-bit platforms for ``.qcut/cut``; bins will now be ``int64`` dtype (:issue:`14866`)
-- Bug in interactions with ``Qt`` when a ``QtApplication`` already exists (:issue:`14372`)
-- Avoid use of ``np.finfo()`` during ``import pandas`` removed to mitigate deadlock on Python GIL misuse (:issue:`14641`)
+- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
+- Bug in ``pd.concat()`` in which concatenating with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
+- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
+
+
+
+- Bug in ``.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
+- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
+
+- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`)
+- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
+- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
+- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
+- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
+- Bug in ``pd.read_msgpack`` which did not allow loading of a dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index b0ed6d4c4b84d..af7de61a25d21 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -40,8 +40,37 @@ class _Unstacker(object):
Parameters
----------
+ values : ndarray
+ Values of DataFrame to "Unstack"
+ index : object
+ Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
+ value_columns : Index, optional
+ Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
+ fill_value : scalar, optional
+ Default value to fill in missing values if subgroups do not have the
+ same set of labels. By default, missing values will be replaced with
+ the default fill value for that data type, NaN for float, NaT for
+ datetimelike, etc. For integer types, by default data will be converted to
+ float and missing values will be set to NaN.
+ constructor : object, default DataFrame
+ ``Series``, ``DataFrame``, or subclass used to create unstacked
+ response
+
+ value_columns : object, optional
+ Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
+
+ fill_value : scalar, optional
+ Default value to fill in missing values if subgroups do not have the
+ same set of labels. By default, missing values will be replaced with
+ the default fill value for that data type, NaN for float, NaT for
+ datetimelike, etc. For integer types, by default data will be converted to
+ float and missing values will be set to NaN.
+
+ constructor : object, default DataFrame
+ ``Series``, ``DataFrame``, or subclass used to create unstacked
+ response
Examples
--------
@@ -72,7 +101,8 @@ class _Unstacker(object):
"""
def __init__(self, values, index, level=-1, value_columns=None,
- fill_value=None):
+ fill_value=None,
+ constructor=DataFrame):
self.is_categorical = None
if values.ndim == 1:
@@ -83,6 +113,7 @@ def __init__(self, values, index, level=-1, value_columns=None,
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
+ self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
@@ -177,7 +208,7 @@ def get_result(self):
ordered=ordered)
for i in range(values.shape[-1])]
- return DataFrame(values, index=index, columns=columns)
+ return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
@@ -373,8 +404,9 @@ def pivot(self, index=None, columns=None, values=None):
index = self.index
else:
index = self[index]
- indexed = Series(self[values].values,
- index=MultiIndex.from_arrays([index, self[columns]]))
+ indexed = self._constructor_sliced(
+ self[values].values,
+ index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
@@ -455,7 +487,8 @@ def unstack(obj, level, fill_value=None):
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
- fill_value=fill_value)
+ fill_value=fill_value,
+ constructor=obj._constructor_expanddim)
return unstacker.get_result()
@@ -487,13 +520,14 @@ def _unstack_frame(obj, level, fill_value=None):
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
- result = DataFrame(BlockManager(new_blocks, new_axes))
- mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
+ result = obj._constructor(BlockManager(new_blocks, new_axes))
+ mask_frame = obj._constructor(BlockManager(mask_blocks, new_axes))
return result.loc[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
- fill_value=fill_value)
+ fill_value=fill_value,
+ constructor=obj._constructor)
return unstacker.get_result()
@@ -550,7 +584,7 @@ def factorize(index):
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
- return Series(new_values, index=new_index)
+ return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
@@ -696,7 +730,7 @@ def _convert_level_number(level_num, columns):
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
- result = DataFrame(new_data, index=new_index, columns=new_columns)
+ result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
@@ -770,7 +804,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None,
mdata[col] = np.asanyarray(frame.columns
._get_level_values(i)).repeat(N)
- return DataFrame(mdata, columns=mcolumns)
+ return frame._constructor(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
@@ -839,7 +873,7 @@ def lreshape(data, groups, dropna=True, label=None):
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
- return DataFrame(mdata, columns=id_cols + pivot_cols)
+ return data._constructor(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 52c591e4dcbb0..637f8e2910928 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -5,7 +5,7 @@
from warnings import catch_warnings
import numpy as np
-from pandas import DataFrame, Series, MultiIndex, Panel
+from pandas import DataFrame, Series, MultiIndex, Panel, Index
import pandas as pd
import pandas.util.testing as tm
@@ -126,6 +126,194 @@ def test_indexing_sliced(self):
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
+ def test_subclass_stack(self):
+ # GH 15564
+ df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=['a', 'b', 'c'],
+ columns=['X', 'Y', 'Z'])
+
+ res = df.stack()
+ exp = tm.SubclassedSeries(
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+ index=[list('aaabbbccc'), list('XYZXYZXYZ')])
+
+ tm.assert_series_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedSeries)
+
+ def test_subclass_stack_multi(self):
+ # GH 15564
+ df = tm.SubclassedDataFrame([
+ [10, 11, 12, 13],
+ [20, 21, 22, 23],
+ [30, 31, 32, 33],
+ [40, 41, 42, 43]],
+ index=MultiIndex.from_tuples(
+ list(zip(list('AABB'), list('cdcd'))),
+ names=['aaa', 'ccc']),
+ columns=MultiIndex.from_tuples(
+ list(zip(list('WWXX'), list('yzyz'))),
+ names=['www', 'yyy']))
+
+ exp = tm.SubclassedDataFrame([
+ [10, 12],
+ [11, 13],
+ [20, 22],
+ [21, 23],
+ [30, 32],
+ [31, 33],
+ [40, 42],
+ [41, 43]],
+ index=MultiIndex.from_tuples(list(zip(
+ list('AAAABBBB'), list('ccddccdd'), list('yzyzyzyz'))),
+ names=['aaa', 'ccc', 'yyy']),
+ columns=Index(['W', 'X'], name='www'))
+
+ res = df.stack()
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ res = df.stack('yyy')
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ exp = tm.SubclassedDataFrame([
+ [10, 11],
+ [12, 13],
+ [20, 21],
+ [22, 23],
+ [30, 31],
+ [32, 33],
+ [40, 41],
+ [42, 43]],
+ index=MultiIndex.from_tuples(list(zip(
+ list('AAAABBBB'), list('ccddccdd'), list('WXWXWXWX'))),
+ names=['aaa', 'ccc', 'www']),
+ columns=Index(['y', 'z'], name='yyy'))
+
+ res = df.stack('www')
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ def test_subclass_unstack(self):
+ # GH 15564
+ df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=['a', 'b', 'c'],
+ columns=['X', 'Y', 'Z'])
+
+ res = df.unstack()
+ exp = tm.SubclassedSeries(
+ [1, 4, 7, 2, 5, 8, 3, 6, 9],
+ index=[list('XXXYYYZZZ'), list('abcabcabc')])
+
+ tm.assert_series_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedSeries)
+
+ def test_subclass_unstack_multi(self):
+ # GH 15564
+ df = tm.SubclassedDataFrame([
+ [10, 11, 12, 13],
+ [20, 21, 22, 23],
+ [30, 31, 32, 33],
+ [40, 41, 42, 43]],
+ index=MultiIndex.from_tuples(
+ list(zip(list('AABB'), list('cdcd'))),
+ names=['aaa', 'ccc']),
+ columns=MultiIndex.from_tuples(
+ list(zip(list('WWXX'), list('yzyz'))),
+ names=['www', 'yyy']))
+
+ exp = tm.SubclassedDataFrame([
+ [10, 20, 11, 21, 12, 22, 13, 23],
+ [30, 40, 31, 41, 32, 42, 33, 43]],
+ index=Index(['A', 'B'], name='aaa'),
+ columns=MultiIndex.from_tuples(list(zip(
+ list('WWWWXXXX'), list('yyzzyyzz'), list('cdcdcdcd'))),
+ names=['www', 'yyy', 'ccc']))
+
+ res = df.unstack()
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ res = df.unstack('ccc')
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ exp = tm.SubclassedDataFrame([
+ [10, 30, 11, 31, 12, 32, 13, 33],
+ [20, 40, 21, 41, 22, 42, 23, 43]],
+ index=Index(['c', 'd'], name='ccc'),
+ columns=MultiIndex.from_tuples(list(zip(
+ list('WWWWXXXX'), list('yyzzyyzz'), list('ABABABAB'))),
+ names=['www', 'yyy', 'aaa']))
+
+ res = df.unstack('aaa')
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
+ def test_subclass_pivot(self):
+ # GH 15564
+ df = tm.SubclassedDataFrame({
+ 'index': ['A', 'B', 'C', 'C', 'B', 'A'],
+ 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values': [1., 2., 3., 3., 2., 1.]})
+
+ pivoted = df.pivot(
+ index='index', columns='columns', values='values')
+
+ expected = tm.SubclassedDataFrame({
+ 'One': {'A': 1., 'B': 2., 'C': 3.},
+ 'Two': {'A': 1., 'B': 2., 'C': 3.}})
+
+ expected.index.name, expected.columns.name = 'index', 'columns'
+
+ tm.assert_frame_equal(pivoted, expected)
+ tm.assertIsInstance(pivoted, tm.SubclassedDataFrame)
+
+ def test_subclassed_melt(self):
+ # GH 15564
+ cheese = tm.SubclassedDataFrame({
+ 'first': ['John', 'Mary'],
+ 'last': ['Doe', 'Bo'],
+ 'height': [5.5, 6.0],
+ 'weight': [130, 150]})
+
+ melted = pd.melt(cheese, id_vars=['first', 'last'])
+
+ expected = tm.SubclassedDataFrame([
+ ['John', 'Doe', 'height', 5.5],
+ ['Mary', 'Bo', 'height', 6.0],
+ ['John', 'Doe', 'weight', 130],
+ ['Mary', 'Bo', 'weight', 150]],
+ columns=['first', 'last', 'variable', 'value'])
+
+ tm.assert_frame_equal(melted, expected)
+ tm.assertIsInstance(melted, tm.SubclassedDataFrame)
+
+ def test_subclassed_wide_to_long(self):
+ # GH 9762
+
+ np.random.seed(123)
+ x = np.random.randn(3)
+ df = tm.SubclassedDataFrame({
+ "A1970": {0: "a", 1: "b", 2: "c"},
+ "A1980": {0: "d", 1: "e", 2: "f"},
+ "B1970": {0: 2.5, 1: 1.2, 2: .7},
+ "B1980": {0: 3.2, 1: 1.3, 2: .1},
+ "X": dict(zip(range(3), x))})
+
+ df["id"] = df.index
+ exp_data = {"X": x.tolist() + x.tolist(),
+ "A": ['a', 'b', 'c', 'd', 'e', 'f'],
+ "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
+ "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
+ "id": [0, 1, 2, 0, 1, 2]}
+ expected = tm.SubclassedDataFrame(exp_data)
+ expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
+ long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year")
+
+ tm.assert_frame_equal(long_frame, expected)
+ tm.assertIsInstance(long_frame, tm.SubclassedDataFrame)
+
def test_to_panel_expanddim(self):
# GH 9762
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index 37c8d7343f7f1..577c8f60e5356 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -32,6 +32,18 @@ def test_to_frame(self):
tm.assert_frame_equal(res, exp)
assert isinstance(res, tm.SubclassedDataFrame)
+ def test_subclass_unstack(self):
+ # GH 15564
+ s = tm.SubclassedSeries(
+ [1, 2, 3, 4], index=[list('aabb'), list('xyxy')])
+
+ res = s.unstack()
+ exp = tm.SubclassedDataFrame(
+ {'x': [1, 3], 'y': [2, 4]}, index=['a', 'b'])
+
+ tm.assert_frame_equal(res, exp)
+ tm.assertIsInstance(res, tm.SubclassedDataFrame)
+
class TestSparseSeriesSubclassing(object):
| - [x] closes #15563
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
This PR enables reshape operations on subclassed `DataFrame` and `Series` objects to preserve subclass families through the use of `_constructor*` properties.
See discussion on PR #15564. Thanks for the help @jreback.
This PR has a cleaner implementation of subclassed unstack operations by modifying the `_Unstacker` initializer to allow an optional `constructor` argument.
Additionally, this PR implements tests for a wider set of reshape operations. It now covers:
* `DataFrame.stack()`, `DataFrame.unstack()`, `DataFrame.pivot()`, and `series.unstack()` for containers with `Index` and `MultiIndex` indices and/or columns
* `pd.melt()`
* `pd.wide_to_long()`
Finally, the pandas internals docs have been edited for clarity and additional examples have been added to showcase subclassed reshape and math operations. | https://api.github.com/repos/pandas-dev/pandas/pulls/15655 | 2017-03-11T20:21:12Z | 2017-07-26T23:57:28Z | null | 2017-07-26T23:57:28Z |
API: Drop the name parameter from Categorical | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e392023423eb0..f6d5e3df814fc 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -734,7 +734,7 @@ Removal of prior version deprecations/changes
- The deprecated ``irow``, ``icol``, ``iget`` and ``iget_value`` methods are removed
in favor of ``iloc`` and ``iat`` as explained :ref:`here <whatsnew_0170.deprecations>` (:issue:`10711`).
- The deprecated ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`10711`)
-
+- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 47db86ce1e73e..c1e5904693d1c 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -231,8 +231,7 @@ class Categorical(PandasObject):
__array_priority__ = 1000
_typ = 'categorical'
- def __init__(self, values, categories=None, ordered=False,
- name=None, fastpath=False):
+ def __init__(self, values, categories=None, ordered=False, fastpath=False):
self._validate_ordered(ordered)
@@ -244,12 +243,6 @@ def __init__(self, values, categories=None, ordered=False,
self._ordered = ordered
return
- if name is not None:
- msg = ("the 'name' keyword is removed, use 'name' with consumers "
- "of the categorical instead (e.g. 'Series(cat, "
- "name=\"something\")'")
- warn(msg, UserWarning, stacklevel=2)
-
# sanitize input
if is_categorical_dtype(values):
@@ -431,7 +424,7 @@ def from_array(cls, data, **kwargs):
return cls(data, **kwargs)
@classmethod
- def from_codes(cls, codes, categories, ordered=False, name=None):
+ def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
@@ -454,12 +447,6 @@ def from_codes(cls, codes, categories, ordered=False, name=None):
categorical. If not given, the resulting categorical will be
unordered.
"""
- if name is not None:
- msg = ("the 'name' keyword is removed, use 'name' with consumers "
- "of the categorical instead (e.g. 'Series(cat, "
- "name=\"something\")'")
- warn(msg, UserWarning, stacklevel=2)
-
try:
codes = np.asarray(codes, np.int64)
except:
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 404be758a7fbe..4662e8b635d3f 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -589,8 +589,7 @@ def decode(obj):
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
- ordered=obj[u'ordered'],
- name=obj[u'name'])
+ ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 91e70e942089c..fad6237d851fb 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -265,12 +265,8 @@ def python_unpickler(path):
def test_pickle_v0_14_1():
- # we have the name warning
- # 10482
- with tm.assert_produces_warning(UserWarning):
- cat = pd.Categorical(values=['a', 'b', 'c'],
- categories=['a', 'b', 'c', 'd'],
- name='foobar', ordered=False)
+ cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False,
+ categories=['a', 'b', 'c', 'd'])
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
@@ -286,12 +282,8 @@ def test_pickle_v0_15_2():
# ordered -> _ordered
# GH 9347
- # we have the name warning
- # 10482
- with tm.assert_produces_warning(UserWarning):
- cat = pd.Categorical(values=['a', 'b', 'c'],
- categories=['a', 'b', 'c', 'd'],
- name='foobar', ordered=False)
+ cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False,
+ categories=['a', 'b', 'c', 'd'])
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 2d5e98d49e152..6c8aeba704c7b 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -682,7 +682,7 @@ def test_print(self):
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
- name='cat', fastpath=True)
+ fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
@@ -1635,15 +1635,6 @@ def test_deprecated_from_array(self):
with tm.assert_produces_warning(FutureWarning):
Categorical.from_array([0, 1])
- def test_removed_names_produces_warning(self):
-
- # 10482
- with tm.assert_produces_warning(UserWarning):
- Categorical([0, 1], name="a")
-
- with tm.assert_produces_warning(UserWarning):
- Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
-
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
| Deprecated in 0.17.0
xref #10632
| https://api.github.com/repos/pandas-dev/pandas/pulls/15654 | 2017-03-11T06:49:06Z | 2017-03-11T17:21:58Z | null | 2017-03-11T18:57:24Z |
ENH: Provide an errors parameter to fillna | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index bfec1ec3ebe8c..e7ecd829f344c 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1909,3 +1909,27 @@ def pandas_dtype(dtype):
raise TypeError('dtype {0} not understood'.format(dtype))
return npdtype
+
+
+def _is_fillable_value(value):
+ pandas_ts_types = ('Timestamp', 'Period', 'Timedelta')
+ pandas_block_types = ('Series', 'DataFrame')
+
+ if any([isinstance(value, (list, dict)),
+ callable(value),
+ (not (isinstance(value, string_types) or
+ isinstance(value, (int, float, complex, str, None.__class__)) or
+ is_numeric_dtype(value) or
+ is_datetime_or_timedelta_dtype(value) or
+ is_period_dtype(value) or
+ type(value).__name__ in pandas_ts_types) or
+ type(value).__name__ in pandas_block_types)]):
+ return False
+ else:
+ return True
+
+
+def validate_fill_value(value):
+ if not _is_fillable_value(value):
+ raise TypeError('"value" parameter must be a scalar, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index af3a873bc2866..0846723170b3b 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -19,7 +19,10 @@
is_object_dtype,
is_integer,
_TD_DTYPE,
- _NS_DTYPE)
+ _NS_DTYPE,
+ is_datetime64_any_dtype, is_float,
+ is_numeric_dtype, is_complex, is_period_arraylike)
+from datetime import datetime, timedelta
from .inference import is_list_like
@@ -394,3 +397,32 @@ def na_value_for_dtype(dtype):
elif is_bool_dtype(dtype):
return False
return np.nan
+
+
+def is_valid_fill_value(value, dtype):
+ """
+ Makes sure the fill value is appropriate for the given dtype.
+
+ Parameters
+ ----------
+ value : scalar
+ dtype: string / dtype
+ """
+ if isinstance(value, dict):
+ return True
+ if not is_scalar(value):
+ # maybe always raise?
+ # raise TypeError('"value" parameter must be a scalar or dict, but '
+ # 'you passed a "{0}"'.format(type(value).__name__))
+ return False
+ elif isnull(value):
+ return True
+ elif is_bool_dtype(dtype):
+ return isinstance(value, (np.bool, bool))
+ elif is_numeric_dtype(dtype):
+ return is_float(value) or is_integer(value) or is_complex(value)
+ elif is_datetime64_any_dtype(dtype):
+ return isinstance(value, (np.datetime64, datetime))
+ elif is_timedelta64_dtype(dtype):
+ return isinstance(value, (np.timedelta64, timedelta))
+ return True
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8b186bab29d5e..0a2b1c2c79d4c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -28,7 +28,7 @@
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.missing import isnull, notnull
-from pandas.core.dtypes.generic import ABCSeries, ABCPanel
+from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
from pandas.core.common import (_values_from_object,
_maybe_box_datetimelike,
@@ -3735,9 +3735,27 @@ def convert_objects(self, convert_dates=True, convert_numeric=False,
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
- limit=None, downcast=None):
+ limit=None, downcast=None, errors=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
+ # if a singular fill value is provided, validate it
+ # special case: a DataFrame may be passed to a DataFrame
+ # in that case, short-circuit
+ if value is not None and not (isinstance(value, ABCDataFrame) and
+ isinstance(self, ABCDataFrame)):
+ # fill values by column, not all at once, to respect dtypes
+ if not isinstance(value, (dict, ABCSeries)) and \
+ isinstance(self, ABCDataFrame):
+ value = {col: value for col in self.columns}
+ try:
+ missing.validate_fill_value(self, value)
+ except TypeError:
+ if errors == 'ignore':
+ return self
+ elif errors == 'raise':
+ raise
+ # if errors == 'coerce' continue
+
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
@@ -3756,7 +3774,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
- result = self.T.fillna(method=method, limit=limit).T
+ result = self.T.fillna(method=method, limit=limit,
+ errors=errors).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
@@ -3772,7 +3791,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
elif self.ndim == 3:
# fill in 2d chunks
- result = dict([(col, s.fillna(method=method, value=value))
+ result = dict([(col, s.fillna(method=method, value=value,
+ errors=errors))
for col, s in self.iteritems()])
new_obj = self._constructor.\
from_dict(result).__finalize__(self)
@@ -3804,7 +3824,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
- downcast=downcast)
+ downcast=downcast,
+ errors=errors)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
@@ -3817,12 +3838,14 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
if k not in result:
continue
obj = result[k]
- obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
- return result
+ obj.fillna(v, limit=limit, inplace=True,
+ downcast=downcast, errors=errors)
+ return None if inplace else result
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
- downcast=downcast)
+ downcast=downcast,
+ errors=errors)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notnull(), value)
else:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f2a7ac76481d4..cc423bd3694b8 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -362,10 +362,13 @@ def apply(self, func, mgr=None, **kwargs):
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ errors=None, mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
+ if not errors:
+ errors = 'coerce'
+
inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
@@ -399,12 +402,16 @@ def fillna(self, value, limit=None, inplace=False, downcast=None,
if not mask.any():
return self if inplace else self.copy()
- # we cannot coerce the underlying object, so
- # make an ObjectBlock
- return self.to_object_block(mgr=mgr).fillna(original_value,
- limit=limit,
- inplace=inplace,
- downcast=False)
+ if errors == 'coerce':
+ # we cannot coerce the underlying object, so
+ # make an ObjectBlock
+ return self.to_object_block(mgr=mgr).fillna(original_value,
+ limit=limit,
+ inplace=inplace,
+ downcast=False,
+ errors='ignore')
+ else: # errors == 'ignore'
+ return self
def _maybe_downcast(self, blocks, downcast=None):
@@ -2132,11 +2139,14 @@ def _try_coerce_result(self, result):
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ errors=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
+ if errors is not None:
+ raise NotImplementedError("specifying error handling for 'fillna' "
+ "has not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
@@ -2626,11 +2636,13 @@ def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ errors=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
+ if errors is not None:
+ raise NotImplementedError
values = self.values if inplace else self.values.copy()
values = values.fillna(value, downcast=downcast)
return [self.make_block_same_class(values=values,
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 5aabc9d8730dd..8d64e235a962d 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -20,7 +20,8 @@
_ensure_float64)
from pandas.core.dtypes.cast import infer_dtype_from_array
-from pandas.core.dtypes.missing import isnull
+from pandas.core.dtypes.missing import isnull, is_valid_fill_value
+from pandas.core.dtypes.generic import ABCSeries
def mask_missing(arr, values_to_mask):
@@ -634,6 +635,35 @@ def fill_zeros(result, x, y, name, fill):
return result
+def validate_fill_value(obj, value):
+ """
+
+ Fillna error coercion routine.
+
+ Parameters
+ ----------
+ obj : Series of DataFrame
+ The Series or DataFrame for which a fill value is being evaluated.
+ If obj is a DataFrame this method simply returns True (e.g. the fillna
+ operation is allowed to continue) because it will be broken up and
+ parsed as a sequence of sub-Series later on.
+ value : object
+ The value to be used as a fill for the object.
+
+ Returns
+ -------
+ continue : bool
+ Whether or not, based on the values and the error mode, the fill
+ operation ought to continue.
+ """
+ if isinstance(obj, ABCSeries):
+ if not is_valid_fill_value(value, obj.dtype):
+ raise TypeError('"value" parameter must be compatible '
+ 'with the {0} dtype, but you passed a '
+ '"{1}"'.format(obj.dtype,
+ type(value).__name__))
+
+
def _interp_limit(invalid, fw_limit, bw_limit):
"""Get idx of values that won't be filled b/c they exceed the limits.
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 90993890b7553..9b5fc09218b28 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -2,8 +2,9 @@
from warnings import catch_warnings
import numpy as np
-from datetime import datetime
+from datetime import datetime, timedelta
from pandas.util import testing as tm
+import pytest
import pandas as pd
from pandas.core import config as cf
@@ -14,7 +15,7 @@
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
- na_value_for_dtype)
+ na_value_for_dtype, is_valid_fill_value)
def test_notnull():
@@ -312,3 +313,35 @@ def test_na_value_for_dtype():
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
+
+
+@pytest.mark.parametrize(('value', 'dtype'),
+ [(False, bool), (np.nan, bool),
+ (0, int), (0.0, int), (0j, int), (np.nan, int),
+ (0, float), (0.0, float), (0j, float),
+ (np.nan, float),
+ (0, complex), (0.0, complex), (0j, complex),
+ (np.nan, complex),
+ (False, str), (0, str), (0.0, str), (0j, str),
+ (np.nan, str), ('0', str),
+ (datetime(1970, 1, 1), np.datetime64),
+ (pd.Timestamp('1970-01-01'), np.datetime64),
+ (timedelta(0), np.timedelta64),
+ (pd.Timedelta(0), np.timedelta64)])
+def test_valid_fill_value(value, dtype):
+ assert is_valid_fill_value(value, dtype)
+
+
+@pytest.mark.parametrize(('value', 'dtype'),
+ [(0, bool), (0.0, bool), (0j, bool), ('0', bool),
+ ('0', int),
+ ('0', float),
+ ('0', complex),
+ ('0', np.dtype('datetime64')),
+ (timedelta(0), np.dtype('datetime64')),
+ (pd.Period('1970-01-01'), np.dtype('datetime64')),
+ ('0', np.dtype('timedelta64')),
+ (datetime(1970, 1, 1), np.dtype('timedelta64')),
+ (pd.Period('1970-01-01'), np.dtype('timedelta64'))])
+def test_invalid_fill_value(value, dtype):
+ assert not is_valid_fill_value(value, dtype)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 77f0357685cab..248d6ea2f019a 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -518,6 +518,205 @@ def test_fill_value_when_combine_const(self):
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
+ def test_fillna_error_modes_numeric_fill(self):
+ # Filling numeric/object cols with a numeric
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo'],})
+ expected = DataFrame({'a': [0.0, 1.0],
+ 'b': [0, True],
+ 'c': [0.0, 1.0],
+ 'd': [0, 1j],
+ 'e': [0, 'foo']})
+
+ result = df1.fillna(0, errors='coerce')
+ assert_frame_equal(result, expected)
+ result = df1.fillna(0, errors='ignore')
+ assert_frame_equal(result, expected)
+ result = df1.fillna(0, errors='raise')
+ assert_frame_equal(result, expected)
+
+ def test_fillna_error_modes_bool_fill(self):
+ # Filling numeric/object cols with a bool
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo']})
+
+ result = df1.fillna(False, errors='coerce')
+ expected = DataFrame({'a': [0.0, 1.0],
+ 'b': [False, True],
+ 'c': [0.0, 1.0],
+ 'd': [0.0, 1j],
+ 'e': [False, 'foo']})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(False, errors='ignore')
+ expected = DataFrame({'a': [nan, 1.0],
+ 'b': [False, True],
+ 'c': [nan, 1.0],
+ 'd': [nan, 1j],
+ 'e': [False, 'foo']})
+ assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ df1.fillna(False, errors='raise')
+
+ def test_fillna_error_modes_obj_fill(self):
+ # Filling numeric/object cols with an obj
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo']})
+
+ result = df1.fillna('bar', errors='coerce')
+ expected = DataFrame({'a': ['bar', 1.0],
+ 'b': ['bar', True],
+ 'c': ['bar', 1.0],
+ 'd': ['bar', 1j],
+ 'e': ['bar', 'foo']})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna('bar', errors='ignore')
+ expected = DataFrame({'a': [nan, 1.0],
+ 'b': ['bar', True], # col cast to obj!
+ 'c': [nan, 1.0],
+ 'd': [nan, 1j],
+ 'e': ['bar', 'foo']})
+ assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ df1.fillna('bar', errors='raise')
+
+ def test_fillna_error_modes_datetime_fill(self):
+ # Filling numeric/object/datetime cols with a datetime
+ timestamp = Timestamp('1970-01-01')
+
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo'],
+ 'f': [nan, timestamp]})
+
+ result = df1.fillna(Timestamp('1970-01-01'), errors='coerce')
+ expected = DataFrame({'a': [timestamp, 1.0],
+ 'b': [timestamp, True],
+ 'c': [timestamp, 1.0],
+ 'd': [timestamp, 1j],
+ 'e': [timestamp, 'foo'],
+ 'f': [timestamp, timestamp]})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(timestamp, errors='ignore')
+ expected = DataFrame({'a': [nan, 1.0],
+ 'b': [timestamp, True], # col cast to obj!
+ 'c': [nan, 1.0],
+ 'd': [nan, 1j],
+ 'e': [timestamp, 'foo'],
+ 'f': [timestamp, timestamp]})
+ assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ df1.fillna(Timestamp('1970-01-01'), errors='raise')
+
+ def test_fillna_error_modes_timedelta_fill(self):
+ # Filling numeric/object/timedelta cols with a timedelta
+ timedelta = pd.Timedelta('1 hour')
+
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo'],
+ 'f': [nan, timedelta]})
+
+ result = df1.fillna(timedelta, errors='coerce')
+ expected = DataFrame({'a': [timedelta, 1.0],
+ 'b': [timedelta, True],
+ 'c': [timedelta, 1.0],
+ 'd': [timedelta, 1j],
+ 'e': [timedelta, 'foo'],
+ 'f': [timedelta, timedelta]})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(pd.Timedelta('1 hour'), errors='ignore')
+ expected = DataFrame({'a': [nan, 1.0],
+ 'b': [pd.Timedelta('1 hour'), True], # col cast to obj!
+ 'c': [nan, 1.0],
+ 'd': [nan, 1j],
+ 'e': [pd.Timedelta('1 hour'), 'foo'],
+ 'f': [pd.Timedelta('1 hour'), pd.Timedelta('1 hour')]})
+ assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ df1.fillna(Timestamp('1970-01-01'), errors='raise')
+
+ def test_fillna_error_modes_period_fill(self):
+ # Filling numeric/object/period cols with a period
+ period = pd.Period('1 hour')
+
+ df1 = DataFrame({'a': [nan, 1.0],
+ 'b': [nan, True],
+ 'c': [nan, 1],
+ 'd': [nan, 1j],
+ 'e': [nan, 'foo'],
+ 'f': [nan, period]})
+
+ result = df1.fillna(pd.Period('1 hour'), errors='coerce')
+ expected = DataFrame({'a': [period, 1.0],
+ 'b': [period, True],
+ 'c': [period, 1.0],
+ 'd': [period, 1j],
+ 'e': [period, 'foo'],
+ 'f': [period, period]})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(period, errors='ignore')
+ expected = DataFrame({'a': [nan, 1.0],
+ 'b': [period, True], # col cast to obj!
+ 'c': [nan, 1.0],
+ 'd': [nan, 1j],
+ 'e': [period, 'foo'],
+ 'f': [period, period]})
+ assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ df1.fillna(period, errors='raise')
+
+ def test_fillna_error_modes_time_dtype_interactions(self):
+ timedelta = pd.Timedelta('1 hour')
+ period = pd.Period('1 hour')
+ timestamp = Timestamp('1970-01-01')
+
+ df1 = DataFrame({'a': [nan, timedelta],
+ 'b': [nan, period],
+ 'c': [nan, timestamp]})
+
+ result = df1.fillna(timedelta, errors='ignore')
+ expected = DataFrame({'a': [timedelta, timedelta],
+ 'b': [timedelta, period], # col cast to obj!
+ 'c': [nan, timestamp]})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(period, errors='ignore')
+ expected = DataFrame({'a': [nan, timedelta],
+ 'b': [period, period], # col cast to obj!
+ 'c': [nan, timestamp]})
+ assert_frame_equal(result, expected)
+
+ result = df1.fillna(timestamp, errors='ignore')
+ expected = DataFrame({'a': [nan, timedelta],
+ 'b': [timestamp, period], # col cast to obj!
+ 'c': [timestamp, timestamp]})
+ assert_frame_equal(result, expected)
+
+ # TODO: coerce tests.
+
class TestDataFrameInterpolate(TestData):
| - [ ] progress towards #15533
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15653 | 2017-03-11T01:47:44Z | 2017-08-17T10:31:07Z | null | 2017-08-17T10:31:07Z |
MAINT: Remove testing.assert_isinstance | diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 2fb58ef70e3cb..e5cb953cb35a5 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -765,9 +765,6 @@ def test_warning(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertNotAlmostEquals(1, 2)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- tm.assert_isinstance(Series([1, 2]), Series, msg='xxx')
-
class TestLocale(tm.TestCase):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index ec30a9376a9da..74ff480a9c198 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -991,11 +991,6 @@ def assertIsInstance(obj, cls, msg=''):
raise AssertionError(err_msg.format(msg, cls, type(obj)))
-def assert_isinstance(obj, class_type_or_tuple, msg=''):
- return deprecate('assert_isinstance', assertIsInstance)(
- obj, class_type_or_tuple, msg=msg)
-
-
def assertNotIsInstance(obj, cls, msg=''):
"""Test that obj is not an instance of cls
(which can be a class or a tuple of classes,
| Deprecated in 0.17.0
xref #10458
| https://api.github.com/repos/pandas-dev/pandas/pulls/15652 | 2017-03-11T01:15:31Z | 2017-03-11T17:24:11Z | 2017-03-11T17:24:11Z | 2017-03-11T18:57:22Z |
API: Drop DataFrame.iterkv() | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index dd081ea605c01..3be2eb02038a2 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -692,6 +692,7 @@ Other API Changes
- Reorganization of timeseries development tests (:issue:`14854`)
- Specific support for ``copy.copy()`` and ``copy.deepcopy()`` functions on NDFrame objects (:issue:`15444`)
- ``Series.sort_values()`` accepts a one element list of bool for consistency with the behavior of ``DataFrame.sort_values()`` (:issue:`15604`)
+- ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`11121`)
.. _whatsnew_0200.deprecations:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a0111cb9ef7ec..1db9677659ca3 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -899,16 +899,6 @@ def iteritems(self):
for h in self._info_axis:
yield h, self[h]
- # originally used to get around 2to3's changes to iteritems.
- # Now unnecessary. Sidenote: don't want to deprecate this for a while,
- # otherwise libraries that use 2to3 will have issues.
- def iterkv(self, *args, **kwargs):
- "iteritems alias used to get around 2to3. Deprecated"
- warnings.warn("iterkv is deprecated and will be removed in a future "
- "release, use ``iteritems`` instead.", FutureWarning,
- stacklevel=2)
- return self.iteritems(*args, **kwargs)
-
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_misc_api.py
index 674202980807a..4df9981c746b1 100644
--- a/pandas/tests/frame/test_misc_api.py
+++ b/pandas/tests/frame/test_misc_api.py
@@ -389,10 +389,6 @@ def test_repr_with_mi_nat(self):
exp = ' X\nNaT a 1\n2013-01-01 b 2'
self.assertEqual(res, exp)
- def test_iterkv_deprecation(self):
- with tm.assert_produces_warning(FutureWarning):
- self.mixed_float.iterkv()
-
def test_iterkv_names(self):
for k, v in compat.iteritems(self.mixed_frame):
self.assertEqual(v.name, k)
| Deprecated since 0.17.0
xref #10711
| https://api.github.com/repos/pandas-dev/pandas/pulls/15650 | 2017-03-10T18:47:45Z | 2017-03-10T21:29:31Z | null | 2017-03-10T21:31:07Z |
0.19.x | diff --git a/.gitignore b/.gitignore
index 19f1cc804dca0..a77e780f3332d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@
*.class
*.dll
*.exe
+*.pxi
*.o
*.py[ocd]
*.so
diff --git a/.travis.yml b/.travis.yml
index 4eefd6ca83694..49765c9df96ea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -34,6 +34,7 @@ matrix:
compiler: clang
osx_image: xcode6.4
env:
+ - PYTHON_VERSION=3.5
- JOB_NAME: "35_osx"
- NOSE_ARGS="not slow and not network and not disabled"
- BUILD_TYPE=conda
@@ -43,6 +44,7 @@ matrix:
- USE_CACHE=true
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_slow_nnet_LOCALE"
- NOSE_ARGS="slow and not network and not disabled"
- LOCALE_OVERRIDE="zh_CN.UTF-8"
@@ -56,6 +58,7 @@ matrix:
- language-pack-zh-hans
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_nslow"
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
@@ -69,6 +72,7 @@ matrix:
- python-gtk2
- python: 3.4
env:
+ - PYTHON_VERSION=3.4
- JOB_NAME: "34_nslow"
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
@@ -81,6 +85,7 @@ matrix:
- xsel
- python: 3.5
env:
+ - PYTHON_VERSION=3.5
- JOB_NAME: "35_nslow"
- NOSE_ARGS="not slow and not network and not disabled"
- FULL_DEPS=true
@@ -95,6 +100,7 @@ matrix:
# In allow_failures
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_slow"
- JOB_TAG=_SLOW
- NOSE_ARGS="slow and not network and not disabled"
@@ -104,6 +110,7 @@ matrix:
# In allow_failures
- python: 3.4
env:
+ - PYTHON_VERSION=3.4
- JOB_NAME: "34_slow"
- JOB_TAG=_SLOW
- NOSE_ARGS="slow and not network and not disabled"
@@ -118,6 +125,7 @@ matrix:
# In allow_failures
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_build_test_conda"
- JOB_TAG=_BUILD_TEST
- NOSE_ARGS="not slow and not disabled"
@@ -125,9 +133,23 @@ matrix:
- BUILD_TEST=true
- CACHE_NAME="27_build_test_conda"
- USE_CACHE=true
+# In allow_failures
+ - python: 3.6-dev
+ env:
+ - PYTHON_VERSION=3.6
+ - JOB_NAME: "36_dev"
+ - JOB_TAG=_DEV
+ - NOSE_ARGS="not slow and not network and not disabled"
+ - PANDAS_TESTING_MODE="deprecate"
+ addons:
+ apt:
+ packages:
+ - libatlas-base-dev
+ - gfortran
# In allow_failures
- python: 3.5
env:
+ - PYTHON_VERSION=3.5
- JOB_NAME: "35_numpy_dev"
- JOB_TAG=_NUMPY_DEV
- NOSE_ARGS="not slow and not network and not disabled"
@@ -142,6 +164,7 @@ matrix:
# In allow_failures
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_nslow_nnet_COMPAT"
- NOSE_ARGS="not slow and not network and not disabled"
- LOCALE_OVERRIDE="it_IT.UTF-8"
@@ -156,6 +179,7 @@ matrix:
# In allow_failures
- python: 3.5
env:
+ - PYTHON_VERSION=3.5
- JOB_NAME: "35_ascii"
- JOB_TAG=_ASCII
- NOSE_ARGS="not slow and not network and not disabled"
@@ -165,6 +189,7 @@ matrix:
# In allow_failures
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "doc_build"
- FULL_DEPS=true
- DOC_BUILD=true
@@ -174,6 +199,7 @@ matrix:
allow_failures:
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_slow"
- JOB_TAG=_SLOW
- NOSE_ARGS="slow and not network and not disabled"
@@ -182,6 +208,7 @@ matrix:
- USE_CACHE=true
- python: 3.4
env:
+ - PYTHON_VERSION=3.4
- JOB_NAME: "34_slow"
- JOB_TAG=_SLOW
- NOSE_ARGS="slow and not network and not disabled"
@@ -195,6 +222,7 @@ matrix:
- xsel
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_build_test_conda"
- JOB_TAG=_BUILD_TEST
- NOSE_ARGS="not slow and not disabled"
@@ -202,14 +230,27 @@ matrix:
- BUILD_TEST=true
- CACHE_NAME="27_build_test_conda"
- USE_CACHE=true
- - python: 3.5
+ - python: 3.6-dev
env:
- - JOB_NAME: "35_numpy_dev"
- - JOB_TAG=_NUMPY_DEV
+ - PYTHON_VERSION=3.6
+ - JOB_NAME: "36_dev"
+ - JOB_TAG=_DEV
- NOSE_ARGS="not slow and not network and not disabled"
- PANDAS_TESTING_MODE="deprecate"
- - CACHE_NAME="35_numpy_dev"
- - USE_CACHE=true
+ addons:
+ apt:
+ packages:
+ - libatlas-base-dev
+ - gfortran
+ - python: 3.5
+ env:
+ - PYTHON_VERSION=3.5
+ - JOB_NAME: "35_numpy_dev"
+ - JOB_TAG=_NUMPY_DEV
+ - NOSE_ARGS="not slow and not network and not disabled"
+ - PANDAS_TESTING_MODE="deprecate"
+ - CACHE_NAME="35_numpy_dev"
+ - USE_CACHE=true
addons:
apt:
packages:
@@ -217,6 +258,7 @@ matrix:
- gfortran
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "27_nslow_nnet_COMPAT"
- NOSE_ARGS="not slow and not network and not disabled"
- LOCALE_OVERRIDE="it_IT.UTF-8"
@@ -230,6 +272,7 @@ matrix:
- language-pack-it
- python: 3.5
env:
+ - PYTHON_VERSION=3.5
- JOB_NAME: "35_ascii"
- JOB_TAG=_ASCII
- NOSE_ARGS="not slow and not network and not disabled"
@@ -238,6 +281,7 @@ matrix:
- USE_CACHE=true
- python: 2.7
env:
+ - PYTHON_VERSION=2.7
- JOB_NAME: "doc_build"
- FULL_DEPS=true
- DOC_BUILD=true
@@ -249,7 +293,7 @@ before_install:
- echo "before_install"
- source ci/travis_process_gbq_encryption.sh
- echo $VIRTUAL_ENV
- - export PATH="$HOME/miniconda/bin:$PATH"
+ - export PATH="$HOME/miniconda3/bin:$PATH"
- df -h
- date
- pwd
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 6eac7b4831f0f..53b7d55368f6a 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -1,5 +1,6 @@
import numpy as np
import pandas as pd
+from pandas.util import testing as tm
class algorithm(object):
@@ -15,6 +16,14 @@ def setup(self):
self.int = pd.Int64Index(np.arange(N).repeat(5))
self.float = pd.Float64Index(np.random.randn(N).repeat(5))
+ # Convenience naming.
+ self.checked_add = pd.core.nanops._checked_add_with_arr
+
+ self.arr = np.arange(1000000)
+ self.arrpos = np.arange(1000000)
+ self.arrneg = np.arange(-1000000, 0)
+ self.arrmixed = np.array([1, -1]).repeat(500000)
+
def time_int_factorize(self):
self.int.factorize()
@@ -29,3 +38,53 @@ def time_int_duplicated(self):
def time_float_duplicated(self):
self.float.duplicated()
+
+ def time_add_overflow_pos_scalar(self):
+ self.checked_add(self.arr, 1)
+
+ def time_add_overflow_neg_scalar(self):
+ self.checked_add(self.arr, -1)
+
+ def time_add_overflow_zero_scalar(self):
+ self.checked_add(self.arr, 0)
+
+ def time_add_overflow_pos_arr(self):
+ self.checked_add(self.arr, self.arrpos)
+
+ def time_add_overflow_neg_arr(self):
+ self.checked_add(self.arr, self.arrneg)
+
+ def time_add_overflow_mixed_arr(self):
+ self.checked_add(self.arr, self.arrmixed)
+
+
+class hashing(object):
+ goal_time = 0.2
+
+ def setup(self):
+ N = 100000
+
+ self.df = pd.DataFrame(
+ {'A': pd.Series(tm.makeStringIndex(100).take(
+ np.random.randint(0, 100, size=N))),
+ 'B': pd.Series(tm.makeStringIndex(10000).take(
+ np.random.randint(0, 10000, size=N))),
+ 'D': np.random.randn(N),
+ 'E': np.arange(N),
+ 'F': pd.date_range('20110101', freq='s', periods=N),
+ 'G': pd.timedelta_range('1 day', freq='s', periods=N),
+ })
+ self.df['C'] = self.df['B'].astype('category')
+ self.df.iloc[10:20] = np.nan
+
+ def time_frame(self):
+ self.df.hash()
+
+ def time_series_int(self):
+ self.df.E.hash()
+
+ def time_series_string(self):
+ self.df.B.hash()
+
+ def time_series_categorical(self):
+ self.df.C.hash()
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index a21dee2e612d2..df73a474b2683 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -1012,3 +1012,14 @@ def setup(self):
def time_frame_quantile_axis1(self):
self.df.quantile([0.1, 0.5], axis=1)
+
+
+class frame_nlargest(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.df = DataFrame(np.random.randn(1000, 3),
+ columns=list('ABC'))
+
+ def time_frame_nlargest(self):
+ self.df.nlargest(100, 'A')
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index 75b2c2dcacfed..bed2c4d5309cd 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -47,3 +47,28 @@ def time_period_index_value_counts(self):
self.i.value_counts()
+class period_standard_indexing(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.index = PeriodIndex(start='1985', periods=1000, freq='D')
+ self.series = Series(range(1000), index=self.index)
+ self.period = self.index[500]
+
+ def time_get_loc(self):
+ self.index.get_loc(self.period)
+
+ def time_shape(self):
+ self.index.shape
+
+ def time_shallow_copy(self):
+ self.index._shallow_copy()
+
+ def time_series_loc(self):
+ self.series.loc[self.period]
+
+ def time_align(self):
+ pd.DataFrame({'a': self.series, 'b': self.series[:500]})
+
+ def time_intersection(self):
+ self.index[:750].intersection(self.index[250:])
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 869ddd8d6fa49..66b8af53801ac 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -32,6 +32,30 @@ def time_replace_large_dict(self):
self.s.replace(self.to_rep, inplace=True)
+class replace_convert(object):
+ goal_time = 0.5
+
+ def setup(self):
+ self.n = (10 ** 3)
+ self.to_ts = dict(((i, pd.Timestamp(i)) for i in range(self.n)))
+ self.to_td = dict(((i, pd.Timedelta(i)) for i in range(self.n)))
+ self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
+ self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)),
+ 'B': np.random.randint(self.n, size=(10 ** 3))})
+
+ def time_replace_series_timestamp(self):
+ self.s.replace(self.to_ts)
+
+ def time_replace_series_timedelta(self):
+ self.s.replace(self.to_td)
+
+ def time_replace_frame_timestamp(self):
+ self.df.replace(self.to_ts)
+
+ def time_replace_frame_timedelta(self):
+ self.df.replace(self.to_td)
+
+
class replace_replacena(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 4e368c6d7cde2..413c4e044fd3a 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -8,13 +8,28 @@ def setup(self):
self.dr = pd.date_range(
start=datetime(2015,10,26),
end=datetime(2016,1,1),
- freq='10s'
- ) # ~500k long
+ freq='50s'
+ ) # ~100k long
def time_series_constructor_no_data_datetime_index(self):
Series(data=None, index=self.dr)
+class series_constructor_dict_data_datetime_index(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.dr = pd.date_range(
+ start=datetime(2015, 10, 26),
+ end=datetime(2016, 1, 1),
+ freq='50s'
+ ) # ~100k long
+ self.data = {d: v for d, v in zip(self.dr, range(len(self.dr)))}
+
+ def time_series_constructor_no_data_datetime_index(self):
+ Series(data=self.data, index=self.dr)
+
+
class series_isin_int64(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py
index 9719fd87dfb2e..8470525dd01fa 100644
--- a/asv_bench/benchmarks/timedelta.py
+++ b/asv_bench/benchmarks/timedelta.py
@@ -1,5 +1,5 @@
from .pandas_vb_common import *
-from pandas import to_timedelta
+from pandas import to_timedelta, Timestamp
class timedelta_convert_int(object):
@@ -47,3 +47,14 @@ def time_timedelta_convert_coerce(self):
def time_timedelta_convert_ignore(self):
to_timedelta(self.arr, errors='ignore')
+
+
+class timedelta_add_overflow(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.td = to_timedelta(np.arange(1000000))
+ self.ts = Timestamp('2000')
+
+ def test_add_td_ts(self):
+ self.td + self.ts
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index fda6ebb4b437e..8c00924cb07ef 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -284,56 +284,77 @@ class timeseries_asof(object):
goal_time = 0.2
def setup(self):
- self.N = 100000
- self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
- if hasattr(Series, 'convert'):
- Series.resample = Series.convert
- self.ts = Series(np.random.randn(self.N), index=self.rng)
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
- self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.ts2 = self.ts.copy()
+ self.ts2[250:5000] = np.nan
+ self.ts3 = self.ts.copy()
+ self.ts3[-5000:] = np.nan
- def time_timeseries_asof(self):
+ # test speed of pre-computing NAs.
+ def time_asof_list(self):
self.ts.asof(self.dates)
+ # should be roughly the same as above.
+ def time_asof_nan_list(self):
+ self.ts2.asof(self.dates)
-class timeseries_asof_nan(object):
- goal_time = 0.2
+ # test speed of the code path for a scalar index
+ # without *while* loop
+ def time_asof_single(self):
+ self.ts.asof(self.dates[0])
- def setup(self):
- self.N = 100000
- self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
- if hasattr(Series, 'convert'):
- Series.resample = Series.convert
- self.ts = Series(np.random.randn(self.N), index=self.rng)
- self.N = 10000
- self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
- self.ts = Series(np.random.randn(self.N), index=self.rng)
- self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
- self.ts[250:5000] = np.nan
+ # test speed of the code path for a scalar index
+ # before the start. should be the same as above.
+ def time_asof_single_early(self):
+ self.ts.asof(self.dates[0] - dt.timedelta(10))
- def time_timeseries_asof_nan(self):
- self.ts.asof(self.dates)
+ # test the speed of the code path for a scalar index
+ # with a long *while* loop. should still be much
+ # faster than pre-computing all the NAs.
+ def time_asof_nan_single(self):
+ self.ts3.asof(self.dates[-1])
-class timeseries_asof_single(object):
+class timeseries_dataframe_asof(object):
goal_time = 0.2
def setup(self):
- self.N = 100000
- self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
- if hasattr(Series, 'convert'):
- Series.resample = Series.convert
- self.ts = Series(np.random.randn(self.N), index=self.rng)
self.N = 10000
+ self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
- self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
+ self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
+ self.ts2 = self.ts.copy()
+ self.ts2.iloc[250:5000] = np.nan
+ self.ts3 = self.ts.copy()
+ self.ts3.iloc[-5000:] = np.nan
+
+ # test speed of pre-computing NAs.
+ def time_asof_list(self):
+ self.ts.asof(self.dates)
- def time_timeseries_asof_single(self):
+ # should be roughly the same as above.
+ def time_asof_nan_list(self):
+ self.ts2.asof(self.dates)
+
+ # test speed of the code path for a scalar index
+ # with pre-computing all NAs.
+ def time_asof_single(self):
self.ts.asof(self.dates[0])
+ # should be roughly the same as above.
+ def time_asof_nan_single(self):
+ self.ts3.asof(self.dates[-1])
+
+ # test speed of the code path for a scalar index
+ # before the start. should be without the cost of
+ # pre-computing all the NAs.
+ def time_asof_single_early(self):
+ self.ts.asof(self.dates[0] - dt.timedelta(10))
+
class timeseries_custom_bday_apply(object):
goal_time = 0.2
diff --git a/ci/install-2.7_NUMPY_DEV.sh b/ci/install-2.7_NUMPY_DEV.sh
deleted file mode 100644
index 22ac8f6547879..0000000000000
--- a/ci/install-2.7_NUMPY_DEV.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install numpy master wheel"
-
-# remove the system installed numpy
-pip uninstall numpy -y
-
-# we need these for numpy
-
-# these wheels don't play nice with the conda libgfortran / openblas
-# time conda install -n pandas libgfortran openblas || exit 1
-
-# install numpy wheel from master
-pip install --pre --upgrade --no-index --timeout=60 --trusted-host travis-dev-wheels.scipy.org -f http://travis-dev-wheels.scipy.org/ numpy
-
-true
diff --git a/ci/install-3.6_DEV.sh b/ci/install-3.6_DEV.sh
new file mode 100644
index 0000000000000..0b95f1cd45cad
--- /dev/null
+++ b/ci/install-3.6_DEV.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+echo "install 3.6 dev"
+
+conda config --set add_pip_as_python_dependency false
+conda create -n pandas python=3.6 -c conda-forge/label/prerelease
+
+source activate pandas
+
+# ensure we have pip
+python -m ensurepip
+
+# install deps
+pip3.6 install nose cython numpy pytz python-dateutil
+
+true
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 98ce36acc096e..bdd2c01f611b2 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -31,10 +31,7 @@ edit_init
home_dir=$(pwd)
echo "home_dir: [$home_dir]"
-python_major_version="${TRAVIS_PYTHON_VERSION:0:1}"
-[ "$python_major_version" == "2" ] && python_major_version=""
-
-MINICONDA_DIR="$HOME/miniconda"
+MINICONDA_DIR="$HOME/miniconda3"
if [ -d "$MINICONDA_DIR" ] && [ -e "$MINICONDA_DIR/bin/conda" ] && [ "$USE_CACHE" ]; then
echo "Miniconda install already present from cache: $MINICONDA_DIR"
@@ -63,9 +60,9 @@ else
rm -rf "$MINICONDA_DIR"
# install miniconda
if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
- wget http://repo.continuum.io/miniconda/Miniconda-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1
+ wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1
else
- wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh || exit 1
+ wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1
fi
bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1
@@ -84,21 +81,25 @@ else
# Useful for debugging any issues with conda
conda info -a || exit 1
-
- time conda create -n pandas python=$TRAVIS_PYTHON_VERSION nose coverage flake8 || exit 1
-
fi
-# build deps
-REQ="ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.build"
-# may have additional installation instructions for this build
-INSTALL="ci/install-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.sh"
+# may have installation instructions for this build
+INSTALL="ci/install-${PYTHON_VERSION}${JOB_TAG}.sh"
if [ -e ${INSTALL} ]; then
time bash $INSTALL || exit 1
+else
+
+ # create new env
+ time conda create -n pandas python=$PYTHON_VERSION nose coverage flake8 || exit 1
fi
+# build deps
+REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build"
+
# install deps
-time conda install -n pandas --file=${REQ} || exit 1
+if [ -e ${REQ} ]; then
+ time conda install -n pandas --file=${REQ} || exit 1
+fi
source activate pandas
@@ -106,7 +107,7 @@ if [ "$BUILD_TEST" ]; then
# build testing
pip uninstall --yes cython
- pip install cython==0.15.1
+ pip install cython==0.19.1
( python setup.py build_ext --inplace && python setup.py develop ) || true
else
@@ -117,14 +118,22 @@ else
# we may have run installations
echo "conda installs"
- REQ="ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.run"
- time conda install -n pandas --file=${REQ} || exit 1
+ REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run"
+ if [ -e ${REQ} ]; then
+ time conda install -n pandas --file=${REQ} || exit 1
+ fi
# we may have additional pip installs
echo "pip installs"
- REQ="ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.pip"
+ REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip"
if [ -e ${REQ} ]; then
- pip install --upgrade -r $REQ
+ pip install --upgrade -r $REQ
+ fi
+
+ # may have addtl installation instructions for this build
+ REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.sh"
+ if [ -e ${REQ} ]; then
+ time bash $REQ || exit 1
fi
# remove any installed pandas package
@@ -138,9 +147,5 @@ else
fi
-if [ "$JOB_NAME" == "34_slow" ]; then
- conda install -c conda-forge/label/rc -c conda-forge matplotlib
-fi
-
echo "done"
exit 0
diff --git a/ci/lint.sh b/ci/lint.sh
index a866b04445f96..d6390a16b763e 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -7,27 +7,27 @@ source activate pandas
RET=0
if [ "$LINT" ]; then
- echo "Linting"
- for path in 'api' 'core' 'indexes' 'types' 'formats' 'io' 'stats' 'compat' 'sparse' 'tools' 'tseries' 'tests' 'computation' 'util'
- do
- echo "linting -> pandas/$path"
- flake8 pandas/$path --filename '*.py'
- if [ $? -ne "0" ]; then
- RET=1
- fi
-
- done
+ # pandas/rpy is deprecated and will be removed.
+ # pandas/src is C code, so no need to search there.
+ echo "Linting *.py"
+ flake8 pandas --filename=*.py --exclude pandas/rpy,pandas/src
+ if [ $? -ne "0" ]; then
+ RET=1
+ fi
echo "Linting *.py DONE"
echo "Linting *.pyx"
- flake8 pandas --filename '*.pyx' --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126
+ flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126
+ if [ $? -ne "0" ]; then
+ RET=1
+ fi
echo "Linting *.pyx DONE"
echo "Linting *.pxi.in"
for path in 'src'
do
echo "linting -> pandas/$path"
- flake8 pandas/$path --filename '*.pxi.in' --select=E501,E302,E203,E111,E114,E221,E303,E231,E126
+ flake8 pandas/$path --filename=*.pxi.in --select=E501,E302,E203,E111,E114,E221,E303,E231,E126
if [ $? -ne "0" ]; then
RET=1
fi
diff --git a/ci/requirements-2.7_NUMPY_DEV.build b/ci/requirements-2.7_NUMPY_DEV.build
deleted file mode 100644
index d15edbfa3d2c1..0000000000000
--- a/ci/requirements-2.7_NUMPY_DEV.build
+++ /dev/null
@@ -1,3 +0,0 @@
-python-dateutil
-pytz
-cython
diff --git a/ci/requirements-2.7_NUMPY_DEV.run b/ci/requirements-2.7_NUMPY_DEV.run
deleted file mode 100644
index 0aa987baefb1d..0000000000000
--- a/ci/requirements-2.7_NUMPY_DEV.run
+++ /dev/null
@@ -1,2 +0,0 @@
-python-dateutil
-pytz
diff --git a/ci/requirements-3.4.build b/ci/requirements-3.4.build
index 4a4bd9d433428..e6e59dcba63fe 100644
--- a/ci/requirements-3.4.build
+++ b/ci/requirements-3.4.build
@@ -1,3 +1,3 @@
numpy=1.8.1
-cython
+cython=0.24.1
libgfortran=1.0
diff --git a/ci/requirements-3.4_SLOW.sh b/ci/requirements-3.4_SLOW.sh
new file mode 100644
index 0000000000000..bc8fb79147d2c
--- /dev/null
+++ b/ci/requirements-3.4_SLOW.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+source activate pandas
+
+echo "install 34_slow"
+
+conda install -n pandas -c conda-forge/label/rc -c conda-forge matplotlib
diff --git a/ci/install-3.5_NUMPY_DEV.sh b/ci/requirements-3.5_NUMPY_DEV.sh
similarity index 100%
rename from ci/install-3.5_NUMPY_DEV.sh
rename to ci/requirements-3.5_NUMPY_DEV.sh
diff --git a/ci/requirements-3.5_OSX.pip b/ci/requirements-3.5_OSX.pip
index 8a7f51f1bea9c..d1fc1fe24a079 100644
--- a/ci/requirements-3.5_OSX.pip
+++ b/ci/requirements-3.5_OSX.pip
@@ -1 +1 @@
-python-dateutil>=2.5.0
+python-dateutil==2.5.3
diff --git a/ci/travis_encrypt_gbq.sh b/ci/travis_encrypt_gbq.sh
index 719db67f384e0..e404ca73a405e 100755
--- a/ci/travis_encrypt_gbq.sh
+++ b/ci/travis_encrypt_gbq.sh
@@ -1,11 +1,10 @@
#!/bin/bash
GBQ_JSON_FILE=$1
-GBQ_PROJECT_ID=$2
-if [[ $# -ne 2 ]]; then
+if [[ $# -ne 1 ]]; then
echo -e "Too few arguments.\nUsage: ./travis_encrypt_gbq.sh "\
- "<gbq-json-credentials-file> <gbq-project-id>"
+ "<gbq-json-credentials-file>"
exit 1
fi
@@ -23,9 +22,9 @@ echo "Encrypting $GBQ_JSON_FILE..."
read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file $GBQ_JSON_FILE \
travis_gbq.json.enc -f | grep -o "\w*_iv\|\w*_key");
-echo "Adding your secure key and project id to travis_gbq_config.txt ..."
-echo -e "TRAVIS_IV_ENV=$TRAVIS_IV\nTRAVIS_KEY_ENV=$TRAVIS_KEY\n"\
-"GBQ_PROJECT_ID='$GBQ_PROJECT_ID'" > travis_gbq_config.txt
+echo "Adding your secure key to travis_gbq_config.txt ..."
+echo -e "TRAVIS_IV_ENV=$TRAVIS_IV\nTRAVIS_KEY_ENV=$TRAVIS_KEY"\
+> travis_gbq_config.txt
echo "Done. Removing file $GBQ_JSON_FILE"
rm $GBQ_JSON_FILE
diff --git a/ci/travis_gbq_config.txt b/ci/travis_gbq_config.txt
index 3b68d62f177cc..0b28cdedbd0d7 100644
--- a/ci/travis_gbq_config.txt
+++ b/ci/travis_gbq_config.txt
@@ -1,3 +1,2 @@
TRAVIS_IV_ENV=encrypted_1d9d7b1f171b_iv
TRAVIS_KEY_ENV=encrypted_1d9d7b1f171b_key
-GBQ_PROJECT_ID='pandas-travis'
diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh
index 7ff4c08f78e37..9967d40e49f0a 100755
--- a/ci/travis_process_gbq_encryption.sh
+++ b/ci/travis_process_gbq_encryption.sh
@@ -2,10 +2,12 @@
source ci/travis_gbq_config.txt
-if [[ -n ${!TRAVIS_IV_ENV} ]]; then
+if [[ -n ${SERVICE_ACCOUNT_KEY} ]]; then
+ echo "${SERVICE_ACCOUNT_KEY}" > ci/travis_gbq.json;
+elif [[ -n ${!TRAVIS_IV_ENV} ]]; then
openssl aes-256-cbc -K ${!TRAVIS_KEY_ENV} -iv ${!TRAVIS_IV_ENV} \
-in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d;
- export GBQ_PROJECT_ID=$GBQ_PROJECT_ID;
+ export GBQ_PROJECT_ID='pandas-travis';
echo 'Successfully decrypted gbq credentials'
fi
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf
new file mode 100644
index 0000000000000..a2b222c683564
Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx
new file mode 100644
index 0000000000000..5202256006ddf
Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ
diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt
new file mode 100644
index 0000000000000..e2f6ec042e9cc
--- /dev/null
+++ b/doc/cheatsheet/README.txt
@@ -0,0 +1,4 @@
+The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013.
+To create the PDF version, within Powerpoint, simply do a "Save As"
+and pick "PDF' as the format.
+
diff --git a/doc/source/api.rst b/doc/source/api.rst
index a510f663d19ee..638abd5421862 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -27,6 +27,7 @@ Flat File
read_table
read_csv
read_fwf
+ read_msgpack
Clipboard
~~~~~~~~~
diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 1414d2dd3c8dc..d727424750be5 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -214,6 +214,11 @@ computing common *window* or *rolling* statistics. Among these are count, sum,
mean, median, correlation, variance, covariance, standard deviation, skewness,
and kurtosis.
+Starting in version 0.18.1, the ``rolling()`` and ``expanding()``
+functions can be used directly from DataFrameGroupBy objects,
+see the :ref:`groupby docs <groupby.transform.window_resample>`.
+
+
.. note::
The API for window statistics is quite similar to the way one works with ``GroupBy`` objects, see the documentation :ref:`here <groupby>`
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index a8a47a9d979c0..44ee6223d5ee1 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -643,20 +643,22 @@ on Travis-CI and are only accessible from the pandas-dev/pandas repository. The
credentials won't be available on forks of pandas. Here are the steps to run
gbq integration tests on a forked repository:
-#. First, complete all the steps in the `Encrypting Files Prerequisites
- <https://docs.travis-ci.com/user/encrypting-files/>`__ section.
-#. Sign into `Travis <https://travis-ci.org/>`__ using your GitHub account.
-#. Enable your forked repository of pandas for testing in `Travis
- <https://travis-ci.org/profile/>`__.
-#. Run the following command from terminal where the current working directory
- is the ``ci`` folder::
-
- ./travis_encrypt_gbq.sh <gbq-json-credentials-file> <gbq-project-id>
-
-#. Create a new branch from the branch used in your pull request. Commit the
- encrypted file called ``travis_gbq.json.enc`` as well as the file
- ``travis_gbq_config.txt``, in an otherwise empty commit. DO NOT commit the
- ``*.json`` file which contains your unencrypted private key.
+#. Go to `Travis CI <https://travis-ci.org/>`__ and sign in with your GitHub
+ account.
+#. Click on the ``+`` icon next to the ``My Repositories`` list and enable
+ Travis builds for your fork.
+#. Click on the gear icon to edit your travis build, and add two environment
+ variables:
+
+ - ``GBQ_PROJECT_ID`` with the value being the ID of your BigQuery project.
+
+ - ``SERVICE_ACCOUNT_KEY`` with the value being the contents of the JSON key
+ that you downloaded for your service account. Use single quotes around
+ your JSON key to ensure that it is treated as a string.
+
+ For both environment variables, keep the "Display value in build log" option
+ DISABLED. These variables contain sensitive data and you do not want their
+ contents being exposed in build logs.
#. Your branch should be tested automatically once it is pushed. You can check
the status by visiting your Travis branches page which exists at the
following location: https://travis-ci.org/your-user-name/pandas/branches .
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index a37b1e89c7cc3..087b265ee83f2 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -143,7 +143,7 @@ both "column wise min/max and global min/max coloring."
API
-----
-`pandas-datareader <https://github.com/pandas-dev/pandas-datareader>`__
+`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.io/en/latest/>`_:
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index d23e0ca59254d..b96660be97d71 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -111,5 +111,5 @@ Visualizing Data in Qt applications
-----------------------------------
There is no support for such visualization in pandas. However, the external
-package `pandas-qt <https://github.com/datalyze-solutions/pandas-qt>`_ does
+package `qtpandas <https://github.com/draperjames/qtpandas>`_ does
provide this functionality.
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index c5a77770085d6..f3fcd6901a440 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -614,6 +614,54 @@ and that the transformed data contains no NAs.
grouped.ffill()
+
+.. _groupby.transform.window_resample:
+
+New syntax to window and resample operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. versionadded:: 0.18.1
+
+Working with the resample, expanding or rolling operations on the groupby
+level used to require the application of helper functions. However,
+now it is possible to use ``resample()``, ``expanding()`` and
+``rolling()`` as methods on groupbys.
+
+The example below will apply the ``rolling()`` method on the samples of
+the column B based on the groups of column A.
+
+.. ipython:: python
+
+ df_re = pd.DataFrame({'A': [1] * 10 + [5] * 10,
+ 'B': np.arange(20)})
+ df_re
+
+ df_re.groupby('A').rolling(4).B.mean()
+
+
+The ``expanding()`` method will accumulate a given operation
+(``sum()`` in the example) for all the members of each particular
+group.
+
+.. ipython:: python
+
+ df_re.groupby('A').expanding().sum()
+
+
+Suppose you want to use the ``resample()`` method to get a daily
+frequency in each group of your dataframe and wish to complete the
+missing values with the ``ffill()`` method.
+
+.. ipython:: python
+
+ df_re = pd.DataFrame({'date': pd.date_range(start='2016-01-01',
+ periods=4,
+ freq='W'),
+ 'group': [1, 1, 2, 2],
+ 'val': [5, 6, 7, 8]}).set_index('date')
+ df_re
+
+ df_re.groupby('group').resample('1D').ffill()
+
.. _groupby.filter:
Filtration
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 923c22aa9048f..55b6b5fa69efb 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.7, 3.4, and 3.5
+Officially Python 2.7, 3.4, 3.5, and 3.6
Installing pandas
-----------------
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ae71587c8b46b..ba1bd328d2991 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2789,7 +2789,7 @@ both on the writing (serialization), and reading (deserialization).
| 0.17 / Python 3 | >=0.18 / any Python |
+----------------------+------------------------+
| 0.18 | >= 0.18 |
- +======================+========================+
+ +----------------------+------------------------+
Reading (files packed by older versions) is backward-compatibile, except for files packed with 0.17 in Python 2, in which case only they can only be unpacked in Python 2.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index d210065f04459..a0aa7e032fcf6 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -37,6 +37,104 @@ analysis / manipulation tool available in any language.
* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
* Documentation: http://pandas.pydata.org
+
+pandas 0.19.2
+-------------
+
+**Release date:** December 24, 2016
+
+This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes,
+bug fixes and performance improvements.
+
+Highlights include:
+
+- Compatibility with Python 3.6
+- Added a `Pandas Cheat Sheet <https://github.com/pandas-dev/pandas/tree/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf>`__. (:issue:`13202`).
+
+See the :ref:`v0.19.1 Whatsnew <whatsnew_0192>` page for an overview of all
+bugs that have been fixed in 0.19.2.
+
+Thanks
+~~~~~~
+
+- Ajay Saxena
+- Ben Kandel
+- Chris
+- Chris Ham
+- Christopher C. Aycock
+- Daniel Himmelstein
+- Dave Willmer
+- Dr-Irv
+- gfyoung
+- hesham shabana
+- Jeff Carey
+- Jeff Reback
+- Joe Jevnik
+- Joris Van den Bossche
+- Julian Santander
+- Kerby Shedden
+- Keshav Ramaswamy
+- Kevin Sheppard
+- Luca Scarabello
+- Matti Picus
+- Matt Roeschke
+- Maximilian Roos
+- Mykola Golubyev
+- Nate Yoder
+- Nicholas Ver Halen
+- Pawel Kordek
+- Pietro Battiston
+- Rodolfo Fernandez
+- sinhrks
+- Tara Adiseshan
+- Tom Augspurger
+- wandersoncferreira
+- Yaroslav Halchenko
+
+
+pandas 0.19.1
+-------------
+
+**Release date:** November 3, 2016
+
+This is a minor bug-fix release from 0.19.0 and includes some small regression fixes,
+bug fixes and performance improvements.
+
+See the :ref:`v0.19.1 Whatsnew <whatsnew_0191>` page for an overview of all
+bugs that have been fixed in 0.19.1.
+
+Thanks
+~~~~~~
+
+- Adam Chainz
+- Anthonios Partheniou
+- Arash Rouhani
+- Ben Kandel
+- Brandon M. Burroughs
+- Chris
+- chris-b1
+- Chris Warth
+- David Krych
+- dubourg
+- gfyoung
+- Iván Vallés Pérez
+- Jeff Reback
+- Joe Jevnik
+- Jon M. Mease
+- Joris Van den Bossche
+- Josh Owen
+- Keshav Ramaswamy
+- Larry Ren
+- mattrijk
+- Michael Felt
+- paul-mannino
+- Piotr Chromiec
+- Robert Bradshaw
+- Sinhrks
+- Thiago Serafim
+- Tom Bird
+
+
pandas 0.19.0
-------------
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 4132d25e9be48..037dc53540fab 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1287,6 +1287,9 @@ limited to, financial applications.
``.resample()`` is a time-based groupby, followed by a reduction method on each of its groups.
+Starting in version 0.18.1, the ``resample()`` function can be used directly from
+DataFrameGroupBy objects, see the :ref:`groupby docs <groupby.transform.window_resample>`.
+
.. note::
``.resample()`` is similar to using a ``.rolling()`` operation with a time-based offset, see a discussion `here <stats.moments.ts-versus-resampling>`
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
index 2a1f2cc47d48e..616e1f5c8efc7 100644
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -18,6 +18,8 @@ What's New
These are new features and improvements of note in each release.
+.. include:: whatsnew/v0.19.2.txt
+
.. include:: whatsnew/v0.19.1.txt
.. include:: whatsnew/v0.19.0.txt
diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt
index 0944d849cfafd..6ecd4b487c798 100644
--- a/doc/source/whatsnew/v0.13.0.txt
+++ b/doc/source/whatsnew/v0.13.0.txt
@@ -600,7 +600,7 @@ Enhancements
.. ipython:: python
t = Timestamp('20130101 09:01:02')
- t + pd.datetools.Nano(123)
+ t + pd.tseries.offsets.Nano(123)
- A new method, ``isin`` for DataFrames, which plays nicely with boolean indexing. The argument to ``isin``, what we're comparing the DataFrame to, can be a DataFrame, Series, dict, or array of values. See :ref:`the docs<indexing.basics.indexing_isin>` for more.
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 2786cb781a9ee..db5bd22393e64 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -1,15 +1,12 @@
.. _whatsnew_0191:
-v0.19.1 (????, 2016)
----------------------
+v0.19.1 (November 3, 2016)
+--------------------------
-This is a minor bug-fix release from 0.19.0 and includes a large number of
-bug fixes along with several new features, enhancements, and performance improvements.
+This is a minor bug-fix release from 0.19.0 and includes some small regression fixes,
+bug fixes and performance improvements.
We recommend that all users upgrade to this version.
-Highlights include:
-
-
.. contents:: What's new in v0.19.1
:local:
:backlinks: none
@@ -20,10 +17,11 @@ Highlights include:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- - Fixed performance regression in factorization of ``Period`` data (:issue:`14338`)
-
-
-
+- Fixed performance regression in factorization of ``Period`` data (:issue:`14338`)
+- Fixed performance regression in ``Series.asof(where)`` when ``where`` is a scalar (:issue:`14461`)
+- Improved performance in ``DataFrame.asof(where)`` when ``where`` is a scalar (:issue:`14461`)
+- Improved performance in ``.to_json()`` when ``lines=True`` (:issue:`14408`)
+- Improved performance in certain types of `loc` indexing with a MultiIndex (:issue:`14551`).
.. _whatsnew_0191.bug_fixes:
@@ -31,19 +29,33 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
-
-
-
+- Source installs from PyPI will now again work without ``cython`` installed, as in previous versions (:issue:`14204`)
+- Compat with Cython 0.25 for building (:issue:`14496`)
+- Fixed regression where user-provided file handles were closed in ``read_csv`` (c engine) (:issue:`14418`).
+- Fixed regression in ``DataFrame.quantile`` when missing values were present in some columns (:issue:`14357`).
+- Fixed regression in ``Index.difference`` where the ``freq`` of a ``DatetimeIndex`` was incorrectly set (:issue:`14323`)
+- Added back ``pandas.core.common.array_equivalent`` with a deprecation warning (:issue:`14555`).
+- Bug in ``pd.read_csv`` for the C engine in which quotation marks were improperly parsed in skipped rows (:issue:`14459`)
+- Bug in ``pd.read_csv`` for Python 2.x in which Unicode quote characters were no longer being respected (:issue:`14477`)
+- Fixed regression in ``Index.append`` when categorical indices were appended (:issue:`14545`).
+- Fixed regression in ``pd.DataFrame`` where constructor fails when given dict with ``None`` value (:issue:`14381`)
+- Fixed regression in ``DatetimeIndex._maybe_cast_slice_bound`` when index is empty (:issue:`14354`).
- Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`)
-
-
-
-
-
-
-
-
+- Bug in ``TimedeltaIndex`` addition with a Datetime-like object where addition overflow in the negative direction was not being caught (:issue:`14068`, :issue:`14453`)
+- Bug in string indexing against data with ``object`` ``Index`` may raise ``AttributeError`` (:issue:`14424`)
+- Correctly raise ``ValueError`` on empty input to ``pd.eval()`` and ``df.query()`` (:issue:`13139`)
+- Bug in ``RangeIndex.intersection`` when result is an empty set (:issue:`14364`).
+- Bug in groupby-transform broadcasting that could cause incorrect dtype coercion (:issue:`14457`)
+- Bug in ``Series.__setitem__`` which allowed mutating read-only arrays (:issue:`14359`).
+- Bug in ``DataFrame.insert`` where multiple calls with duplicate columns can fail (:issue:`14291`)
+- ``pd.merge()`` will raise ``ValueError`` with non-boolean parameters in passed boolean type arguments (:issue:`14434`)
+- Bug in ``Timestamp`` where dates very near the minimum (1677-09) could underflow on creation (:issue:`14415`)
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``pd.concat`` where ``axis`` cannot take string parameters ``'rows'`` or ``'columns'`` (:issue:`14369`)
+- Bug in ``pd.concat`` with dataframes heterogeneous in length and tuple ``keys`` (:issue:`14438`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
+- Bug in ``df.groupby`` where ``TypeError`` raised when ``pd.Grouper(key=...)`` is passed in a list (:issue:`14334`)
+- Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns``
+ is not scalar and ``values`` is not specified (:issue:`14380`)
\ No newline at end of file
diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt
new file mode 100644
index 0000000000000..722e494c9e614
--- /dev/null
+++ b/doc/source/whatsnew/v0.19.2.txt
@@ -0,0 +1,82 @@
+.. _whatsnew_0192:
+
+v0.19.2 (December 24, 2016)
+---------------------------
+
+This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes,
+bug fixes and performance improvements.
+We recommend that all users upgrade to this version.
+
+Highlights include:
+
+- Compatibility with Python 3.6
+- Added a `Pandas Cheat Sheet <https://github.com/pandas-dev/pandas/tree/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf>`__. (:issue:`13202`).
+
+
+.. contents:: What's new in v0.19.2
+ :local:
+ :backlinks: none
+
+
+.. _whatsnew_0192.enhancements:
+
+Enhancements
+~~~~~~~~~~~~
+
+The ``pd.merge_asof()``, added in 0.19.0, gained some improvements:
+
+- ``pd.merge_asof()`` gained ``left_index``/``right_index`` and ``left_by``/``right_by`` arguments (:issue:`14253`)
+- ``pd.merge_asof()`` can take multiple columns in ``by`` parameter and has specialized dtypes for better performance (:issue:`13936`)
+
+
+.. _whatsnew_0192.performance:
+
+Performance Improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Performance regression with ``PeriodIndex`` (:issue:`14822`)
+- Performance regression in indexing with getitem (:issue:`14930`)
+- Improved performance of ``.replace()`` (:issue:`12745`)
+- Improved performance of ``Series`` creation with a datetime index and dictionary data (:issue:`14894`)
+
+
+.. _whatsnew_0192.bug_fixes:
+
+Bug Fixes
+~~~~~~~~~
+- Compat with python 3.6 for pickling of some offsets (:issue:`14685`)
+- Compat with python 3.6 for some indexing exception types (:issue:`14684`, :issue:`14689`)
+- Compat with python 3.6 for deprecation warnings in the test suite (:issue:`14681`)
+- Compat with python 3.6 for Timestamp pickles (:issue:`14689`)
+- Compat with ``dateutil==2.6.0``; segfault reported in the testing suite (:issue:`14621`)
+- Allow ``nanoseconds`` in ``Timestamp.replace`` as a kwarg (:issue:`14621`)
+- Bug in ``pd.read_csv`` in which aliasing was being done for ``na_values`` when passed in as a dictionary (:issue:`14203`)
+- Bug in ``pd.read_csv`` in which column indices for a dict-like ``na_values`` were not being respected (:issue:`14203`)
+- Bug in ``pd.read_csv`` where reading files fails, if the number of headers is equal to the number of lines in the file (:issue:`14515`)
+- Bug in ``pd.read_csv`` for the Python engine in which an unhelpful error message was being raised when multi-char delimiters were not being respected with quotes (:issue:`14582`)
+- Fix bugs (:issue:`14734`, :issue:`13654`) in ``pd.read_sas`` and ``pandas.io.sas.sas7bdat.SAS7BDATReader`` that caused problems when reading a SAS file incrementally.
+- Bug in ``pd.read_csv`` for the Python engine in which an unhelpful error message was being raised when ``skipfooter`` was not being respected by Python's CSV library (:issue:`13879`)
+- Bug in ``.fillna()`` in which timezone aware datetime64 values were incorrectly rounded (:issue:`14872`)
+- Bug in ``.groupby(..., sort=True)`` of a non-lexsorted MultiIndex when grouping with multiple levels (:issue:`14776`)
+- Bug in ``pd.cut`` with negative values and a single bin (:issue:`14652`)
+- Bug in ``pd.to_numeric`` where a 0 was not unsigned on a ``downcast='unsigned'`` argument (:issue:`14401`)
+- Bug in plotting regular and irregular timeseries using shared axes
+ (``sharex=True`` or ``ax.twinx()``) (:issue:`13341`, :issue:`14322`).
+- Bug in not propagating exceptions in parsing invalid datetimes, noted in python 3.6 (:issue:`14561`)
+- Bug in resampling a ``DatetimeIndex`` in local TZ, covering a DST change, which would raise ``AmbiguousTimeError`` (:issue:`14682`)
+- Bug in indexing that transformed ``RecursionError`` into ``KeyError`` or ``IndexingError`` (:issue:`14554`)
+- Bug in ``HDFStore`` when writing a ``MultiIndex`` when using ``data_columns=True`` (:issue:`14435`)
+- Bug in ``HDFStore.append()`` when writing a ``Series`` and passing a ``min_itemsize`` argument containing a value for the ``index`` (:issue:`11412`)
+- Bug when writing to a ``HDFStore`` in ``table`` format with a ``min_itemsize`` value for the ``index`` and without asking to append (:issue:`10381`)
+- Bug in ``Series.groupby.nunique()`` raising an ``IndexError`` for an empty ``Series`` (:issue:`12553`)
+- Bug in ``DataFrame.nlargest`` and ``DataFrame.nsmallest`` when the index had duplicate values (:issue:`13412`)
+- Bug in clipboard functions on linux with python2 with unicode and separators (:issue:`13747`)
+- Bug in clipboard functions on Windows 10 and python 3 (:issue:`14362`, :issue:`12807`)
+- Bug in ``.to_clipboard()`` and Excel compat (:issue:`12529`)
+- Bug in ``DataFrame.combine_first()`` for integer columns (:issue:`14687`).
+- Bug in ``pd.read_csv()`` in which the ``dtype`` parameter was not being respected for empty data (:issue:`14712`)
+- Bug in ``pd.read_csv()`` in which the ``nrows`` parameter was not being respected for large input when using the C engine for parsing (:issue:`7626`)
+- Bug in ``pd.merge_asof()`` could not handle timezone-aware DatetimeIndex when a tolerance was specified (:issue:`14844`)
+- Explicit check in ``to_stata`` and ``StataWriter`` for out-of-range values when writing doubles (:issue:`14618`)
+- Bug in ``.plot(kind='kde')`` which did not drop missing values to generate the KDE Plot, instead generating an empty plot. (:issue:`14821`)
+- Bug in ``unstack()`` if called with a list of column(s) as an argument, regardless of the dtypes of all columns, they get coerced to ``object`` (:issue:`11847`)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0354a8046e873..e163939e117f4 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -12,7 +12,7 @@ Highlights include:
Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating.
-.. contents:: What's new in v0.19.0
+.. contents:: What's new in v0.20.0
:local:
:backlinks: none
diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py
index d4d8b7e4e9747..49aa31c375e25 100644
--- a/pandas/api/tests/test_api.py
+++ b/pandas/api/tests/test_api.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
+import numpy as np
+
import pandas as pd
from pandas.core import common as com
from pandas import api
@@ -184,6 +186,11 @@ def test_deprecation_core_common(self):
for t in self.allowed:
self.check_deprecation(getattr(com, t), getattr(types, t))
+ def test_deprecation_core_common_array_equivalent(self):
+
+ with tm.assert_produces_warning(DeprecationWarning):
+ com.array_equivalent(np.array([1, 2]), np.array([1, 2]))
+
def test_deprecation_core_common_moved(self):
# these are in pandas.types.common
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 1b8930dcae0f1..532f960468204 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -41,6 +41,7 @@
PY2 = sys.version_info[0] == 2
PY3 = (sys.version_info[0] >= 3)
PY35 = (sys.version_info >= (3, 5))
+PY36 = (sys.version_info >= (3, 6))
try:
import __builtin__ as builtins
diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py
index 6c5c631a6bf0e..fffde4d9db867 100644
--- a/pandas/computation/eval.py
+++ b/pandas/computation/eval.py
@@ -233,6 +233,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
"""
first_expr = True
if isinstance(expr, string_types):
+ _check_expression(expr)
exprs = [e for e in expr.splitlines() if e != '']
else:
exprs = [expr]
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index f480eae2dd04d..ffa2cb0684b72 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -1891,6 +1891,18 @@ def test_bad_resolver_raises():
yield check_bad_resolver_raises, engine, parser
+def check_empty_string_raises(engine, parser):
+ # GH 13139
+ tm.skip_if_no_ne(engine)
+ with tm.assertRaisesRegexp(ValueError, 'expr cannot be an empty string'):
+ pd.eval('', engine=engine, parser=parser)
+
+
+def test_empty_string_raises():
+ for engine, parser in ENGINES_PARSERS:
+ yield check_empty_string_raises, engine, parser
+
+
def check_more_than_one_expression_raises(engine, parser):
tm.skip_if_no_ne(engine)
with tm.assertRaisesRegexp(SyntaxError,
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 8644d4568e44d..effca6398419e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -684,11 +684,12 @@ def select_n_slow(dropped, n, keep, method):
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
-def select_n(series, n, keep, method):
- """Implement n largest/smallest.
+def select_n_series(series, n, keep, method):
+ """Implement n largest/smallest for pandas Series
Parameters
----------
+ series : pandas.Series object
n : int
keep : {'first', 'last'}, default 'first'
method : str, {'nlargest', 'nsmallest'}
@@ -717,6 +718,31 @@ def select_n(series, n, keep, method):
return dropped.iloc[inds]
+def select_n_frame(frame, columns, n, method, keep):
+ """Implement n largest/smallest for pandas DataFrame
+
+ Parameters
+ ----------
+ frame : pandas.DataFrame object
+ columns : list or str
+ n : int
+ keep : {'first', 'last'}, default 'first'
+ method : str, {'nlargest', 'nsmallest'}
+
+ Returns
+ -------
+ nordered : DataFrame
+ """
+ from pandas.core.series import Series
+ if not is_list_like(columns):
+ columns = [columns]
+ columns = list(columns)
+ ser = getattr(frame[columns[0]], method)(n, keep=keep)
+ if isinstance(ser, Series):
+ ser = ser.to_frame()
+ return ser.merge(frame, on=columns[0], left_index=True)[frame.columns]
+
+
def _finalize_nsmallest(arr, kth_val, n, keep, narr):
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
diff --git a/pandas/core/base.py b/pandas/core/base.py
index b9a70292498e4..c2c3059272872 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -814,7 +814,7 @@ def transpose(self, *args, **kwargs):
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
- return self.values.shape
+ return self._values.shape
@property
def ndim(self):
@@ -842,22 +842,22 @@ def data(self):
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
- return self.values.itemsize
+ return self._values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
- return self.values.nbytes
+ return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
- return self.values.strides
+ return self._values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
- return self.values.size
+ return self._values.size
@property
def flags(self):
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 9efaff6060909..fd1a23a5bab7f 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -2055,14 +2055,14 @@ def _factorize_from_iterables(iterables):
Returns
-------
- codes_tuple : tuple of ndarrays
- categories_tuple : tuple of Indexes
+ codes_list : list of ndarrays
+ categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
- # For consistency, it should return a list of 2 tuples.
- return [(), ()]
- return lzip(*[_factorize_from_iterable(it) for it in iterables])
+ # For consistency, it should return a list of 2 lists.
+ return [[], []]
+ return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 341bd3b4cc845..295947bbc1166 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -64,6 +64,15 @@ def wrapper(*args, **kwargs):
setattr(m, t, outer(t))
+# deprecate array_equivalent
+
+def array_equivalent(*args, **kwargs):
+ warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
+ "is no longer public API", DeprecationWarning, stacklevel=2)
+ from pandas.types import missing
+ return missing.array_equivalent(*args, **kwargs)
+
+
class PandasError(Exception):
pass
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1798a35168265..823ed3b6ab8bb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2487,7 +2487,7 @@ def _set_item(self, key, value):
# check if we are modifying a copy
# try to set first as we want an invalid
- # value exeption to occur first
+ # value exception to occur first
if len(self):
self._check_setitem_copy()
@@ -2503,10 +2503,10 @@ def insert(self, loc, column, value, allow_duplicates=False):
loc : int
Must have 0 <= loc <= len(columns)
column : object
- value : int, Series, or array-like
+ value : scalar, Series, or array-like
"""
self._ensure_valid_index(value)
- value = self._sanitize_column(column, value)
+ value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
@@ -2590,9 +2590,25 @@ def assign(self, **kwargs):
return data
- def _sanitize_column(self, key, value):
- # Need to make sure new columns (which go into the BlockManager as new
- # blocks) are always copied
+ def _sanitize_column(self, key, value, broadcast=True):
+ """
+ Ensures new columns (which go into the BlockManager as new blocks) are
+ always copied and converted into an array.
+
+ Parameters
+ ----------
+ key : object
+ value : scalar, Series, or array-like
+ broadcast : bool, default True
+ If ``key`` matches multiple duplicate column names in the
+ DataFrame, this parameter indicates whether ``value`` should be
+ tiled so that the returned array contains a (duplicated) column for
+ each occurrence of the key. If False, ``value`` will not be tiled.
+
+ Returns
+ -------
+ sanitized_column : numpy-array
+ """
def reindexer(value):
# reindex if necessary
@@ -2665,7 +2681,7 @@ def reindexer(value):
return value
# broadcast across multiple columns if necessary
- if key in self.columns and value.ndim == 1:
+ if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
@@ -3374,15 +3390,6 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
- def _nsorted(self, columns, n, method, keep):
- if not is_list_like(columns):
- columns = [columns]
- columns = list(columns)
- ser = getattr(self[columns[0]], method)(n, keep=keep)
- ascending = dict(nlargest=False, nsmallest=True)[method]
- return self.loc[ser.index].sort_values(columns, ascending=ascending,
- kind='mergesort')
-
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
@@ -3415,7 +3422,7 @@ def nlargest(self, n, columns, keep='first'):
1 10 b 2
2 8 d NaN
"""
- return self._nsorted(columns, n, 'nlargest', keep)
+ return algos.select_n_frame(self, columns, n, 'nlargest', keep)
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
@@ -3449,7 +3456,7 @@ def nsmallest(self, n, columns, keep='first'):
0 1 a 1
2 8 d NaN
"""
- return self._nsorted(columns, n, 'nsmallest', keep)
+ return algos.select_n_frame(self, columns, n, 'nsmallest', keep)
def swaplevel(self, i=-2, j=-1, axis=0):
"""
@@ -3711,10 +3718,8 @@ def combine(self, other, func, fill_value=None, overwrite=True):
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibily promote
- if notnull(series).all():
- new_dtype = this_dtype
- otherSeries = otherSeries.astype(new_dtype)
- else:
+ new_dtype = this_dtype
+ if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = _find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
@@ -3868,9 +3873,8 @@ def last_valid_index(self):
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
- unique values from index / columns to form axes and return either
- DataFrame or Panel, depending on whether you request a single value
- column (DataFrame) or all columns (Panel)
+ unique values from index / columns to form axes of the resulting
+ DataFrame.
Parameters
----------
@@ -3880,7 +3884,20 @@ def pivot(self, index=None, columns=None, values=None):
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
- Column name to use for populating new frame's values
+ Column name to use for populating new frame's values. If not
+ specified, all remaining columns will be used and the result will
+ have hierarchically indexed columns
+
+ Returns
+ -------
+ pivoted : DataFrame
+
+ See also
+ --------
+ DataFrame.pivot_table : generalization of pivot that can handle
+ duplicate values for one index/column pair
+ DataFrame.unstack : pivot based on the index values instead of a
+ column
Notes
-----
@@ -3889,30 +3906,30 @@ def pivot(self, index=None, columns=None, values=None):
Examples
--------
+
+ >>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
- 0 one A 1.
- 1 one B 2.
- 2 one C 3.
- 3 two A 4.
- 4 two B 5.
- 5 two C 6.
-
- >>> df.pivot('foo', 'bar', 'baz')
+ 0 one A 1
+ 1 one B 2
+ 2 one C 3
+ 3 two A 4
+ 4 two B 5
+ 5 two C 6
+
+ >>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
- >>> df.pivot('foo', 'bar')['baz']
+ >>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
- Returns
- -------
- pivoted : DataFrame
- If no values column specified, will have hierarchically indexed
- columns
+
"""
from pandas.core.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 697438df87d4f..500d10c532b88 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3477,20 +3477,27 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
+ # object conversion is handled in
+                    # series.replace which is called recursively
res[c] = res[c].replace(to_replace=src,
value=value[c],
- inplace=False, regex=regex)
+ inplace=False,
+ regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
- for k, src in compat.iteritems(to_replace):
- if k in self:
- new_data = new_data.replace(to_replace=src,
- value=value,
- filter=[k],
- inplace=inplace,
- regex=regex)
+ keys = [(k, src) for k, src in compat.iteritems(to_replace)
+ if k in self]
+ keys_len = len(keys) - 1
+ for i, (k, src) in enumerate(keys):
+ convert = i == keys_len
+ new_data = new_data.replace(to_replace=src,
+ value=value,
+ filter=[k],
+ inplace=inplace,
+ regex=regex,
+ convert=convert)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
@@ -3735,10 +3742,10 @@ def asof(self, where, subset=None):
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
- if isinstance(self, ABCSeries):
+ is_series = isinstance(self, ABCSeries)
+ if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
- nulls = self.isnull()
elif self.ndim > 2:
raise NotImplementedError("asof is not implemented "
"for {type}".format(type(self)))
@@ -3747,9 +3754,9 @@ def asof(self, where, subset=None):
subset = self.columns
if not is_list_like(subset):
subset = [subset]
- nulls = self[subset].isnull().any(1)
- if not is_list_like(where):
+ is_list = is_list_like(where)
+ if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
@@ -3758,16 +3765,26 @@ def asof(self, where, subset=None):
if where < start:
return np.nan
- loc = self.index.searchsorted(where, side='right')
- if loc > 0:
- loc -= 1
- while nulls[loc] and loc > 0:
- loc -= 1
- return self.iloc[loc]
+ # It's always much faster to use a *while* loop here for
+ # Series than pre-computing all the NAs. However a
+ # *while* loop is extremely expensive for DataFrame
+ # so we later pre-compute all the NAs and use the same
+ # code path whether *where* is a scalar or list.
+ # See PR: https://github.com/pandas-dev/pandas/pull/14476
+ if is_series:
+ loc = self.index.searchsorted(where, side='right')
+ if loc > 0:
+ loc -= 1
+
+ values = self._values
+ while loc > 0 and isnull(values[loc]):
+ loc -= 1
+ return values[loc]
if not isinstance(where, Index):
- where = Index(where)
+ where = Index(where) if is_list else Index([where])
+ nulls = self.isnull() if is_series else self[subset].isnull().any(1)
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
@@ -3775,7 +3792,7 @@ def asof(self, where, subset=None):
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
- return data
+ return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
@@ -3998,6 +4015,8 @@ def asfreq(self, freq, method=None, how=None, normalize=False):
-------
converted : type of caller
+ Notes
+ -----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 5223c0ac270f3..c706a971c0d2a 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -175,8 +175,8 @@ class Grouper(object):
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
- of available frequencies, please see
- `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
+ of available frequencies, please see `here
+ <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
@@ -861,7 +861,17 @@ def reset_identity(values):
if isinstance(result, Series):
result = result.reindex(ax)
else:
- result = result.reindex_axis(ax, axis=self.axis)
+
+ # this is a very unfortunate situation
+ # we have a multi-index that is NOT lexsorted
+ # and we have a result which is duplicated
+ # we can't reindex, so we resort to this
+ # GH 14776
+ if isinstance(ax, MultiIndex) and not ax.is_unique:
+ result = result.take(result.index.get_indexer_for(
+ ax.values).unique(), axis=self.axis)
+ else:
+ result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
@@ -2208,7 +2218,10 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
index._get_grouper_for_level(self.grouper, level)
else:
- if isinstance(self.grouper, (list, tuple)):
+ if self.grouper is None and self.name is not None:
+ self.grouper = self.obj[self.name]
+
+ elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
@@ -2448,7 +2461,10 @@ def is_in_obj(gpr):
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
-
+ elif isinstance(gpr, Grouper) and gpr.key is not None:
+ # Add key to exclusions
+ exclusions.append(gpr.key)
+ in_axis, name = False, None
else:
in_axis, name = False, None
@@ -2892,6 +2908,7 @@ def true_and_notnull(x, *args, **kwargs):
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
+
val = self.obj.get_values()
try:
@@ -2922,7 +2939,10 @@ def nunique(self, dropna=True):
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
- res = out if ids[0] != -1 else out[1:]
+ if len(ids):
+ res = out if ids[0] != -1 else out[1:]
+ else:
+ res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
@@ -3454,7 +3474,6 @@ def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
-
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
@@ -3475,14 +3494,24 @@ def _transform_general(self, func, *args, **kwargs):
else:
res = path(group)
- # broadcasting
if isinstance(res, Series):
- if res.index.is_(obj.index):
- group.T.values[:] = res
+
+ # we need to broadcast across the
+ # other dimension; this will preserve dtypes
+ # GH14457
+ if not np.prod(group.shape):
+ continue
+ elif res.index.is_(obj.index):
+ r = concat([res] * len(group.columns), axis=1)
+ r.columns = group.columns
+ r.index = group.index
else:
- group.values[:] = res
+ r = DataFrame(
+ np.concatenate([res.values] * len(group.index)
+ ).reshape(group.shape),
+ columns=group.columns, index=group.index)
- applied.append(group)
+ applied.append(r)
else:
applied.append(res)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 35fcf0d49d0d6..3d1737b2bd3bb 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -11,6 +11,7 @@
is_sequence,
is_scalar,
is_sparse,
+ _is_unorderable_exception,
_ensure_platform_int)
from pandas.types.missing import isnull, _infer_fill_value
@@ -847,7 +848,7 @@ def _multi_take(self, tup):
[(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)])
return o.reindex(**d)
- except:
+ except(KeyError, IndexingError):
raise self._exception
def _convert_for_reindex(self, key, axis=0):
@@ -1411,7 +1412,7 @@ def error():
except TypeError as e:
# python 3 type errors should be raised
- if 'unorderable' in str(e): # pragma: no cover
+ if _is_unorderable_exception(e):
error()
raise
except:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 11721a5bdac29..2992038e6158f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -6,7 +6,6 @@
from collections import defaultdict
import numpy as np
-from numpy import percentile as _quantile
from pandas.core.base import PandasObject
@@ -623,7 +622,6 @@ def replace(self, to_replace, value, inplace=False, filter=None,
original_to_replace = to_replace
mask = isnull(self.values)
-
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
@@ -1315,16 +1313,38 @@ def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
values = self.get_values()
values, _, _, _ = self._try_coerce_args(values, values)
- mask = isnull(self.values)
- if not lib.isscalar(mask) and mask.any():
- # even though this could be a 2-d mask it appears
- # as a 1-d result
- mask = mask.reshape(values.shape)
- result_shape = tuple([values.shape[0]] + [-1] * (self.ndim - 1))
- values = _block_shape(values[~mask], ndim=self.ndim)
- if self.ndim > 1:
- values = values.reshape(result_shape)
+ def _nanpercentile1D(values, mask, q, **kw):
+ values = values[~mask]
+
+ if len(values) == 0:
+ if is_scalar(q):
+ return self._na_value
+ else:
+ return np.array([self._na_value] * len(q),
+ dtype=values.dtype)
+
+ return np.percentile(values, q, **kw)
+
+ def _nanpercentile(values, q, axis, **kw):
+
+ mask = isnull(self.values)
+ if not is_scalar(mask) and mask.any():
+ if self.ndim == 1:
+ return _nanpercentile1D(values, mask, q, **kw)
+ else:
+ # for nonconsolidatable blocks mask is 1D, but values 2D
+ if mask.ndim < values.ndim:
+ mask = mask.reshape(values.shape)
+ if axis == 0:
+ values = values.T
+ mask = mask.T
+ result = [_nanpercentile1D(val, m, q, **kw) for (val, m)
+ in zip(list(values), list(mask))]
+ result = np.array(result, dtype=values.dtype, copy=False).T
+ return result
+ else:
+ return np.percentile(values, q, axis=axis, **kw)
from pandas import Float64Index
is_empty = values.shape[axis] == 0
@@ -1343,13 +1363,13 @@ def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
else:
try:
- result = _quantile(values, np.array(qs) * 100,
- axis=axis, **kw)
+ result = _nanpercentile(values, np.array(qs) * 100,
+ axis=axis, **kw)
except ValueError:
# older numpies don't handle an array for q
- result = [_quantile(values, q * 100,
- axis=axis, **kw) for q in qs]
+ result = [_nanpercentile(values, q * 100,
+ axis=axis, **kw) for q in qs]
result = np.array(result, copy=False)
if self.ndim > 1:
@@ -1368,7 +1388,7 @@ def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
else:
result = np.array([self._na_value] * len(self))
else:
- result = _quantile(values, qs * 100, axis=axis, **kw)
+ result = _nanpercentile(values, qs * 100, axis=axis, **kw)
ndim = getattr(result, 'ndim', None) or 0
result = self._try_coerce_result(result)
@@ -1773,13 +1793,14 @@ def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False, mgr=None):
+ regex=False, convert=True, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
- regex=regex, mgr=mgr)
+ regex=regex, convert=convert,
+ mgr=mgr)
class ObjectBlock(Block):
@@ -3192,6 +3213,7 @@ def comp(s):
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
+ src_len = len(src_list) - 1
for blk in self.blocks:
# its possible to get multiple result blocks here
@@ -3201,8 +3223,9 @@ def comp(s):
new_rb = []
for b in rb:
if b.dtype == np.object_:
+ convert = i == src_len
result = b.replace(s, d, inplace=inplace, regex=regex,
- mgr=mgr)
+ mgr=mgr, convert=convert)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
@@ -4766,7 +4789,12 @@ def _putmask_smart(v, m, n):
# change the dtype
dtype, _ = _maybe_promote(n.dtype)
- nv = v.astype(dtype)
+
+ if is_extension_type(v.dtype) and is_object_dtype(dtype):
+ nv = v.get_values(dtype)
+ else:
+ nv = v.astype(dtype)
+
try:
nv[m] = n[m]
except ValueError:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index b847415f274db..f92fe07999134 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -10,9 +10,8 @@
from pandas.compat import range, string_types
from pandas.types.common import (is_numeric_v_string_like,
is_float_dtype, is_datetime64_dtype,
- is_integer_dtype, _ensure_float64,
- is_scalar,
- _DATELIKE_DTYPES)
+ is_datetime64tz_dtype, is_integer_dtype,
+ _ensure_float64, is_scalar)
from pandas.types.missing import isnull
@@ -449,7 +448,7 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
+ elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
@@ -474,7 +473,7 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
+ elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
@@ -500,7 +499,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
+ elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
@@ -530,7 +529,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
+ elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 564586eec5a8e..d7d68ad536be5 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -11,6 +11,7 @@
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
+from pandas.compat.numpy import _np_version_under1p10
from pandas.types.common import (_ensure_int64, _ensure_object,
_ensure_float64, _get_dtype,
is_float, is_scalar,
@@ -829,9 +830,37 @@ def _checked_add_with_arr(arr, b):
Raises
------
- OverflowError if any x + y exceeds the maximum int64 value.
+ OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
- if (np.iinfo(np.int64).max - b < arr).any():
- raise OverflowError("Python int too large to "
- "convert to C long")
+ # For performance reasons, we broadcast 'b' to the new array 'b2'
+ # so that it has the same size as 'arr'.
+ if _np_version_under1p10:
+ if lib.isscalar(b):
+ b2 = np.empty(arr.shape)
+ b2.fill(b)
+ else:
+ b2 = b
+ else:
+ b2 = np.broadcast_to(b, arr.shape)
+
+ # gh-14324: For each element in 'arr' and its corresponding element
+ # in 'b2', we check the sign of the element in 'b2'. If it is positive,
+ # we then check whether its sum with the element in 'arr' exceeds
+ # np.iinfo(np.int64).max. If so, we have an overflow error. If
+ # it is negative, we then check whether its sum with the element in
+ # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
+ # error as well.
+ mask1 = b2 > 0
+ mask2 = b2 < 0
+
+ if not mask1.any():
+ to_raise = (np.iinfo(np.int64).min - b2 > arr).any()
+ elif not mask2.any():
+ to_raise = (np.iinfo(np.int64).max - b2 < arr).any()
+ else:
+ to_raise = ((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]).any() or
+ (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]).any())
+
+ if to_raise:
+ raise OverflowError("Overflow in int64 addition")
return arr + b
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 7cff1104c50be..7dac217d073c4 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1176,6 +1176,13 @@ def na_op(x, y):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
+
+ # we may need to manually
+ # broadcast a 1 element array
+ if yrav.shape != mask.shape:
+ yrav = np.empty(mask.shape, dtype=yrav.dtype)
+ yrav.fill(yrav.item())
+
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
with np.errstate(all='ignore'):
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index fa5d16bd85e98..0670d2948b729 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -277,7 +277,8 @@ def _unstack_multiple(data, clocs):
verify_integrity=False)
if isinstance(data, Series):
- dummy = Series(data.values, index=dummy_index)
+ dummy = data.copy()
+ dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
@@ -292,7 +293,8 @@ def _unstack_multiple(data, clocs):
return result
- dummy = DataFrame(data.values, index=dummy_index, columns=data.columns)
+ dummy = data.copy()
+ dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1c6b13885dd01..92c8be3e38587 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -25,6 +25,7 @@
is_iterator,
is_dict_like,
is_scalar,
+ _is_unorderable_exception,
_ensure_platform_int)
from pandas.types.generic import ABCSparseArray, ABCDataFrame
from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
@@ -186,7 +187,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
if len(data):
# coerce back to datetime objects for lookup
data = _dict_compat(data)
- data = lib.fast_multiget(data, index.astype('O'),
+ data = lib.fast_multiget(data,
+ index.asobject.values,
default=np.nan)
else:
data = np.nan
@@ -753,7 +755,7 @@ def setitem(key, value):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
- if 'unorderable' in str(e): # pragma: no cover
+ if _is_unorderable_exception(e):
raise IndexError(key)
if com.is_bool_indexer(key):
@@ -1940,7 +1942,7 @@ def nlargest(self, n=5, keep='first'):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nlargest(10) # only sorts up to the N requested
"""
- return algos.select_n(self, n=n, keep=keep, method='nlargest')
+ return algos.select_n_series(self, n=n, keep=keep, method='nlargest')
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@@ -1978,7 +1980,7 @@ def nsmallest(self, n=5, keep='first'):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nsmallest(10) # only sorts up to the N requested
"""
- return algos.select_n(self, n=n, keep=keep, method='nsmallest')
+ return algos.select_n_series(self, n=n, keep=keep, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
@@ -2915,8 +2917,8 @@ def create_from_value(value, index, dtype):
return subarr
- # scalar like
- if subarr.ndim == 0:
+ # scalar like, GH
+ if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
diff --git a/pandas/index.pyx b/pandas/index.pyx
index a6eb74727a999..a245e85d80f96 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -82,20 +82,13 @@ cdef class IndexEngine:
cdef:
bint unique, monotonic_inc, monotonic_dec
- bint initialized, monotonic_check, unique_check
+ bint need_monotonic_check, need_unique_check
def __init__(self, vgetter, n):
self.vgetter = vgetter
self.over_size_threshold = n >= _SIZE_CUTOFF
-
- self.initialized = 0
- self.monotonic_check = 0
- self.unique_check = 0
-
- self.unique = 0
- self.monotonic_inc = 0
- self.monotonic_dec = 0
+ self.clear_mapping()
def __contains__(self, object val):
self._ensure_mapping_populated()
@@ -213,16 +206,20 @@ cdef class IndexEngine:
property is_unique:
def __get__(self):
- if not self.initialized:
- self.initialize()
+ if self.need_unique_check:
+ self._do_unique_check()
- self.unique_check = 1
return self.unique == 1
+ cdef inline _do_unique_check(self):
+
+ # this is de-facto the same
+ self._ensure_mapping_populated()
+
property is_monotonic_increasing:
def __get__(self):
- if not self.monotonic_check:
+ if self.need_monotonic_check:
self._do_monotonic_check()
return self.monotonic_inc == 1
@@ -230,7 +227,7 @@ cdef class IndexEngine:
property is_monotonic_decreasing:
def __get__(self):
- if not self.monotonic_check:
+ if self.need_monotonic_check:
self._do_monotonic_check()
return self.monotonic_dec == 1
@@ -246,13 +243,12 @@ cdef class IndexEngine:
self.monotonic_dec = 0
is_unique = 0
- self.monotonic_check = 1
+ self.need_monotonic_check = 0
# we can only be sure of uniqueness if is_unique=1
if is_unique:
- self.initialized = 1
self.unique = 1
- self.unique_check = 1
+ self.need_unique_check = 0
cdef _get_index_values(self):
return self.vgetter()
@@ -266,30 +262,32 @@ cdef class IndexEngine:
cdef _check_type(self, object val):
hash(val)
+ property is_mapping_populated:
+
+ def __get__(self):
+ return self.mapping is not None
+
cdef inline _ensure_mapping_populated(self):
- # need to reset if we have previously
- # set the initialized from monotonic checks
- if self.unique_check:
- self.initialized = 0
- if not self.initialized:
- self.initialize()
-
- cdef initialize(self):
- values = self._get_index_values()
+ # this populates the mapping
+ # if its not already populated
+ # also satisfies the need_unique_check
- self.mapping = self._make_hash_table(len(values))
- self.mapping.map_locations(values)
+ if not self.is_mapping_populated:
- if len(self.mapping) == len(values):
- self.unique = 1
+ values = self._get_index_values()
+
+ self.mapping = self._make_hash_table(len(values))
+ self.mapping.map_locations(values)
+
+ if len(self.mapping) == len(values):
+ self.unique = 1
- self.initialized = 1
+ self.need_unique_check = 0
def clear_mapping(self):
self.mapping = None
- self.initialized = 0
- self.monotonic_check = 0
- self.unique_check = 0
+ self.need_monotonic_check = 1
+ self.need_unique_check = 1
self.unique = 0
self.monotonic_inc = 0
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 1c24a0db34b2b..54eaf86315a88 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1464,13 +1464,13 @@ def append(self, other):
names = set([obj.name for obj in to_concat])
name = None if len(names) > 1 else self.name
- typs = _concat.get_dtype_kinds(to_concat)
-
- if 'category' in typs:
- # if any of the to_concat is category
+ if self.is_categorical():
+ # if calling index is category, don't check dtype of others
from pandas.indexes.category import CategoricalIndex
return CategoricalIndex._append_same_dtype(self, to_concat, name)
+ typs = _concat.get_dtype_kinds(to_concat)
+
if len(typs) == 1:
return self._append_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
@@ -2003,7 +2003,7 @@ def difference(self, other):
except TypeError:
pass
- return this._shallow_copy(the_diff, name=result_name)
+ return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
@@ -2966,6 +2966,11 @@ def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
+ def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
+ # this is for partial string indexing,
+ # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
+ raise NotImplementedError
+
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index a9f452db69659..f9576d92d8a49 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1907,6 +1907,13 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
return np.array(labels == loc, dtype=bool)
else:
# sorted, so can return slice object -> view
+ try:
+ loc = labels.dtype.type(loc)
+ except TypeError:
+ # this occurs when loc is a slice (partial string indexing)
+ # but the TypeError raised by searchsorted in this case
+ # is caught in Index._has_valid_type()
+ pass
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
diff --git a/pandas/indexes/range.py b/pandas/indexes/range.py
index 76166e7155bd0..7a7902b503bd6 100644
--- a/pandas/indexes/range.py
+++ b/pandas/indexes/range.py
@@ -315,6 +315,9 @@ def intersection(self, other):
if not isinstance(other, RangeIndex):
return super(RangeIndex, self).intersection(other)
+ if not len(self) or not len(other):
+ return RangeIndex._simple_new(None)
+
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(min(self._start, self._stop + 1),
@@ -322,7 +325,7 @@ def intersection(self, other):
int_high = min(max(self._stop, self._start + 1),
max(other._stop, other._start + 1))
if int_high <= int_low:
- return RangeIndex()
+ return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
@@ -332,7 +335,7 @@ def intersection(self, other):
# check whether element sets intersect
if (self._start - other._start) % gcd:
- return RangeIndex()
+ return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py
index 2109e1c5d6d4c..bbcbae845d8d1 100644
--- a/pandas/io/clipboard.py
+++ b/pandas/io/clipboard.py
@@ -1,6 +1,6 @@
""" io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
-from pandas.compat import StringIO
+from pandas.compat import StringIO, PY2
def read_clipboard(**kwargs): # pragma: no cover
@@ -14,6 +14,14 @@ def read_clipboard(**kwargs): # pragma: no cover
-------
parsed : DataFrame
"""
+ encoding = kwargs.pop('encoding', 'utf-8')
+
+ # only utf-8 is valid for passed value because that's what clipboard
+ # supports
+ if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
+ raise NotImplementedError(
+ 'reading from clipboard only supports utf-8 encoding')
+
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
@@ -74,6 +82,12 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
- Windows:
- OS X:
"""
+ encoding = kwargs.pop('encoding', 'utf-8')
+
+ # testing if an invalid encoding is passed to clipboard
+ if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
+ raise ValueError('clipboard only supports utf-8 encoding')
+
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
@@ -83,8 +97,12 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
if sep is None:
sep = '\t'
buf = StringIO()
- obj.to_csv(buf, sep=sep, **kwargs)
- clipboard_set(buf.getvalue())
+ # clipboard_set (pyperclip) expects unicode
+ obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
+ text = buf.getvalue()
+ if PY2:
+ text = text.decode('utf-8')
+ clipboard_set(text)
return
except:
pass
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 66a8e76c09a6f..878506a6ddc05 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -123,32 +123,38 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
- orient
-
- * `Series`
-
+ orient : string,
+ Indication of expected JSON string format.
+ Compatible JSON strings can be produced by ``to_json()`` with a
+ corresponding orient value.
+ The set of possible orients is:
+
+ - ``'split'`` : dict like
+ ``{index -> [index], columns -> [columns], data -> [values]}``
+ - ``'records'`` : list like
+ ``[{column -> value}, ... , {column -> value}]``
+ - ``'index'`` : dict like ``{index -> {column -> value}}``
+ - ``'columns'`` : dict like ``{column -> {index -> value}}``
+ - ``'values'`` : just the values array
+
+ The allowed and default values depend on the value
+ of the `typ` parameter.
+
+ * when ``typ == 'series'``,
+
+ - allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- - allowed values are: ``{'split','records','index'}``
- The Series index must be unique for orient ``'index'``.
- * `DataFrame`
+ * when ``typ == 'frame'``,
+ - allowed orients are ``{'split','records','index',
+ 'columns','values'}``
- default is ``'columns'``
- - allowed values are: {'split','records','index','columns','values'}
- - The DataFrame index must be unique for orients 'index' and
- 'columns'.
- - The DataFrame columns must be unique for orients 'index',
- 'columns', and 'records'.
-
- * The format of the JSON string
-
- - split : dict like
- ``{index -> [index], columns -> [columns], data -> [values]}``
- - records : list like
- ``[{column -> value}, ... , {column -> value}]``
- - index : dict like ``{index -> {column -> value}}``
- - columns : dict like ``{column -> {index -> value}}``
- - values : just the values array
+ - The DataFrame index must be unique for orients ``'index'`` and
+ ``'columns'``.
+ - The DataFrame columns must be unique for orients ``'index'``,
+ ``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
@@ -197,7 +203,48 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
Returns
-------
- result : Series or DataFrame
+ result : Series or DataFrame, depending on the value of `typ`.
+
+ See Also
+ --------
+ DataFrame.to_json
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
+ ... index=['row 1', 'row 2'],
+ ... columns=['col 1', 'col 2'])
+
+ Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
+
+ >>> df.to_json(orient='split')
+ '{"columns":["col 1","col 2"],
+ "index":["row 1","row 2"],
+ "data":[["a","b"],["c","d"]]}'
+ >>> pd.read_json(_, orient='split')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
+ Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
+
+ >>> df.to_json(orient='index')
+ '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
+ >>> pd.read_json(_, orient='index')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
+ Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
+ Note that index labels are not preserved with this encoding.
+
+ >>> df.to_json(orient='records')
+ '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
+ >>> pd.read_json(_, orient='records')
+ col 1 col 2
+ 0 a b
+ 1 c d
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
@@ -605,25 +652,9 @@ def _convert_to_line_delimits(s):
if not s[0] == '[' and s[-1] == ']':
return s
s = s[1:-1]
- num_open_brackets_seen = 0
- commas_to_replace = []
- in_quotes = False
- for idx, char in enumerate(s): # iter through to find all
- if char == '"' and idx > 0 and s[idx - 1] != '\\':
- in_quotes = ~in_quotes
- elif char == ',': # commas that should be \n
- if num_open_brackets_seen == 0 and not in_quotes:
- commas_to_replace.append(idx)
- elif char == '{':
- if not in_quotes:
- num_open_brackets_seen += 1
- elif char == '}':
- if not in_quotes:
- num_open_brackets_seen -= 1
- s_arr = np.array(list(s)) # Turn to an array to set
- s_arr[commas_to_replace] = '\n' # all commas at once.
- s = ''.join(s_arr)
- return s
+
+ from pandas.lib import convert_json_to_lines
+ return convert_json_to_lines(s)
def nested_to_record(ds, prefix="", level=0):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index f8cf04e08ab03..a5943ef518622 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -20,6 +20,7 @@
is_float,
is_scalar)
from pandas.core.index import Index, MultiIndex, RangeIndex
+from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.common import AbstractMethodError
from pandas.core.config import get_option
@@ -1456,6 +1457,8 @@ def __init__(self, src, **kwds):
def close(self):
for f in self.handles:
f.close()
+
+ # close additional handles opened by C parser (for compression)
try:
self._reader.close()
except:
@@ -1507,10 +1510,11 @@ def read(self, nrows=None):
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
-
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names,
dtype=self.kwds.get('dtype'))
+ columns = self._maybe_make_multi_index_columns(
+ columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
@@ -1759,6 +1763,9 @@ def __init__(self, f, **kwds):
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
+ if isinstance(self.quotechar, compat.text_type):
+ self.quotechar = str(self.quotechar)
+
self.escapechar = kwds['escapechar']
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
@@ -1974,8 +1981,11 @@ def read(self, rows=None):
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
- return _get_empty_meta(names, self.index_col,
- self.index_names)
+ index, columns, col_dict = _get_empty_meta(
+ names, self.index_col, self.index_names)
+ columns = self._maybe_make_multi_index_columns(
+ columns, self.col_names)
+ return index, columns, col_dict
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
@@ -2030,8 +2040,27 @@ def _convert_data(self, data):
col = self.orig_names[col]
clean_conv[col] = f
- return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues,
- self.verbose, clean_conv)
+ # Apply NA values.
+ clean_na_values = {}
+ clean_na_fvalues = {}
+
+ if isinstance(self.na_values, dict):
+ for col in self.na_values:
+ na_value = self.na_values[col]
+ na_fvalue = self.na_fvalues[col]
+
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+
+ clean_na_values[col] = na_value
+ clean_na_fvalues[col] = na_fvalue
+ else:
+ clean_na_values = self.na_values
+ clean_na_fvalues = self.na_fvalues
+
+ return self._convert_to_ndarrays(data, clean_na_values,
+ clean_na_fvalues, self.verbose,
+ clean_conv)
def _to_recarray(self, data, columns):
dtypes = []
@@ -2078,6 +2107,12 @@ def _infer_columns(self):
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
+ if have_mi_columns and hr > 0:
+ if clear_buffer:
+ self._clear_buffer()
+ columns.append([None] * len(columns[-1]))
+ return columns, num_original_columns
+
if not self.names:
raise EmptyDataError(
"No columns to parse from file")
@@ -2311,14 +2346,23 @@ def _next_line(self):
try:
orig_line = next(self.data)
except csv.Error as e:
+ msg = str(e)
+
if 'NULL byte' in str(e):
- raise csv.Error(
- 'NULL byte detected. This byte '
- 'cannot be processed in Python\'s '
- 'native csv library at the moment, '
- 'so please pass in engine=\'c\' instead.')
- else:
- raise
+ msg = ('NULL byte detected. This byte '
+ 'cannot be processed in Python\'s '
+ 'native csv library at the moment, '
+ 'so please pass in engine=\'c\' instead')
+
+ if self.skipfooter > 0:
+ reason = ('Error could possibly be due to '
+ 'parsing errors in the skipped footer rows '
+ '(the skipfooter keyword is only applied '
+ 'after Python\'s csv library has parsed '
+ 'all rows).')
+ msg += '. ' + reason
+
+ raise csv.Error(msg)
line = self._check_comments([orig_line])[0]
self.pos += 1
if (not self.skip_blank_lines and
@@ -2499,6 +2543,11 @@ def _rows_to_cols(self, content):
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, zip_len))
+ if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
+ # see gh-13374
+ reason = ('Error could possibly be due to quotes being '
+ 'ignored when a multi-char delimiter is used.')
+ msg += '. ' + reason
raise ValueError(msg)
if self.usecols:
@@ -2719,6 +2768,7 @@ def _clean_na_values(na_values, keep_default_na=True):
na_values = []
na_fvalues = set()
elif isinstance(na_values, dict):
+ na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
@@ -2776,19 +2826,27 @@ def _clean_index_names(columns, index_col):
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
- if dtype is None:
- dtype = {}
+ # Convert `dtype` to a defaultdict of some kind.
+ # This will enable us to write `dtype[col_name]`
+ # without worrying about KeyError issues later on.
+ if not isinstance(dtype, dict):
+ # if dtype == None, default will be np.object.
+ default_dtype = dtype or np.object
+ dtype = defaultdict(lambda: default_dtype)
else:
- if not isinstance(dtype, dict):
- dtype = defaultdict(lambda: dtype)
+ # Save a copy of the dictionary.
+ _dtype = dtype.copy()
+ dtype = defaultdict(lambda: np.object)
+
# Convert column indexes to column names.
- dtype = dict((columns[k] if is_integer(k) else k, v)
- for k, v in compat.iteritems(dtype))
+ for k, v in compat.iteritems(_dtype):
+ col = columns[k] if is_integer(k) else k
+ dtype[col] = v
if index_col is None or index_col is False:
index = Index([])
else:
- index = [np.empty(0, dtype=dtype.get(index_name, np.object))
+ index = [Series([], dtype=dtype[index_name])
for index_name in index_names]
index = MultiIndex.from_arrays(index, names=index_names)
index_col.sort()
@@ -2796,7 +2854,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns.pop(n - i)
col_dict = dict((col_name,
- np.empty(0, dtype=dtype.get(col_name, np.object)))
+ Series([], dtype=dtype[col_name]))
for col_name in columns)
return index, columns, col_dict
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b8c2b146b6259..e474aeab1f6ca 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3315,7 +3315,7 @@ def validate_data_columns(self, data_columns, min_itemsize):
# evaluate the passed data_columns, True == use all columns
# take only valide axis labels
if data_columns is True:
- data_columns = axis_labels
+ data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
@@ -3429,9 +3429,8 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
j = len(self.index_axes)
# check for column conflicts
- if validate:
- for a in self.axes:
- a.maybe_set_size(min_itemsize=min_itemsize)
+ for a in self.axes:
+ a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
@@ -4153,7 +4152,7 @@ def write(self, obj, data_columns=None, **kwargs):
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
- obj=obj, data_columns=obj.columns, **kwargs)
+ obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
@@ -4254,7 +4253,7 @@ def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
- data_columns = obj.columns[:]
+ data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 2a82fd7a53222..91f417abc0502 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -225,6 +225,12 @@ def _get_properties(self):
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
+ def __next__(self):
+ da = self.read(nrows=self.chunksize or 1)
+ if da is None:
+ raise StopIteration
+ return da
+
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
@@ -591,6 +597,10 @@ def read(self, nrows=None):
if self._current_row_in_file_index >= self.row_count:
return None
+ m = self.row_count - self._current_row_in_file_index
+ if nrows > m:
+ nrows = m
+
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 47642c2e2bc28..c9f8d32e1b504 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -507,10 +507,11 @@ def _engine_builder(con):
if isinstance(con, string_types):
try:
import sqlalchemy
- con = sqlalchemy.create_engine(con)
- return con
except ImportError:
_SQLALCHEMY_INSTALLED = False
+ else:
+ con = sqlalchemy.create_engine(con)
+ return con
return con
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 985ea9c051505..c35e07be2c31a 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -511,6 +511,9 @@ def _cast_to_stata_types(data):
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
+ float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
+ float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
+
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
@@ -541,6 +544,19 @@ def _cast_to_stata_types(data):
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
+ elif dtype in (np.float32, np.float64):
+ value = data[col].max()
+ if np.isinf(value):
+ msg = 'Column {0} has a maximum value of infinity which is ' \
+ 'outside the range supported by Stata.'
+ raise ValueError(msg.format(col))
+ if dtype == np.float32 and value > float32_max:
+ data[col] = data[col].astype(np.float64)
+ elif dtype == np.float64:
+ if value > float64_max:
+ msg = 'Column {0} has a maximum value ({1}) outside the ' \
+ 'range supported by Stata ({2})'
+ raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
@@ -1210,18 +1226,18 @@ def _read_old_header(self, first_char):
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
- typlist.append(tp - 127) # string
+ typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
- .format(','.join(typlist)))
+ .format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
- .format(','.join(typlist)))
+ .format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
@@ -2048,6 +2064,7 @@ def _prepare_pandas(self, data):
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
+ # Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
diff --git a/pandas/io/tests/json/test_pandas.py b/pandas/io/tests/json/test_pandas.py
index 117ac2324d0e0..e6e6f33669e17 100644
--- a/pandas/io/tests/json/test_pandas.py
+++ b/pandas/io/tests/json/test_pandas.py
@@ -167,7 +167,7 @@ def _check_orient(df, orient, dtype=None, numpy=False,
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
- raise
+ raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
@@ -971,7 +971,7 @@ def test_to_jsonl(self):
def test_latin_encoding(self):
if compat.PY2:
self.assertRaisesRegexp(
- TypeError, '\[unicode\] is not implemented as a table column')
+ TypeError, r'\[unicode\] is not implemented as a table column')
return
# GH 13774
diff --git a/pandas/io/tests/parser/c_parser_only.py b/pandas/io/tests/parser/c_parser_only.py
index 09d521e5a7e46..d6be9d5b35445 100644
--- a/pandas/io/tests/parser/c_parser_only.py
+++ b/pandas/io/tests/parser/c_parser_only.py
@@ -71,11 +71,11 @@ def test_dtype_and_names_error(self):
3.0 3
"""
# base cases
- result = self.read_csv(StringIO(data), sep='\s+', header=None)
+ result = self.read_csv(StringIO(data), sep=r'\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
- result = self.read_csv(StringIO(data), sep='\s+',
+ result = self.read_csv(StringIO(data), sep=r'\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
@@ -83,7 +83,7 @@ def test_dtype_and_names_error(self):
# fallback casting
result = self.read_csv(StringIO(
- data), sep='\s+', header=None,
+ data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]],
columns=['a', 'b'])
@@ -97,7 +97,7 @@ def test_dtype_and_names_error(self):
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
- self.read_csv(StringIO(data), sep='\s+', header=None,
+ self.read_csv(StringIO(data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_passing_dtype(self):
@@ -561,3 +561,66 @@ def test_internal_null_byte(self):
result = self.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(result, expected)
+
+ def test_empty_dtype(self):
+ # see gh-14712
+ data = 'a,b'
+
+ expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
+ result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({'a': pd.Categorical([]),
+ 'b': pd.Categorical([])},
+ index=[])
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='category')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='datetime64[ns]')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
+ 'b': pd.Series([], dtype='timedelta64[ns]')},
+ index=[])
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='timedelta64[ns]')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={'a': np.float64})
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={0: np.float64})
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.int32)
+ expected['b'] = expected['b'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={'a': np.int32, 1: np.float64})
+ tm.assert_frame_equal(result, expected)
+
+ def test_read_nrows_large(self):
+ # gh-7626 - Read only nrows of data in for large inputs (>262144b)
+ header_narrow = '\t'.join(['COL_HEADER_' + str(i)
+ for i in range(10)]) + '\n'
+ data_narrow = '\t'.join(['somedatasomedatasomedata1'
+ for i in range(10)]) + '\n'
+ header_wide = '\t'.join(['COL_HEADER_' + str(i)
+ for i in range(15)]) + '\n'
+ data_wide = '\t'.join(['somedatasomedatasomedata2'
+ for i in range(15)]) + '\n'
+ test_input = (header_narrow + data_narrow * 1050 +
+ header_wide + data_wide * 2)
+
+ df = self.read_csv(StringIO(test_input), sep='\t', nrows=1010)
+
+ self.assertTrue(df.size == 1010 * 10)
diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py
index 0219e16391be8..39addbf46314b 100644
--- a/pandas/io/tests/parser/common.py
+++ b/pandas/io/tests/parser/common.py
@@ -606,6 +606,28 @@ def test_multi_index_no_level_names(self):
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
+ def test_multi_index_blank_df(self):
+ # GH 14545
+ data = """a,b
+"""
+ df = self.read_csv(StringIO(data), header=[0])
+ expected = DataFrame(columns=['a', 'b'])
+ tm.assert_frame_equal(df, expected)
+ round_trip = self.read_csv(StringIO(
+ expected.to_csv(index=False)), header=[0])
+ tm.assert_frame_equal(round_trip, expected)
+
+ data_multiline = """a,b
+c,d
+"""
+ df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
+ cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
+ expected2 = DataFrame(columns=cols)
+ tm.assert_frame_equal(df2, expected2)
+ round_trip = self.read_csv(StringIO(
+ expected2.to_csv(index=False)), header=[0, 1])
+ tm.assert_frame_equal(round_trip, expected2)
+
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
@@ -630,10 +652,10 @@ def test_read_csv_parse_simple_list(self):
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
- 'pandas/io/tests/parser/data/salary.table.csv')
+ 'pandas/io/tests/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
- localtable = os.path.join(dirpath, 'salary.table.csv')
+ localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@@ -641,7 +663,7 @@ def test_url(self):
@tm.slow
def test_file(self):
dirpath = tm.get_data_path()
- localtable = os.path.join(dirpath, 'salary.table.csv')
+ localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
@@ -836,7 +858,7 @@ def test_integer_overflow_bug(self):
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
- result = self.read_csv(StringIO(data), header=None, sep='\s+')
+ result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
@@ -852,7 +874,7 @@ def test_catch_too_many_names(self):
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
- result = self.read_table(StringIO(data), sep='\s+')
+ result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
@@ -1052,7 +1074,7 @@ def test_uneven_lines_with_usecols(self):
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
- msg = "Expected \d+ fields in line \d+, saw \d+"
+ msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assertRaisesRegexp(ValueError, msg):
df = self.read_csv(StringIO(csv))
@@ -1122,7 +1144,7 @@ def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
- self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
+ self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
@@ -1157,7 +1179,7 @@ def test_empty_lines(self):
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
- df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
+ df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
@@ -1189,14 +1211,14 @@ def test_regex_separator(self):
b 1 2 3 4
c 1 2 3 4
"""
- df = self.read_table(StringIO(data), sep='\s+')
+ df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
- result = self.read_table(StringIO(data), sep='\s+')
+ result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@@ -1580,7 +1602,7 @@ def test_temporary_file(self):
new_file.flush()
new_file.seek(0)
- result = self.read_csv(new_file, sep='\s+', header=None)
+ result = self.read_csv(new_file, sep=r'\s+', header=None)
new_file.close()
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
@@ -1602,3 +1624,26 @@ def test_internal_eof_byte(self):
expected = pd.DataFrame([["1\x1a", 2]], columns=['a', 'b'])
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
+
+ def test_file_handles(self):
+ # GH 14418 - don't close user provided file handles
+
+ fh = StringIO('a,b\n1,2')
+ self.read_csv(fh)
+ self.assertFalse(fh.closed)
+
+ with open(self.csv1, 'r') as f:
+ self.read_csv(f)
+ self.assertFalse(f.closed)
+
+ # mmap not working with python engine
+ if self.engine != 'python':
+
+ import mmap
+ with open(self.csv1, 'r') as f:
+ m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+ self.read_csv(m)
+ # closed attribute new in python 3.2
+ if PY3:
+ self.assertFalse(m.closed)
+ m.close()
diff --git a/pandas/io/tests/parser/data/salary.table.csv b/pandas/io/tests/parser/data/salaries.csv
similarity index 100%
rename from pandas/io/tests/parser/data/salary.table.csv
rename to pandas/io/tests/parser/data/salaries.csv
diff --git a/pandas/io/tests/parser/data/salaries.csv.bz2 b/pandas/io/tests/parser/data/salaries.csv.bz2
new file mode 100644
index 0000000000000..a68b4e62bf34a
Binary files /dev/null and b/pandas/io/tests/parser/data/salaries.csv.bz2 differ
diff --git a/pandas/io/tests/parser/data/salary.table.gz b/pandas/io/tests/parser/data/salaries.csv.gz
similarity index 100%
rename from pandas/io/tests/parser/data/salary.table.gz
rename to pandas/io/tests/parser/data/salaries.csv.gz
diff --git a/pandas/io/tests/parser/data/salaries.csv.xz b/pandas/io/tests/parser/data/salaries.csv.xz
new file mode 100644
index 0000000000000..40df8e8f936dc
Binary files /dev/null and b/pandas/io/tests/parser/data/salaries.csv.xz differ
diff --git a/pandas/io/tests/parser/data/salaries.csv.zip b/pandas/io/tests/parser/data/salaries.csv.zip
new file mode 100644
index 0000000000000..294f65b36771d
Binary files /dev/null and b/pandas/io/tests/parser/data/salaries.csv.zip differ
diff --git a/pandas/io/tests/parser/header.py b/pandas/io/tests/parser/header.py
index 33a4d71fc03b6..dc6d2ad1daa47 100644
--- a/pandas/io/tests/parser/header.py
+++ b/pandas/io/tests/parser/header.py
@@ -15,7 +15,7 @@
class HeaderTests(object):
def test_read_with_bad_header(self):
- errmsg = "but only \d+ lines in file"
+ errmsg = r"but only \d+ lines in file"
with tm.assertRaisesRegexp(ValueError, errmsg):
s = StringIO(',,')
diff --git a/pandas/io/tests/parser/na_values.py b/pandas/io/tests/parser/na_values.py
index 92107cf2e82a7..e245bc5589145 100644
--- a/pandas/io/tests/parser/na_values.py
+++ b/pandas/io/tests/parser/na_values.py
@@ -266,3 +266,26 @@ def test_na_values_scalar(self):
out = self.read_csv(StringIO(data), names=names,
na_values={'a': 2, 'b': 1})
tm.assert_frame_equal(out, expected)
+
+ def test_na_values_dict_aliasing(self):
+ na_values = {'a': 2, 'b': 1}
+ na_values_copy = na_values.copy()
+
+ names = ['a', 'b']
+ data = '1,2\n2,1'
+
+ expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
+ out = self.read_csv(StringIO(data), names=names, na_values=na_values)
+
+ tm.assert_frame_equal(out, expected)
+ tm.assert_dict_equal(na_values, na_values_copy)
+
+ def test_na_values_dict_col_index(self):
+ # see gh-14203
+
+ data = 'a\nfoo\n1'
+ na_values = {0: 'foo'}
+
+ out = self.read_csv(StringIO(data), na_values=na_values)
+ expected = DataFrame({'a': [np.nan, 1]})
+ tm.assert_frame_equal(out, expected)
diff --git a/pandas/io/tests/parser/python_parser_only.py b/pandas/io/tests/parser/python_parser_only.py
index 3214aa39358e8..ad62aaa275127 100644
--- a/pandas/io/tests/parser/python_parser_only.py
+++ b/pandas/io/tests/parser/python_parser_only.py
@@ -7,6 +7,7 @@
arguments when parsing.
"""
+import csv
import sys
import nose
@@ -162,7 +163,7 @@ def test_read_table_buglet_4x_multiindex(self):
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
- df = self.read_table(StringIO(text), sep='\s+')
+ df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# see gh-6893
@@ -170,7 +171,7 @@ def test_read_table_buglet_4x_multiindex(self):
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
- actual = self.read_table(StringIO(data), sep='\s+')
+ actual = self.read_table(StringIO(data), sep=r'\s+')
tm.assert_frame_equal(actual, expected)
def test_skipfooter_with_decimal(self):
@@ -204,3 +205,34 @@ def test_encoding_non_utf8_multichar_sep(self):
sep=sep, names=['a', 'b'],
encoding=encoding)
tm.assert_frame_equal(result, expected)
+
+ def test_multi_char_sep_quotes(self):
+ # see gh-13374
+
+ data = 'a,,b\n1,,a\n2,,"2,,b"'
+ msg = 'ignored when a multi-char delimiter is used'
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(data), sep=',,')
+
+ # We expect no match, so there should be an assertion
+ # error out of the inner context manager.
+ with tm.assertRaises(AssertionError):
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(data), sep=',,',
+ quoting=csv.QUOTE_NONE)
+
+ def test_skipfooter_bad_row(self):
+ # see gh-13879
+
+ data = 'a,b,c\ncat,foo,bar\ndog,foo,"baz'
+ msg = 'parsing errors in the skipped footer rows'
+
+ with tm.assertRaisesRegexp(csv.Error, msg):
+ self.read_csv(StringIO(data), skipfooter=1)
+
+ # We expect no match, so there should be an assertion
+ # error out of the inner context manager.
+ with tm.assertRaises(AssertionError):
+ with tm.assertRaisesRegexp(csv.Error, msg):
+ self.read_csv(StringIO(data))
diff --git a/pandas/io/tests/parser/quoting.py b/pandas/io/tests/parser/quoting.py
index d0f1493be0621..765cec8243a0a 100644
--- a/pandas/io/tests/parser/quoting.py
+++ b/pandas/io/tests/parser/quoting.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
from pandas import DataFrame
-from pandas.compat import StringIO
+from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
@@ -138,3 +138,16 @@ def test_double_quote(self):
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
+
+ def test_quotechar_unicode(self):
+ # See gh-14477
+ data = 'a\n1'
+ expected = DataFrame({'a': [1]})
+
+ result = self.read_csv(StringIO(data), quotechar=u('"'))
+ tm.assert_frame_equal(result, expected)
+
+ # Compared to Python 3.x, Python 2.x does not handle unicode well.
+ if PY3:
+ result = self.read_csv(StringIO(data), quotechar=u('\u0394'))
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/io/tests/parser/skiprows.py b/pandas/io/tests/parser/skiprows.py
index c9f50dec6c01e..9f01adb6fabcb 100644
--- a/pandas/io/tests/parser/skiprows.py
+++ b/pandas/io/tests/parser/skiprows.py
@@ -190,3 +190,11 @@ def test_skiprows_lineterminator(self):
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
+
+ def test_skiprows_infield_quote(self):
+ # see gh-14459
+ data = 'a"\nb"\na\n1'
+ expected = DataFrame({'a': [1]})
+
+ df = self.read_csv(StringIO(data), skiprows=2)
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/io/tests/parser/test_network.py b/pandas/io/tests/parser/test_network.py
index 7e2f039853e2f..9b02096dd0f26 100644
--- a/pandas/io/tests/parser/test_network.py
+++ b/pandas/io/tests/parser/test_network.py
@@ -18,13 +18,13 @@ class TestUrlGz(tm.TestCase):
def setUp(self):
dirpath = tm.get_data_path()
- localtable = os.path.join(dirpath, 'salary.table.csv')
+ localtable = os.path.join(dirpath, 'salaries.csv')
self.local_table = read_table(localtable)
@tm.network
def test_url_gz(self):
url = ('https://raw.github.com/pandas-dev/pandas/'
- 'master/pandas/io/tests/parser/data/salary.table.gz')
+ 'master/pandas/io/tests/parser/data/salaries.csv.gz')
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py
index ef8f7967193ff..2fc238acd54e3 100644
--- a/pandas/io/tests/parser/test_unsupported.py
+++ b/pandas/io/tests/parser/test_unsupported.py
@@ -50,7 +50,7 @@ def test_c_engine(self):
read_table(StringIO(data), sep=None,
delim_whitespace=False, dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, msg):
- read_table(StringIO(data), sep='\s', dtype={'a': float})
+ read_table(StringIO(data), sep=r'\s', dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, msg):
read_table(StringIO(data), skipfooter=1, dtype={'a': float})
@@ -59,7 +59,7 @@ def test_c_engine(self):
read_table(StringIO(data), engine='c',
sep=None, delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, msg):
- read_table(StringIO(data), engine='c', sep='\s')
+ read_table(StringIO(data), engine='c', sep=r'\s')
with tm.assertRaisesRegexp(ValueError, msg):
read_table(StringIO(data), engine='c', skipfooter=1)
@@ -67,7 +67,7 @@ def test_c_engine(self):
with tm.assert_produces_warning(parsers.ParserWarning):
read_table(StringIO(data), sep=None, delim_whitespace=False)
with tm.assert_produces_warning(parsers.ParserWarning):
- read_table(StringIO(data), sep='\s')
+ read_table(StringIO(data), sep=r'\s')
with tm.assert_produces_warning(parsers.ParserWarning):
read_table(StringIO(data), skipfooter=1)
@@ -79,9 +79,9 @@ def test_c_engine(self):
msg = 'Error tokenizing data'
with tm.assertRaisesRegexp(CParserError, msg):
- read_table(StringIO(text), sep='\s+')
+ read_table(StringIO(text), sep=r'\s+')
with tm.assertRaisesRegexp(CParserError, msg):
- read_table(StringIO(text), engine='c', sep='\s+')
+ read_table(StringIO(text), engine='c', sep=r'\s+')
msg = "Only length-1 thousands markers supported"
data = """A|B|C
diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py
index 16a19c50be960..5051171ccb8f0 100644
--- a/pandas/io/tests/parser/usecols.py
+++ b/pandas/io/tests/parser/usecols.py
@@ -139,7 +139,7 @@ def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
- df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
+ df = self.read_csv(StringIO(data), sep=r'\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
diff --git a/pandas/io/tests/sas/test_sas7bdat.py b/pandas/io/tests/sas/test_sas7bdat.py
index 06eb9774679b1..e20ea48247119 100644
--- a/pandas/io/tests/sas/test_sas7bdat.py
+++ b/pandas/io/tests/sas/test_sas7bdat.py
@@ -47,7 +47,9 @@ def test_from_buffer(self):
with open(fname, 'rb') as f:
byts = f.read()
buf = io.BytesIO(byts)
- df = pd.read_sas(buf, format="sas7bdat", encoding='utf-8')
+ rdr = pd.read_sas(buf, format="sas7bdat",
+ iterator=True, encoding='utf-8')
+ df = rdr.read()
tm.assert_frame_equal(df, df0, check_exact=False)
def test_from_iterator(self):
@@ -55,16 +57,35 @@ def test_from_iterator(self):
df0 = self.data[j]
for k in self.test_ix[j]:
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
- with open(fname, 'rb') as f:
- byts = f.read()
- buf = io.BytesIO(byts)
- rdr = pd.read_sas(buf, format="sas7bdat",
- iterator=True, encoding='utf-8')
+ rdr = pd.read_sas(fname, iterator=True, encoding='utf-8')
df = rdr.read(2)
tm.assert_frame_equal(df, df0.iloc[0:2, :])
df = rdr.read(3)
tm.assert_frame_equal(df, df0.iloc[2:5, :])
+ def test_iterator_loop(self):
+ # github #13654
+ for j in 0, 1:
+ for k in self.test_ix[j]:
+ for chunksize in 3, 5, 10, 11:
+ fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
+ rdr = pd.read_sas(fname, chunksize=chunksize, encoding='utf-8')
+ y = 0
+ for x in rdr:
+ y += x.shape[0]
+ self.assertTrue(y == rdr.row_count)
+
+ def test_iterator_read_too_much(self):
+ # github #14734
+ k = self.test_ix[0][0]
+ fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
+ rdr = pd.read_sas(fname, format="sas7bdat",
+ iterator=True, encoding='utf-8')
+ d1 = rdr.read(rdr.row_count + 20)
+ rdr = pd.read_sas(fname, iterator=True, encoding="utf-8")
+ d2 = rdr.read(rdr.row_count + 20)
+ tm.assert_frame_equal(d1, d2)
+
def test_encoding_options():
dirpath = tm.get_data_path()
diff --git a/pandas/io/tests/sas/test_xport.py b/pandas/io/tests/sas/test_xport.py
index d0627a80f9604..fe2f7cb4bf4be 100644
--- a/pandas/io/tests/sas/test_xport.py
+++ b/pandas/io/tests/sas/test_xport.py
@@ -35,6 +35,13 @@ def test1_basic(self):
# Read full file
data = read_sas(self.file01, format="xport")
tm.assert_frame_equal(data, data_csv)
+ num_rows = data.shape[0]
+
+ # Test reading beyond end of file
+ reader = read_sas(self.file01, format="xport", iterator=True)
+ data = reader.read(num_rows + 100)
+ self.assertTrue(data.shape[0] == num_rows)
+ reader.close()
# Test incremental read with `read` method.
reader = read_sas(self.file01, format="xport", iterator=True)
@@ -48,6 +55,14 @@ def test1_basic(self):
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
+ # Test read in loop
+ m = 0
+ reader = read_sas(self.file01, format="xport", chunksize=100)
+ for x in reader:
+ m += x.shape[0]
+ reader.close()
+ self.assertTrue(m == num_rows)
+
# Read full file with `read_sas` method
data = read_sas(self.file01)
tm.assert_frame_equal(data, data_csv)
diff --git a/pandas/io/tests/test_clipboard.py b/pandas/io/tests/test_clipboard.py
index a7da27a2f75dd..e1f1e5340251e 100644
--- a/pandas/io/tests/test_clipboard.py
+++ b/pandas/io/tests/test_clipboard.py
@@ -9,16 +9,16 @@
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
-from pandas.util.testing import makeCustomDataframe as mkdf, disabled
+from pandas.util.testing import makeCustomDataframe as mkdf
+from pandas.util.clipboard.exceptions import PyperclipException
try:
- import pandas.util.clipboard # noqa
-except OSError:
- raise nose.SkipTest("no clipboard found")
+ DataFrame({'A': [1, 2]}).to_clipboard()
+except PyperclipException:
+ raise nose.SkipTest("clipboard primitives not installed")
-@disabled
class TestClipboard(tm.TestCase):
@classmethod
@@ -52,6 +52,9 @@ def setUpClass(cls):
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
+ # unicode round trip test for GH 13747, GH 12529
+ cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
+ 'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
@@ -59,13 +62,14 @@ def tearDownClass(cls):
super(TestClipboard, cls).tearDownClass()
del cls.data_types, cls.data
- def check_round_trip_frame(self, data_type, excel=None, sep=None):
+ def check_round_trip_frame(self, data_type, excel=None, sep=None,
+ encoding=None):
data = self.data[data_type]
- data.to_clipboard(excel=excel, sep=sep)
+ data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
- result = read_clipboard(sep=sep, index_col=0)
+ result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
- result = read_clipboard()
+ result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
@@ -113,3 +117,16 @@ def test_read_clipboard_infer_excel(self):
exp = pd.read_clipboard()
tm.assert_frame_equal(res, exp)
+
+ def test_invalid_encoding(self):
+ # test case for testing invalid encoding
+ data = self.data['string']
+ with tm.assertRaises(ValueError):
+ data.to_clipboard(encoding='ascii')
+ with tm.assertRaises(NotImplementedError):
+ pd.read_clipboard(encoding='ascii')
+
+ def test_round_trip_valid_encodings(self):
+ for enc in ['UTF-8', 'utf-8', 'utf8']:
+ for dt in self.data_types:
+ self.check_round_trip_frame(dt, encoding=enc)
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 998e71076b7c0..056b7a5322e26 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -1801,8 +1801,8 @@ def wrapped(self, *args, **kwargs):
if openpyxl_compat.is_compat(major_ver=major_ver):
orig_method(self, *args, **kwargs)
else:
- msg = ('Installed openpyxl is not supported at this '
- 'time\. Use.+')
+ msg = (r'Installed openpyxl is not supported at this '
+ r'time\. Use.+')
with tm.assertRaisesRegexp(ValueError, msg):
orig_method(self, *args, **kwargs)
return wrapped
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index cca1580b84195..28820fd71af27 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -824,6 +824,9 @@ def test_upload_data_if_table_exists_append(self):
private_key=_get_private_key_path())
def test_upload_data_if_table_exists_replace(self):
+
+ raise nose.SkipTest("buggy test")
+
destination_table = DESTINATION_TABLE + "4"
test_size = 10
@@ -1121,6 +1124,9 @@ def tearDown(self):
pass
def test_upload_data_as_service_account_with_key_contents(self):
+ raise nose.SkipTest(
+ "flaky test")
+
destination_table = DESTINATION_TABLE + "12"
test_size = 10
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 7b4e775db9476..c202c60f5213d 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -354,7 +354,7 @@ def test_regex_idempotency(self):
def test_negative_skiprows(self):
with tm.assertRaisesRegexp(ValueError,
- '\(you passed a negative value\)'):
+ r'\(you passed a negative value\)'):
self.read_html(self.spam_data, 'Water', skiprows=-1)
@network
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 213bc53e3aab4..79b6dc51009cf 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -987,7 +987,7 @@ def test_latin_encoding(self):
if compat.PY2:
self.assertRaisesRegexp(
- TypeError, '\[unicode\] is not implemented as a table column')
+ TypeError, r'\[unicode\] is not implemented as a table column')
return
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
@@ -1362,6 +1362,32 @@ def check_col(key, name, size):
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
+ # min_itemsize on Series index (GH 11412)
+ df = tm.makeMixedDataFrame().set_index('C')
+ store.append('ss', df['B'], min_itemsize={'index': 4})
+ tm.assert_series_equal(store.select('ss'), df['B'])
+
+ # same as above, with data_columns=True
+ store.append('ss2', df['B'], data_columns=True,
+ min_itemsize={'index': 4})
+ tm.assert_series_equal(store.select('ss2'), df['B'])
+
+ # min_itemsize in index without appending (GH 10381)
+ store.put('ss3', df, format='table',
+ min_itemsize={'index': 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C='longer').set_index('C')
+ store.append('ss3', df2)
+ tm.assert_frame_equal(store.select('ss3'),
+ pd.concat([df, df2]))
+
+ # same as above, with a Series
+ store.put('ss4', df['B'], format='table',
+ min_itemsize={'index': 6})
+ store.append('ss4', df2['B'])
+ tm.assert_series_equal(store.select('ss4'),
+ pd.concat([df['B'], df2['B']]))
+
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
@@ -1416,6 +1442,26 @@ def check_col(key, name, size):
self.assertRaises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
+ def test_to_hdf_with_min_itemsize(self):
+
+ with ensure_clean_path(self.path) as path:
+
+ # min_itemsize in index with to_hdf (GH 10381)
+ df = tm.makeMixedDataFrame().set_index('C')
+ df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C='longer').set_index('C')
+ df2.to_hdf(path, 'ss3', append=True, format='table')
+ tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
+ pd.concat([df, df2]))
+
+ # same as above, with a Series
+ df['B'].to_hdf(path, 'ss4', format='table',
+ min_itemsize={'index': 6})
+ df2['B'].to_hdf(path, 'ss4', append=True, format='table')
+ tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
+ pd.concat([df['B'], df2['B']]))
+
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
@@ -1818,6 +1864,19 @@ def test_select_columns_in_where(self):
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
+ def test_mi_data_columns(self):
+ # GH 14435
+ idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
+ range(5)], names=['date', 'id'])
+ df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
+
+ with ensure_clean_store(self.path) as store:
+ store.append('df', df, data_columns=True)
+
+ actual = store.select('df', where='id == 1')
+ expected = df.iloc[[1], :]
+ tm.assert_frame_equal(actual, expected)
+
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index af8989baabbc0..cb08944e8dc57 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -36,7 +36,7 @@
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
-from pandas.compat import StringIO, range, lrange, string_types
+from pandas.compat import StringIO, range, lrange, string_types, PY36
from pandas.tseries.tools import format as date_format
import pandas.io.sql as sql
@@ -944,7 +944,7 @@ def test_sqlalchemy_type_mapping(self):
self.assertTrue(isinstance(
table.table.c['time'].type, sqltypes.DateTime))
- def test_to_sql_read_sql_with_database_uri(self):
+ def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
@@ -963,6 +963,12 @@ def test_to_sql_read_sql_with_database_uri(self):
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
+ # using driver that will not be installed on Travis to trigger error
+ # in sqlalchemy.create_engine -> test passing of this error to user
+ db_uri = "postgresql+pg8000://user:pass@host/dbname"
+ with tm.assertRaisesRegexp(ImportError, "pg8000"):
+ sql.read_sql("select * from table", db_uri)
+
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
@@ -1995,6 +2001,8 @@ def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
+ if PY36:
+ raise nose.SkipTest("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 1849b32a4a7c8..cd972868a6e32 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -11,8 +11,6 @@
import nose
import numpy as np
-from pandas.tslib import NaT
-
import pandas as pd
import pandas.util.testing as tm
from pandas import compat
@@ -21,6 +19,7 @@
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
+from pandas.tslib import NaT
from pandas.types.common import is_categorical_dtype
@@ -1234,6 +1233,52 @@ def test_stata_111(self):
original = original[['y', 'x', 'w', 'z']]
tm.assert_frame_equal(original, df)
+ def test_out_of_range_double(self):
+ # GH 14618
+ df = DataFrame({'ColumnOk': [0.0,
+ np.finfo(np.double).eps,
+ 4.49423283715579e+307],
+ 'ColumnTooBig': [0.0,
+ np.finfo(np.double).eps,
+ np.finfo(np.double).max]})
+ with tm.assertRaises(ValueError) as cm:
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ tm.assertTrue('ColumnTooBig' in cm.exception)
+
+ df.loc[2, 'ColumnTooBig'] = np.inf
+ with tm.assertRaises(ValueError) as cm:
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ tm.assertTrue('ColumnTooBig' in cm.exception)
+ tm.assertTrue('infinity' in cm.exception)
+
+ def test_out_of_range_float(self):
+ original = DataFrame({'ColumnOk': [0.0,
+ np.finfo(np.float32).eps,
+ np.finfo(np.float32).max / 10.0],
+ 'ColumnTooBig': [0.0,
+ np.finfo(np.float32).eps,
+ np.finfo(np.float32).max]})
+ original.index.name = 'index'
+ for col in original:
+ original[col] = original[col].astype(np.float32)
+
+ with tm.ensure_clean() as path:
+ original.to_stata(path)
+ reread = read_stata(path)
+ original['ColumnTooBig'] = original['ColumnTooBig'].astype(
+ np.float64)
+ tm.assert_frame_equal(original,
+ reread.set_index('index'))
+
+ original.loc[2, 'ColumnTooBig'] = np.inf
+ with tm.assertRaises(ValueError) as cm:
+ with tm.ensure_clean() as path:
+ original.to_stata(path)
+ tm.assertTrue('ColumnTooBig' in cm.exception)
+ tm.assertTrue('infinity' in cm.exception)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index e7672de5c835e..b09a1c2755a06 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -65,13 +65,8 @@ cdef int64_t NPY_NAT = util.get_nat()
ctypedef unsigned char UChar
cimport util
-from util cimport is_array, _checknull, _checknan
-
-cdef extern from "headers/stdint.h":
- enum: UINT8_MAX
- enum: INT64_MAX
- enum: INT64_MIN
-
+from util cimport (is_array, _checknull, _checknan, INT64_MAX,
+ INT64_MIN, UINT8_MAX)
cdef extern from "math.h":
double sqrt(double x)
@@ -980,7 +975,9 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
if is_datelike and checknull(v):
result[i] = NPY_NAT
else:
- util.set_value_at(result, i, v)
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, v)
return result
@@ -991,7 +988,9 @@ cpdef ndarray[object] astype_unicode(ndarray arr):
ndarray[object] result = np.empty(n, dtype=object)
for i in range(n):
- util.set_value_at(result, i, unicode(arr[i]))
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, unicode(arr[i]))
return result
@@ -1002,7 +1001,9 @@ cpdef ndarray[object] astype_str(ndarray arr):
ndarray[object] result = np.empty(n, dtype=object)
for i in range(n):
- util.set_value_at(result, i, str(arr[i]))
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, str(arr[i]))
return result
@@ -1087,6 +1088,44 @@ def string_array_replace_from_nan_rep(
return arr
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def convert_json_to_lines(object arr):
+ """
+ replace comma separated json with line feeds, paying special attention
+ to quotes & brackets
+ """
+ cdef:
+ Py_ssize_t i = 0, num_open_brackets_seen = 0, in_quotes = 0, length
+ ndarray[uint8_t] narr
+ unsigned char v, comma, left_bracket, right_brack, newline
+
+ newline = ord('\n')
+ comma = ord(',')
+ left_bracket = ord('{')
+ right_bracket = ord('}')
+ quote = ord('"')
+ backslash = ord('\\')
+
+ narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy()
+ length = narr.shape[0]
+ for i in range(length):
+ v = narr[i]
+ if v == quote and i > 0 and narr[i - 1] != backslash:
+ in_quotes = ~in_quotes
+ if v == comma: # commas that should be \n
+ if num_open_brackets_seen == 0 and not in_quotes:
+ narr[i] = newline
+ elif v == left_bracket:
+ if not in_quotes:
+ num_open_brackets_seen += 1
+ elif v == right_bracket:
+ if not in_quotes:
+ num_open_brackets_seen -= 1
+
+ return narr.tostring().decode('utf-8')
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
def write_csv_rows(list data, ndarray data_index,
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 12525c7a9c587..286b58083dd4f 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -272,7 +272,7 @@ cdef class TextReader:
parser_t *parser
object file_handle, na_fvalues
object true_values, false_values
- object dsource
+ object handle
bint na_filter, verbose, has_usecols, has_mi_columns
int parser_start
list clocks
@@ -554,9 +554,9 @@ cdef class TextReader:
def close(self):
# we need to properly close an open derived
# filehandle here, e.g. and UTFRecoder
- if self.dsource is not None:
+ if self.handle is not None:
try:
- self.dsource.close()
+ self.handle.close()
except:
pass
@@ -570,7 +570,8 @@ cdef class TextReader:
if not QUOTE_MINIMAL <= quoting <= QUOTE_NONE:
raise TypeError('bad "quoting" value')
- if not isinstance(quote_char, (str, bytes)) and quote_char is not None:
+ if not isinstance(quote_char, (str, compat.text_type,
+ bytes)) and quote_char is not None:
dtype = type(quote_char).__name__
raise TypeError('"quotechar" must be string, '
'not {dtype}'.format(dtype=dtype))
@@ -640,6 +641,7 @@ cdef class TextReader:
else:
raise ValueError('Unrecognized compression type: %s' %
self.compression)
+ self.handle = source
if isinstance(source, basestring):
if not isinstance(source, bytes):
@@ -683,8 +685,6 @@ cdef class TextReader:
raise IOError('Expected file path name or file-like object,'
' got %s type' % type(source))
- self.dsource = source
-
cdef _get_header(self):
# header is now a list of lists, so field_count should use header[0]
@@ -714,7 +714,9 @@ cdef class TextReader:
start = self.parser.line_start[0]
# e.g., if header=3 and file only has 2 lines
- elif self.parser.lines < hr + 1:
+ elif (self.parser.lines < hr + 1
+ and not isinstance(self.orig_header, list)) or (
+ self.parser.lines < hr):
msg = self.orig_header
if isinstance(msg, list):
msg = "[%s], len of %d," % (
@@ -937,7 +939,7 @@ cdef class TextReader:
raise_parser_error('Error tokenizing data', self.parser)
footer = self.skipfooter
- if self.parser_start == self.parser.lines:
+ if self.parser_start >= self.parser.lines:
raise StopIteration
self._end_clock('Tokenization')
@@ -1241,19 +1243,23 @@ cdef class TextReader:
return None, set()
if isinstance(self.na_values, dict):
+ key = None
values = None
+
if name is not None and name in self.na_values:
- values = self.na_values[name]
- if values is not None and not isinstance(values, list):
- values = list(values)
- fvalues = self.na_fvalues[name]
- if fvalues is not None and not isinstance(fvalues, set):
- fvalues = set(fvalues)
- else:
- if i in self.na_values:
- return self.na_values[i], self.na_fvalues[i]
- else:
- return _NA_VALUES, set()
+ key = name
+ elif i in self.na_values:
+ key = i
+ else: # No na_values provided for this column.
+ return _NA_VALUES, set()
+
+ values = self.na_values[key]
+ if values is not None and not isinstance(values, list):
+ values = list(values)
+
+ fvalues = self.na_fvalues[key]
+ if fvalues is not None and not isinstance(fvalues, set):
+ fvalues = set(fvalues)
return _ensure_encoded(values), fvalues
else:
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 8eeff045d1fac..56020e32b9963 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -302,7 +302,21 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
- def _sanitize_column(self, key, value):
+ def _sanitize_column(self, key, value, **kwargs):
+ """
+ Creates a new SparseArray from the input value.
+
+ Parameters
+ ----------
+ key : object
+ value : scalar, Series, or array-like
+ kwargs : dict
+
+ Returns
+ -------
+ sanitized_column : SparseArray
+
+ """
sp_maker = lambda x, index=None: SparseArray(
x, index=index, fill_value=self._default_fill_value,
kind=self._default_kind)
diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py
index dd86e9e791e5e..2b284ac631d3f 100644
--- a/pandas/sparse/tests/test_array.py
+++ b/pandas/sparse/tests/test_array.py
@@ -182,7 +182,7 @@ def test_bad_take(self):
self.assertRaises(IndexError, lambda: self.arr.take(-11))
def test_take_invalid_kwargs(self):
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, self.arr.take,
[2, 3], foo=2)
diff --git a/pandas/src/algos_common_helper.pxi b/pandas/src/algos_common_helper.pxi
deleted file mode 100644
index 9dede87e0c15b..0000000000000
--- a/pandas/src/algos_common_helper.pxi
+++ /dev/null
@@ -1,2764 +0,0 @@
-"""
-Template for each `dtype` helper function using 1-d template
-
-# 1-d template
-- map_indices
-- pad
-- pad_1d
-- pad_2d
-- backfill
-- backfill_1d
-- backfill_2d
-- is_monotonic
-- arrmap
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# 1-d template
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_float64(ndarray[float64_t] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_float64(ndarray[float64_t] old, ndarray[float64_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef float64_t cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_float64(ndarray[float64_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef float64_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_float64(ndarray[float64_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef float64_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_float64(ndarray[float64_t] old, ndarray[float64_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef float64_t cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_float64(ndarray[float64_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef float64_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_float64(ndarray[float64_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef float64_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_float64(ndarray[float64_t] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- float64_t prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
- with nogil:
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_float64(ndarray[float64_t] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_float32(ndarray[float32_t] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_float32(ndarray[float32_t] old, ndarray[float32_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef float32_t cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_float32(ndarray[float32_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef float32_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_float32(ndarray[float32_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef float32_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_float32(ndarray[float32_t] old, ndarray[float32_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef float32_t cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_float32(ndarray[float32_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef float32_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_float32(ndarray[float32_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef float32_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_float32(ndarray[float32_t] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- float32_t prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
- with nogil:
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_float32(ndarray[float32_t] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_object(ndarray[object] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_object(ndarray[object] old, ndarray[object] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef object cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_object(ndarray[object] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef object val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_object(ndarray[object, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef object val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_object(ndarray[object] old, ndarray[object] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef object cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_object(ndarray[object] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef object val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_object(ndarray[object, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef object val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_object(ndarray[object] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- object prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
-
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_object(ndarray[object] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_int32(ndarray[int32_t] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_int32(ndarray[int32_t] old, ndarray[int32_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef int32_t cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_int32(ndarray[int32_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef int32_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_int32(ndarray[int32_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef int32_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_int32(ndarray[int32_t] old, ndarray[int32_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef int32_t cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_int32(ndarray[int32_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef int32_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_int32(ndarray[int32_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef int32_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_int32(ndarray[int32_t] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- int32_t prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
- with nogil:
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_int32(ndarray[int32_t] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_int64(ndarray[int64_t] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_int64(ndarray[int64_t] old, ndarray[int64_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef int64_t cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_int64(ndarray[int64_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef int64_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_int64(ndarray[int64_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef int64_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_int64(ndarray[int64_t] old, ndarray[int64_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef int64_t cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_int64(ndarray[int64_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef int64_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_int64(ndarray[int64_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef int64_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_int64(ndarray[int64_t] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- int64_t prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
- with nogil:
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_int64(ndarray[int64_t] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef map_indices_bool(ndarray[uint8_t] index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i in range(length):
- result[index[i]] = i
-
- return result
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_bool(ndarray[uint8_t] old, ndarray[uint8_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef uint8_t cur, next
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[nright - 1] < old[0]:
- return indexer
-
- i = j = 0
-
- cur = old[0]
-
- while j <= nright - 1 and new[j] < cur:
- j += 1
-
- while True:
- if j == nright:
- break
-
- if i == nleft - 1:
- while j < nright:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] > cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
- break
-
- next = old[i + 1]
-
- while j < nright and cur <= new[j] < next:
- if new[j] == cur:
- indexer[j] = i
- elif fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j += 1
-
- fill_count = 0
- i += 1
- cur = next
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_inplace_bool(ndarray[uint8_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef uint8_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[0]
- for i in range(N):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def pad_2d_inplace_bool(ndarray[uint8_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef uint8_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, 0]
- for i in range(N):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-"""
-Backfilling logic for generating fill vector
-
-Diagram of what's going on
-
-Old New Fill vector Mask
- . 0 1
- . 0 1
- . 0 1
-A A 0 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
- . 1 1
-B B 1 1
- . 2 1
- . 2 1
- . 2 1
-C C 2 1
- . 0
- . 0
-D
-"""
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_bool(ndarray[uint8_t] old, ndarray[uint8_t] new,
- limit=None):
- cdef Py_ssize_t i, j, nleft, nright
- cdef ndarray[int64_t, ndim=1] indexer
- cdef uint8_t cur, prev
- cdef int lim, fill_count = 0
-
- nleft = len(old)
- nright = len(new)
- indexer = np.empty(nright, dtype=np.int64)
- indexer.fill(-1)
-
- if limit is None:
- lim = nright
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]:
- return indexer
-
- i = nleft - 1
- j = nright - 1
-
- cur = old[nleft - 1]
-
- while j >= 0 and new[j] > cur:
- j -= 1
-
- while True:
- if j < 0:
- break
-
- if i == 0:
- while j >= 0:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
- break
-
- prev = old[i - 1]
-
- while j >= 0 and prev < new[j] <= cur:
- if new[j] == cur:
- indexer[j] = i
- elif new[j] < cur and fill_count < lim:
- indexer[j] = i
- fill_count += 1
- j -= 1
-
- fill_count = 0
- i -= 1
- cur = prev
-
- return indexer
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_inplace_bool(ndarray[uint8_t] values,
- ndarray[uint8_t, cast=True] mask,
- limit=None):
- cdef Py_ssize_t i, N
- cdef uint8_t val
- cdef int lim, fill_count = 0
-
- N = len(values)
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- val = values[N - 1]
- for i in range(N - 1, -1, -1):
- if mask[i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[i] = val
- else:
- fill_count = 0
- val = values[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def backfill_2d_inplace_bool(ndarray[uint8_t, ndim=2] values,
- ndarray[uint8_t, ndim=2] mask,
- limit=None):
- cdef Py_ssize_t i, j, N, K
- cdef uint8_t val
- cdef int lim, fill_count = 0
-
- K, N = (<object> values).shape
-
- # GH 2778
- if N == 0:
- return
-
- if limit is None:
- lim = N
- else:
- if limit < 0:
- raise ValueError('Limit must be non-negative')
- lim = limit
-
- for j in range(K):
- fill_count = 0
- val = values[j, N - 1]
- for i in range(N - 1, -1, -1):
- if mask[j, i]:
- if fill_count >= lim:
- continue
- fill_count += 1
- values[j, i] = val
- else:
- fill_count = 0
- val = values[j, i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def is_monotonic_bool(ndarray[uint8_t] arr, bint timelike):
- """
- Returns
- -------
- is_monotonic_inc, is_monotonic_dec, is_unique
- """
- cdef:
- Py_ssize_t i, n
- uint8_t prev, cur
- bint is_monotonic_inc = 1
- bint is_monotonic_dec = 1
- bint is_unique = 1
-
- n = len(arr)
-
- if n == 1:
- if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
- # single value is NaN
- return False, False, True
- else:
- return True, True, True
- elif n < 2:
- return True, True, True
-
- if timelike and arr[0] == iNaT:
- return False, False, True
-
- with nogil:
- prev = arr[0]
- for i in range(1, n):
- cur = arr[i]
- if timelike and cur == iNaT:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if cur < prev:
- is_monotonic_inc = 0
- elif cur > prev:
- is_monotonic_dec = 0
- elif cur == prev:
- is_unique = 0
- else:
- # cur or prev is NaN
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- if not is_monotonic_inc and not is_monotonic_dec:
- is_monotonic_inc = 0
- is_monotonic_dec = 0
- break
- prev = cur
- return is_monotonic_inc, is_monotonic_dec, \
- is_unique and (is_monotonic_inc or is_monotonic_dec)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap_bool(ndarray[uint8_t] index, object func):
- cdef Py_ssize_t length = index.shape[0]
- cdef Py_ssize_t i = 0
-
- cdef ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-#----------------------------------------------------------------------
-# put template
-#----------------------------------------------------------------------
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_float64(ndarray[float64_t, ndim=2] arr,
- ndarray[float64_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_float64_float64(ndarray[float64_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float64_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_float32(ndarray[float32_t, ndim=2] arr,
- ndarray[float32_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_float32_float32(ndarray[float32_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float32_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_int8(ndarray[int8_t, ndim=2] arr,
- ndarray[float32_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_int8_float32(ndarray[int8_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float32_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_int16(ndarray[int16_t, ndim=2] arr,
- ndarray[float32_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_int16_float32(ndarray[int16_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float32_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_int32(ndarray[int32_t, ndim=2] arr,
- ndarray[float64_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_int32_float64(ndarray[int32_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float64_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def diff_2d_int64(ndarray[int64_t, ndim=2] arr,
- ndarray[float64_t, ndim=2] out,
- Py_ssize_t periods, int axis):
- cdef:
- Py_ssize_t i, j, sx, sy
-
- sx, sy = (<object> arr).shape
- if arr.flags.f_contiguous:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for j in range(sy):
- for i in range(start, stop):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for j in range(start, stop):
- for i in range(sx):
- out[i, j] = arr[i, j] - arr[i, j - periods]
- else:
- if axis == 0:
- if periods >= 0:
- start, stop = periods, sx
- else:
- start, stop = 0, sx + periods
- for i in range(start, stop):
- for j in range(sy):
- out[i, j] = arr[i, j] - arr[i - periods, j]
- else:
- if periods >= 0:
- start, stop = periods, sy
- else:
- start, stop = 0, sy + periods
- for i in range(sx):
- for j in range(start, stop):
- out[i, j] = arr[i, j] - arr[i, j - periods]
-
-
-def put2d_int64_float64(ndarray[int64_t, ndim=2, cast=True] values,
- ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[float64_t] out):
- cdef:
- Py_ssize_t i, j, k
-
- k = len(values)
- for j from 0 <= j < k:
- i = indexer[j]
- out[i] = values[j, loc]
-
-#----------------------------------------------------------------------
-# ensure_dtype
-#----------------------------------------------------------------------
-
-cdef int PLATFORM_INT = (<ndarray> np.arange(0, dtype=np.intp)).descr.type_num
-
-cpdef ensure_platform_int(object arr):
- # GH3033, GH1392
- # platform int is the size of the int pointer, e.g. np.intp
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == PLATFORM_INT:
- return arr
- else:
- return arr.astype(np.intp)
- else:
- return np.array(arr, dtype=np.intp)
-
-cpdef ensure_object(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_OBJECT:
- return arr
- else:
- return arr.astype(np.object_)
- elif hasattr(arr, 'asobject'):
- return arr.asobject
- else:
- return np.array(arr, dtype=np.object_)
-
-cpdef ensure_float64(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_FLOAT64:
- return arr
- else:
- return arr.astype(np.float64)
- else:
- return np.array(arr, dtype=np.float64)
-
-cpdef ensure_float32(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_FLOAT32:
- return arr
- else:
- return arr.astype(np.float32)
- else:
- return np.array(arr, dtype=np.float32)
-
-cpdef ensure_int8(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_INT8:
- return arr
- else:
- return arr.astype(np.int8)
- else:
- return np.array(arr, dtype=np.int8)
-
-cpdef ensure_int16(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_INT16:
- return arr
- else:
- return arr.astype(np.int16)
- else:
- return np.array(arr, dtype=np.int16)
-
-cpdef ensure_int32(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_INT32:
- return arr
- else:
- return arr.astype(np.int32)
- else:
- return np.array(arr, dtype=np.int32)
-
-cpdef ensure_int64(object arr):
- if util.is_array(arr):
- if (<ndarray> arr).descr.type_num == NPY_INT64:
- return arr
- else:
- return arr.astype(np.int64)
- else:
- return np.array(arr, dtype=np.int64)
diff --git a/pandas/src/algos_groupby_helper.pxi b/pandas/src/algos_groupby_helper.pxi
deleted file mode 100644
index 013a03f719bbd..0000000000000
--- a/pandas/src/algos_groupby_helper.pxi
+++ /dev/null
@@ -1,1375 +0,0 @@
-"""
-Template for each `dtype` helper function using groupby
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-cdef extern from "numpy/npy_math.h":
- double NAN "NPY_NAN"
-_int64_max = np.iinfo(np.int64).max
-
-#----------------------------------------------------------------------
-# group_add, group_prod, group_var, group_mean, group_ohlc
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_add_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] sumx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
-
- if K > 1:
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
-
- else:
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] prodx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- prodx[lab, j] *= val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- prodx[lab, 0] *= val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-@cython.cdivision(True)
-def group_var_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, ct, oldmean
- ndarray[float64_t, ndim=2] nobs, mean
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- mean = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- out[:, :] = 0.0
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- oldmean = mean[lab, j]
- mean[lab, j] += (val - oldmean) / nobs[lab, j]
- out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
-
- for i in range(ncounts):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] /= (ct - 1)
-# add passing bin edges, instead of labels
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_mean_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] sumx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
-
- for i in range(ncounts):
- for j in range(K):
- count = nobs[i, j]
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_ohlc_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab
- float64_t val, count
- Py_ssize_t ngroups = len(counts)
-
- if len(labels) == 0:
- return
-
- N, K = (<object> values).shape
-
- if out.shape[1] != 4:
- raise ValueError('Output array must have 4 columns')
-
- if K > 1:
- raise NotImplementedError("Argument 'values' must have only "
- "one dimension")
- out.fill(np.nan)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab == -1:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
- if val != val:
- continue
-
- if out[lab, 0] != out[lab, 0]:
- out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
- else:
- out[lab, 1] = max(out[lab, 1], val)
- out[lab, 2] = min(out[lab, 2], val)
- out[lab, 3] = val
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_add_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] sumx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
-
- if K > 1:
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
-
- else:
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] prodx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- prodx[lab, j] *= val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- prodx[lab, 0] *= val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-@cython.cdivision(True)
-def group_var_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, ct, oldmean
- ndarray[float32_t, ndim=2] nobs, mean
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- mean = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- out[:, :] = 0.0
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- oldmean = mean[lab, j]
- mean[lab, j] += (val - oldmean) / nobs[lab, j]
- out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
-
- for i in range(ncounts):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] /= (ct - 1)
-# add passing bin edges, instead of labels
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_mean_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] sumx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
-
- for i in range(ncounts):
- for j in range(K):
- count = nobs[i, j]
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_ohlc_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab
- float32_t val, count
- Py_ssize_t ngroups = len(counts)
-
- if len(labels) == 0:
- return
-
- N, K = (<object> values).shape
-
- if out.shape[1] != 4:
- raise ValueError('Output array must have 4 columns')
-
- if K > 1:
- raise NotImplementedError("Argument 'values' must have only "
- "one dimension")
- out.fill(np.nan)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab == -1:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
- if val != val:
- continue
-
- if out[lab, 0] != out[lab, 0]:
- out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
- else:
- out[lab, 1] = max(out[lab, 1], val)
- out[lab, 2] = min(out[lab, 2], val)
- out[lab, 3] = val
-
-#----------------------------------------------------------------------
-# group_nth, group_last
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels, int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels, int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- int64_t val, count
- ndarray[int64_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != iNaT:
- nobs[lab, j] += 1
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels, int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- int64_t val, count
- ndarray[int64_t, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty_like(out)
-
- N, K = (<object> values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != iNaT:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = resx[i, j]
-
-#----------------------------------------------------------------------
-# group_min, group_max
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_max_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] maxx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- maxx = np.empty_like(out)
- maxx.fill(-np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = maxx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
-
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_max_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] maxx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- maxx = np.empty_like(out)
- maxx.fill(-np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = maxx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != NAN:
-
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != NAN:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_max_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- int64_t val, count
- ndarray[int64_t, ndim=2] maxx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- maxx = np.empty_like(out)
- maxx.fill(-_int64_max)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != iNaT:
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != iNaT:
- nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = maxx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- int64_t val, count
- ndarray[int64_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(_int64_max)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val and val != iNaT:
-
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val and val != iNaT:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = minx[i, j]
-
-#----------------------------------------------------------------------
-# other grouping functions not needing a template
-#----------------------------------------------------------------------
-
-
-def group_median_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, size
- ndarray[int64_t] _counts
- ndarray data
- float64_t* ptr
- ngroups = len(counts)
- N, K = (<object> values).shape
-
- indexer, _counts = groupsort_indexer(labels, ngroups)
- counts[:] = _counts[1:]
-
- data = np.empty((K, N), dtype=np.float64)
- ptr = <float64_t*> data.data
-
- take_2d_axis1_float64_float64(values.T, indexer, out=data)
-
- for i in range(K):
- # exclude NA group
- ptr += _counts[0]
- for j in range(ngroups):
- size = _counts[j + 1]
- out[j, i] = _median_linear(ptr, size)
- ptr += size
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_cumprod_float64(float64_t[:, :] out,
- float64_t[:, :] values,
- int64_t[:] labels,
- float64_t[:, :] accum):
- """
- Only transforms on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, size
- float64_t val
- int64_t lab
-
- N, K = (<object> values).shape
- accum = np.ones_like(accum)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
-
- if lab < 0:
- continue
- for j in range(K):
- val = values[i, j]
- if val == val:
- accum[lab, j] *= val
- out[i, j] = accum[lab, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_cumsum(numeric[:, :] out,
- numeric[:, :] values,
- int64_t[:] labels,
- numeric[:, :] accum):
- """
- Only transforms on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, size
- numeric val
- int64_t lab
-
- N, K = (<object> values).shape
- accum = np.zeros_like(accum)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
-
- if lab < 0:
- continue
- for j in range(K):
- val = values[i, j]
- if val == val:
- accum[lab, j] += val
- out[i, j] = accum[lab, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_shift_indexer(int64_t[:] out, int64_t[:] labels,
- int ngroups, int periods):
- cdef:
- Py_ssize_t N, i, j, ii
- int offset, sign
- int64_t lab, idxer, idxer_slot
- int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64)
- int64_t[:, :] label_indexer
-
- N, = (<object> labels).shape
-
- if periods < 0:
- periods = -periods
- offset = N - 1
- sign = -1
- elif periods > 0:
- offset = 0
- sign = 1
-
- if periods == 0:
- with nogil:
- for i in range(N):
- out[i] = i
- else:
- # array of each previous indexer seen
- label_indexer = np.zeros((ngroups, periods), dtype=np.int64)
- with nogil:
- for i in range(N):
- ## reverse iterator if shifting backwards
- ii = offset + sign * i
- lab = labels[ii]
-
- # Skip null keys
- if lab == -1:
- out[ii] = -1
- continue
-
- label_seen[lab] += 1
-
- idxer_slot = label_seen[lab] % periods
- idxer = label_indexer[lab, idxer_slot]
-
- if label_seen[lab] > periods:
- out[ii] = idxer
- else:
- out[ii] = -1
-
- label_indexer[lab, idxer_slot] = ii
diff --git a/pandas/src/algos_take_helper.pxi b/pandas/src/algos_take_helper.pxi
deleted file mode 100644
index d8fb05804d4e5..0000000000000
--- a/pandas/src/algos_take_helper.pxi
+++ /dev/null
@@ -1,4949 +0,0 @@
-"""
-Template for each `dtype` helper function for take
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# take_1d, take_2d
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_bool_bool_memview(uint8_t[:] values,
- int64_t[:] indexer,
- uint8_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- uint8_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_bool_bool(ndarray[uint8_t, ndim=1] values,
- int64_t[:] indexer,
- uint8_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_bool_bool_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- uint8_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_bool_bool_memview(uint8_t[:, :] values,
- int64_t[:] indexer,
- uint8_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- uint8_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- uint8_t *v
- uint8_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(uint8_t) and
- sizeof(uint8_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(uint8_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_bool_bool(ndarray[uint8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- uint8_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_bool_bool_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- uint8_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- uint8_t *v
- uint8_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(uint8_t) and
- sizeof(uint8_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(uint8_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_bool_bool_memview(uint8_t[:, :] values,
- int64_t[:] indexer,
- uint8_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- uint8_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_bool_bool(ndarray[uint8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- uint8_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_bool_bool_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- uint8_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_bool_bool(ndarray[uint8_t, ndim=2] values,
- indexer,
- ndarray[uint8_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- uint8_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_bool_object_memview(uint8_t[:] values,
- int64_t[:] indexer,
- object[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- object fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = True if values[idx] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_bool_object(ndarray[uint8_t, ndim=1] values,
- int64_t[:] indexer,
- object[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_bool_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- object fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = True if values[idx] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_bool_object_memview(uint8_t[:, :] values,
- int64_t[:] indexer,
- object[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- object *v
- object *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(object) and
- sizeof(object) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(object) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = True if values[idx, j] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_bool_object(ndarray[uint8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- object[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_bool_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- object *v
- object *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(object) and
- sizeof(object) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(object) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = True if values[idx, j] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_bool_object_memview(uint8_t[:, :] values,
- int64_t[:] indexer,
- object[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = True if values[i, idx] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_bool_object(ndarray[uint8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- object[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_bool_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = True if values[i, idx] > 0 else False
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_bool_object(ndarray[uint8_t, ndim=2] values,
- indexer,
- ndarray[object, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- object fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = True if values[idx, idx1[j]] > 0 else False
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int8_int8_memview(int8_t[:] values,
- int64_t[:] indexer,
- int8_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int8_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int8_int8(ndarray[int8_t, ndim=1] values,
- int64_t[:] indexer,
- int8_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int8_int8_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int8_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int8_int8_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int8_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int8_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int8_t *v
- int8_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int8_t) and
- sizeof(int8_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int8_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int8_int8(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int8_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int8_int8_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int8_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int8_t *v
- int8_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int8_t) and
- sizeof(int8_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int8_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int8_int8_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int8_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int8_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int8_int8(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int8_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int8_int8_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int8_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int8_int8(ndarray[int8_t, ndim=2] values,
- indexer,
- ndarray[int8_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int8_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int8_int32_memview(int8_t[:] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int8_int32(ndarray[int8_t, ndim=1] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int8_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int8_int32_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int8_int32(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int8_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int8_int32_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int8_int32(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int8_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int8_int32(ndarray[int8_t, ndim=2] values,
- indexer,
- ndarray[int32_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int32_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int8_int64_memview(int8_t[:] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int8_int64(ndarray[int8_t, ndim=1] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int8_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int8_int64_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int8_int64(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int8_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int8_int64_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int8_int64(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int8_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int8_int64(ndarray[int8_t, ndim=2] values,
- indexer,
- ndarray[int64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int8_float64_memview(int8_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int8_float64(ndarray[int8_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int8_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int8_float64_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int8_float64(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int8_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int8_float64_memview(int8_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int8_float64(ndarray[int8_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int8_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int8_float64(ndarray[int8_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int16_int16_memview(int16_t[:] values,
- int64_t[:] indexer,
- int16_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int16_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int16_int16(ndarray[int16_t, ndim=1] values,
- int64_t[:] indexer,
- int16_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int16_int16_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int16_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int16_int16_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int16_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int16_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int16_t *v
- int16_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int16_t) and
- sizeof(int16_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int16_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int16_int16(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int16_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int16_int16_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int16_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int16_t *v
- int16_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int16_t) and
- sizeof(int16_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int16_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int16_int16_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int16_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int16_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int16_int16(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int16_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int16_int16_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int16_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int16_int16(ndarray[int16_t, ndim=2] values,
- indexer,
- ndarray[int16_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int16_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int16_int32_memview(int16_t[:] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int16_int32(ndarray[int16_t, ndim=1] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int16_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int16_int32_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int16_int32(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int16_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int16_int32_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int16_int32(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int16_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int16_int32(ndarray[int16_t, ndim=2] values,
- indexer,
- ndarray[int32_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int32_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int16_int64_memview(int16_t[:] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int16_int64(ndarray[int16_t, ndim=1] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int16_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int16_int64_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int16_int64(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int16_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int16_int64_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int16_int64(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int16_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int16_int64(ndarray[int16_t, ndim=2] values,
- indexer,
- ndarray[int64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int16_float64_memview(int16_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int16_float64(ndarray[int16_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int16_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int16_float64_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int16_float64(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int16_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int16_float64_memview(int16_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int16_float64(ndarray[int16_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int16_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int16_float64(ndarray[int16_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int32_int32_memview(int32_t[:] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int32_int32(ndarray[int32_t, ndim=1] values,
- int64_t[:] indexer,
- int32_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int32_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int32_int32_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int32_int32(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int32_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int32_t *v
- int32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int32_t) and
- sizeof(int32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int32_int32_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int32_int32(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int32_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int32_int32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int32_int32(ndarray[int32_t, ndim=2] values,
- indexer,
- ndarray[int32_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int32_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int32_int64_memview(int32_t[:] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int32_int64(ndarray[int32_t, ndim=1] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int32_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int32_int64_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int32_int64(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int32_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int32_int64_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int32_int64(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int32_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int32_int64(ndarray[int32_t, ndim=2] values,
- indexer,
- ndarray[int64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int32_float64_memview(int32_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int32_float64(ndarray[int32_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int32_float64_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int32_float64(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int32_float64_memview(int32_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int32_float64(ndarray[int32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int32_float64(ndarray[int32_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int64_int64_memview(int64_t[:] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int64_int64(ndarray[int64_t, ndim=1] values,
- int64_t[:] indexer,
- int64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int64_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- int64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int64_int64_memview(int64_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int64_int64(ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int64_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- int64_t *v
- int64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(int64_t) and
- sizeof(int64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(int64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int64_int64_memview(int64_t[:, :] values,
- int64_t[:] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int64_int64(ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- int64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int64_int64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- int64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int64_int64(ndarray[int64_t, ndim=2] values,
- indexer,
- ndarray[int64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- int64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_int64_float64_memview(int64_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_int64_float64(ndarray[int64_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_int64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_int64_float64_memview(int64_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_int64_float64(ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_int64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_int64_float64_memview(int64_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_int64_float64(ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_int64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_int64_float64(ndarray[int64_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_float32_float32_memview(float32_t[:] values,
- int64_t[:] indexer,
- float32_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_float32_float32(ndarray[float32_t, ndim=1] values,
- int64_t[:] indexer,
- float32_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_float32_float32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float32_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_float32_float32_memview(float32_t[:, :] values,
- int64_t[:] indexer,
- float32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- float32_t *v
- float32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float32_t) and
- sizeof(float32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_float32_float32(ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float32_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_float32_float32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float32_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- float32_t *v
- float32_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float32_t) and
- sizeof(float32_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float32_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_float32_float32_memview(float32_t[:, :] values,
- int64_t[:] indexer,
- float32_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_float32_float32(ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float32_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_float32_float32_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float32_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_float32_float32(ndarray[float32_t, ndim=2] values,
- indexer,
- ndarray[float32_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float32_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_float32_float64_memview(float32_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_float32_float64(ndarray[float32_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_float32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_float32_float64_memview(float32_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_float32_float64(ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_float32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_float32_float64_memview(float32_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_float32_float64(ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_float32_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_float32_float64(ndarray[float32_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_float64_float64_memview(float64_t[:] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_float64_float64(ndarray[float64_t, ndim=1] values,
- int64_t[:] indexer,
- float64_t[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_float64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- float64_t fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
- with nogil:
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_float64_float64_memview(float64_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_float64_float64(ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_float64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF True:
- cdef:
- float64_t *v
- float64_t *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(float64_t) and
- sizeof(float64_t) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(float64_t) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_float64_float64_memview(float64_t[:, :] values,
- int64_t[:] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_float64_float64(ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] indexer,
- float64_t[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_float64_float64_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- float64_t fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_float64_float64(ndarray[float64_t, ndim=2] values,
- indexer,
- ndarray[float64_t, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- float64_t fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_object_object_memview(object[:] values,
- int64_t[:] indexer,
- object[:] out,
- fill_value=np.nan):
-
-
-
- cdef:
- Py_ssize_t i, n, idx
- object fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_object_object(ndarray[object, ndim=1] values,
- int64_t[:] indexer,
- object[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_object_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-
- cdef:
- Py_ssize_t i, n, idx
- object fv
-
- n = indexer.shape[0]
-
- fv = fill_value
-
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- out[i] = fv
- else:
- out[i] = values[idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_object_object_memview(object[:, :] values,
- int64_t[:] indexer,
- object[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- object *v
- object *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(object) and
- sizeof(object) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(object) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_object_object(ndarray[object, ndim=2] values,
- ndarray[int64_t] indexer,
- object[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_object_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(indexer)
- k = values.shape[1]
-
- fv = fill_value
-
- IF False:
- cdef:
- object *v
- object *o
-
- #GH3130
- if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(object) and
- sizeof(object) * n >= 256):
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- v = &values[idx, 0]
- o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(object) * k))
- return
-
- for i from 0 <= i < n:
- idx = indexer[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- out[i, j] = values[idx, j]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_object_object_memview(object[:, :] values,
- int64_t[:] indexer,
- object[:, :] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_object_object(ndarray[object, ndim=2] values,
- ndarray[int64_t] indexer,
- object[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_object_object_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
- cdef:
- Py_ssize_t i, j, k, n, idx
- object fv
-
- n = len(values)
- k = len(indexer)
-
- if n == 0 or k == 0:
- return
-
- fv = fill_value
-
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- idx = indexer[j]
- if idx == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[i, idx]
-
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_multi_object_object(ndarray[object, ndim=2] values,
- indexer,
- ndarray[object, ndim=2] out,
- fill_value=np.nan):
- cdef:
- Py_ssize_t i, j, k, n, idx
- ndarray[int64_t] idx0 = indexer[0]
- ndarray[int64_t] idx1 = indexer[1]
- object fv
-
- n = len(idx0)
- k = len(idx1)
-
- fv = fill_value
- for i from 0 <= i < n:
- idx = idx0[i]
- if idx == -1:
- for j from 0 <= j < k:
- out[i, j] = fv
- else:
- for j from 0 <= j < k:
- if idx1[j] == -1:
- out[i, j] = fv
- else:
- out[i, j] = values[idx, idx1[j]]
diff --git a/pandas/src/datetime.pxd b/pandas/src/datetime.pxd
index 5f7de8244d17e..2267c8282ec14 100644
--- a/pandas/src/datetime.pxd
+++ b/pandas/src/datetime.pxd
@@ -42,9 +42,6 @@ cdef extern from "datetime.h":
object PyDateTime_FromDateAndTime(int year, int month, int day, int hour,
int minute, int second, int us)
-cdef extern from "datetime_helper.h":
- void mangle_nat(object o)
-
cdef extern from "numpy/ndarrayobject.h":
ctypedef int64_t npy_timedelta
@@ -126,8 +123,8 @@ cdef extern from "datetime/np_datetime_strings.h":
-cdef inline _string_to_dts(object val, pandas_datetimestruct* dts,
- int* out_local, int* out_tzoffset):
+cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts,
+ int* out_local, int* out_tzoffset) except? -1:
cdef int result
cdef char *tmp
@@ -139,10 +136,11 @@ cdef inline _string_to_dts(object val, pandas_datetimestruct* dts,
if result == -1:
raise ValueError('Unable to parse %s' % str(val))
+ return result
cdef inline int _cstring_to_dts(char *val, int length,
pandas_datetimestruct* dts,
- int* out_local, int* out_tzoffset):
+ int* out_local, int* out_tzoffset) except? -1:
cdef:
npy_bool special
PANDAS_DATETIMEUNIT out_bestunit
@@ -195,4 +193,3 @@ cdef inline int64_t _date_to_datetime64(object val,
dts.hour = dts.min = dts.sec = dts.us = 0
dts.ps = dts.as = 0
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts)
-
diff --git a/pandas/src/datetime/np_datetime.c b/pandas/src/datetime/np_datetime.c
index 80703c8b08de6..d4b9de45618f3 100644
--- a/pandas/src/datetime/np_datetime.c
+++ b/pandas/src/datetime/np_datetime.c
@@ -846,7 +846,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt;
@@ -860,7 +861,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / 60;
@@ -875,7 +877,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / (60*60);
@@ -891,7 +894,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / (60*60*1000LL);
@@ -908,7 +912,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / (60*60*1000000LL);
@@ -925,7 +930,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / (60*60*1000000000LL);
@@ -943,7 +949,8 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
dt = dt % perday;
}
else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
+ set_datetimestruct_days(dt / perday - (dt % perday == 0 ? 0 : 1),
+ out);
dt = (perday-1) + (dt + 1) % perday;
}
out->hour = dt / (60*60*1000000000000LL);
diff --git a/pandas/src/datetime_helper.h b/pandas/src/datetime_helper.h
index d78e91e747854..11399181fa4e7 100644
--- a/pandas/src/datetime_helper.h
+++ b/pandas/src/datetime_helper.h
@@ -7,11 +7,6 @@
#define PyInt_AS_LONG PyLong_AsLong
#endif
-void mangle_nat(PyObject *val) {
- PyDateTime_GET_MONTH(val) = -1;
- PyDateTime_GET_DAY(val) = -1;
-}
-
npy_int64 get_long_attr(PyObject *o, const char *attr) {
npy_int64 long_val;
PyObject *value = PyObject_GetAttrString(o, attr);
diff --git a/pandas/src/hash.pyx b/pandas/src/hash.pyx
new file mode 100644
index 0000000000000..06ed947808e39
--- /dev/null
+++ b/pandas/src/hash.pyx
@@ -0,0 +1,191 @@
+# cython: profile=False
+# Translated from the reference implementation
+# at https://github.com/veorq/SipHash
+
+import cython
+cimport numpy as cnp
+import numpy as np
+from numpy cimport ndarray, uint8_t, uint32_t, uint64_t
+
+from util cimport _checknull
+from cpython cimport (PyString_Check,
+ PyBytes_Check,
+ PyUnicode_Check)
+from libc.stdlib cimport malloc, free
+
+DEF cROUNDS = 2
+DEF dROUNDS = 4
+
+
+@cython.boundscheck(False)
+def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
+ """
+ Parameters
+ ----------
+ arr : 1-d object ndarray of objects
+ key : hash key, must be 16 byte len encoded
+ encoding : encoding for key & arr, default to 'utf8'
+
+ Returns
+ -------
+ 1-d uint64 ndarray of hashes
+
+ Notes
+ -----
+ allowed values must be strings, or nulls
+ mixed array types will raise TypeError
+
+ """
+ cdef:
+ Py_ssize_t i, l, n
+ ndarray[uint64_t] result
+ bytes data, k
+ uint8_t *kb
+ uint64_t *lens
+ char **vecs, *cdata
+ object val
+
+ k = <bytes>key.encode(encoding)
+ kb = <uint8_t *>k
+ if len(k) != 16:
+ raise ValueError(
+ 'key should be a 16-byte string encoded, got {!r} (len {})'.format(
+ k, len(k)))
+
+ n = len(arr)
+
+ # create an array of bytes
+ vecs = <char **> malloc(n * sizeof(char *))
+ lens = <uint64_t*> malloc(n * sizeof(uint64_t))
+
+ cdef list datas = []
+ for i in range(n):
+ val = arr[i]
+ if PyString_Check(val):
+ data = <bytes>val.encode(encoding)
+ elif PyBytes_Check(val):
+ data = <bytes>val
+ elif PyUnicode_Check(val):
+ data = <bytes>val.encode(encoding)
+ elif _checknull(val):
+ # null, stringify and encode
+ data = <bytes>str(val).encode(encoding)
+
+ else:
+ raise TypeError("{} of type {} is not a valid type for hashing, "
+ "must be string or null".format(val, type(val)))
+
+ l = len(data)
+ lens[i] = l
+ cdata = data
+
+ # keep the refernce alive thru the end of the
+ # function
+ datas.append(data)
+ vecs[i] = cdata
+
+ result = np.empty(n, dtype=np.uint64)
+ with nogil:
+ for i in range(n):
+ result[i] = low_level_siphash(<uint8_t *>vecs[i], lens[i], kb)
+
+ free(vecs)
+ free(lens)
+ return result
+
+cdef inline uint64_t _rotl(uint64_t x, uint64_t b) nogil:
+ return (x << b) | (x >> (64 - b))
+
+cdef inline void u32to8_le(uint8_t* p, uint32_t v) nogil:
+ p[0] = <uint8_t>(v)
+ p[1] = <uint8_t>(v >> 8)
+ p[2] = <uint8_t>(v >> 16)
+ p[3] = <uint8_t>(v >> 24)
+
+cdef inline void u64to8_le(uint8_t* p, uint64_t v) nogil:
+ u32to8_le(p, <uint32_t>v)
+ u32to8_le(p + 4, <uint32_t>(v >> 32))
+
+cdef inline uint64_t u8to64_le(uint8_t* p) nogil:
+ return (<uint64_t>p[0] |
+ <uint64_t>p[1] << 8 |
+ <uint64_t>p[2] << 16 |
+ <uint64_t>p[3] << 24 |
+ <uint64_t>p[4] << 32 |
+ <uint64_t>p[5] << 40 |
+ <uint64_t>p[6] << 48 |
+ <uint64_t>p[7] << 56)
+
+cdef inline void _sipround(uint64_t* v0, uint64_t* v1,
+ uint64_t* v2, uint64_t* v3) nogil:
+ v0[0] += v1[0]
+ v1[0] = _rotl(v1[0], 13)
+ v1[0] ^= v0[0]
+ v0[0] = _rotl(v0[0], 32)
+ v2[0] += v3[0]
+ v3[0] = _rotl(v3[0], 16)
+ v3[0] ^= v2[0]
+ v0[0] += v3[0]
+ v3[0] = _rotl(v3[0], 21)
+ v3[0] ^= v0[0]
+ v2[0] += v1[0]
+ v1[0] = _rotl(v1[0], 17)
+ v1[0] ^= v2[0]
+ v2[0] = _rotl(v2[0], 32)
+
+cpdef uint64_t siphash(bytes data, bytes key) except? 0:
+ if len(key) != 16:
+ raise ValueError(
+ 'key should be a 16-byte bytestring, got {!r} (len {})'.format(
+ key, len(key)))
+ return low_level_siphash(data, len(data), key)
+
+
+@cython.cdivision(True)
+cdef uint64_t low_level_siphash(uint8_t* data, size_t datalen,
+ uint8_t* key) nogil:
+ cdef uint64_t v0 = 0x736f6d6570736575ULL
+ cdef uint64_t v1 = 0x646f72616e646f6dULL
+ cdef uint64_t v2 = 0x6c7967656e657261ULL
+ cdef uint64_t v3 = 0x7465646279746573ULL
+ cdef uint64_t b
+ cdef uint64_t k0 = u8to64_le(key)
+ cdef uint64_t k1 = u8to64_le(key + 8)
+ cdef uint64_t m
+ cdef int i
+ cdef uint8_t* end = data + datalen - (datalen % sizeof(uint64_t))
+ cdef int left = datalen & 7
+ cdef int left_byte
+
+ b = (<uint64_t>datalen) << 56
+ v3 ^= k1
+ v2 ^= k0
+ v1 ^= k1
+ v0 ^= k0
+
+ while (data != end):
+ m = u8to64_le(data)
+ v3 ^= m
+ for i in range(cROUNDS):
+ _sipround(&v0, &v1, &v2, &v3)
+ v0 ^= m
+
+ data += sizeof(uint64_t)
+
+ for i in range(left-1, -1, -1):
+ b |= (<uint64_t>data[i]) << (i * 8)
+
+ v3 ^= b
+
+ for i in range(cROUNDS):
+ _sipround(&v0, &v1, &v2, &v3)
+
+ v0 ^= b
+ v2 ^= 0xff
+
+ for i in range(dROUNDS):
+ _sipround(&v0, &v1, &v2, &v3)
+
+ b = v0 ^ v1 ^ v2 ^ v3
+
+ return b
diff --git a/pandas/src/hashtable_class_helper.pxi b/pandas/src/hashtable_class_helper.pxi
deleted file mode 100644
index da0c76aeca86f..0000000000000
--- a/pandas/src/hashtable_class_helper.pxi
+++ /dev/null
@@ -1,860 +0,0 @@
-"""
-Template for each `dtype` helper function for hashtable
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# VectorData
-#----------------------------------------------------------------------
-
-
-ctypedef struct Float64VectorData:
- float64_t *data
- size_t n, m
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef void append_data_float64(Float64VectorData *data,
- float64_t x) nogil:
-
- data.data[data.n] = x
- data.n += 1
-
-
-ctypedef struct Int64VectorData:
- int64_t *data
- size_t n, m
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef void append_data_int64(Int64VectorData *data,
- int64_t x) nogil:
-
- data.data[data.n] = x
- data.n += 1
-
-ctypedef fused vector_data:
- Int64VectorData
- Float64VectorData
-
-cdef bint needs_resize(vector_data *data) nogil:
- return data.n == data.m
-
-#----------------------------------------------------------------------
-# Vector
-#----------------------------------------------------------------------
-
-cdef class Float64Vector:
-
- cdef:
- Float64VectorData *data
- ndarray ao
-
- def __cinit__(self):
- self.data = <Float64VectorData *>PyMem_Malloc(
- sizeof(Float64VectorData))
- if not self.data:
- raise MemoryError()
- self.data.n = 0
- self.data.m = _INIT_VEC_CAP
- self.ao = np.empty(self.data.m, dtype=np.float64)
- self.data.data = <float64_t*> self.ao.data
-
- cdef resize(self):
- self.data.m = max(self.data.m * 4, _INIT_VEC_CAP)
- self.ao.resize(self.data.m)
- self.data.data = <float64_t*> self.ao.data
-
- def __dealloc__(self):
- PyMem_Free(self.data)
-
- def __len__(self):
- return self.data.n
-
- def to_array(self):
- self.ao.resize(self.data.n)
- self.data.m = self.data.n
- return self.ao
-
- cdef inline void append(self, float64_t x):
-
- if needs_resize(self.data):
- self.resize()
-
- append_data_float64(self.data, x)
-
-cdef class Int64Vector:
-
- cdef:
- Int64VectorData *data
- ndarray ao
-
- def __cinit__(self):
- self.data = <Int64VectorData *>PyMem_Malloc(
- sizeof(Int64VectorData))
- if not self.data:
- raise MemoryError()
- self.data.n = 0
- self.data.m = _INIT_VEC_CAP
- self.ao = np.empty(self.data.m, dtype=np.int64)
- self.data.data = <int64_t*> self.ao.data
-
- cdef resize(self):
- self.data.m = max(self.data.m * 4, _INIT_VEC_CAP)
- self.ao.resize(self.data.m)
- self.data.data = <int64_t*> self.ao.data
-
- def __dealloc__(self):
- PyMem_Free(self.data)
-
- def __len__(self):
- return self.data.n
-
- def to_array(self):
- self.ao.resize(self.data.n)
- self.data.m = self.data.n
- return self.ao
-
- cdef inline void append(self, int64_t x):
-
- if needs_resize(self.data):
- self.resize()
-
- append_data_int64(self.data, x)
-
-
-cdef class ObjectVector:
-
- cdef:
- PyObject **data
- size_t n, m
- ndarray ao
-
- def __cinit__(self):
- self.n = 0
- self.m = _INIT_VEC_CAP
- self.ao = np.empty(_INIT_VEC_CAP, dtype=object)
- self.data = <PyObject**> self.ao.data
-
- def __len__(self):
- return self.n
-
- cdef inline append(self, object o):
- if self.n == self.m:
- self.m = max(self.m * 2, _INIT_VEC_CAP)
- self.ao.resize(self.m)
- self.data = <PyObject**> self.ao.data
-
- Py_INCREF(o)
- self.data[self.n] = <PyObject*> o
- self.n += 1
-
- def to_array(self):
- self.ao.resize(self.n)
- self.m = self.n
- return self.ao
-
-
-#----------------------------------------------------------------------
-# HashTable
-#----------------------------------------------------------------------
-
-
-cdef class HashTable:
- pass
-
-cdef class Float64HashTable(HashTable):
-
- def __cinit__(self, size_hint=1):
- self.table = kh_init_float64()
- if size_hint is not None:
- kh_resize_float64(self.table, size_hint)
-
- def __len__(self):
- return self.table.size
-
- def __dealloc__(self):
- kh_destroy_float64(self.table)
-
- def __contains__(self, object key):
- cdef khiter_t k
- k = kh_get_float64(self.table, key)
- return k != self.table.n_buckets
-
- cpdef get_item(self, float64_t val):
- cdef khiter_t k
- k = kh_get_float64(self.table, val)
- if k != self.table.n_buckets:
- return self.table.vals[k]
- else:
- raise KeyError(val)
-
- def get_iter_test(self, float64_t key, Py_ssize_t iterations):
- cdef Py_ssize_t i, val=0
- for i in range(iterations):
- k = kh_get_float64(self.table, val)
- if k != self.table.n_buckets:
- val = self.table.vals[k]
-
- cpdef set_item(self, float64_t key, Py_ssize_t val):
- cdef:
- khiter_t k
- int ret = 0
-
- k = kh_put_float64(self.table, key, &ret)
- self.table.keys[k] = key
- if kh_exist_float64(self.table, k):
- self.table.vals[k] = val
- else:
- raise KeyError(key)
-
- @cython.boundscheck(False)
- def map(self, float64_t[:] keys, int64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- float64_t key
- khiter_t k
-
- with nogil:
- for i in range(n):
- key = keys[i]
- k = kh_put_float64(self.table, key, &ret)
- self.table.vals[k] = <Py_ssize_t> values[i]
-
- @cython.boundscheck(False)
- def map_locations(self, ndarray[float64_t, ndim=1] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- float64_t val
- khiter_t k
-
- with nogil:
- for i in range(n):
- val = values[i]
- k = kh_put_float64(self.table, val, &ret)
- self.table.vals[k] = i
-
- @cython.boundscheck(False)
- def lookup(self, float64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- float64_t val
- khiter_t k
- int64_t[:] locs = np.empty(n, dtype=np.int64)
-
- with nogil:
- for i in range(n):
- val = values[i]
- k = kh_get_float64(self.table, val)
- if k != self.table.n_buckets:
- locs[i] = self.table.vals[k]
- else:
- locs[i] = -1
-
- return np.asarray(locs)
-
- def factorize(self, float64_t values):
- uniques = Float64Vector()
- labels = self.get_labels(values, uniques, 0, 0)
- return uniques.to_array(), labels
-
- @cython.boundscheck(False)
- def get_labels(self, float64_t[:] values, Float64Vector uniques,
- Py_ssize_t count_prior, Py_ssize_t na_sentinel,
- bint check_null=True):
- cdef:
- Py_ssize_t i, n = len(values)
- int64_t[:] labels
- Py_ssize_t idx, count = count_prior
- int ret = 0
- float64_t val
- khiter_t k
- Float64VectorData *ud
-
- labels = np.empty(n, dtype=np.int64)
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- if check_null and val != val:
- labels[i] = na_sentinel
- continue
-
- k = kh_get_float64(self.table, val)
-
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_float64(self.table, val, &ret)
- self.table.vals[k] = count
-
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_float64(ud, val)
- labels[i] = count
- count += 1
-
- return np.asarray(labels)
-
- @cython.boundscheck(False)
- def get_labels_groupby(self, float64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int64_t[:] labels
- Py_ssize_t idx, count = 0
- int ret = 0
- float64_t val
- khiter_t k
- Float64Vector uniques = Float64Vector()
- Float64VectorData *ud
-
- labels = np.empty(n, dtype=np.int64)
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- # specific for groupby
- if val < 0:
- labels[i] = -1
- continue
-
- k = kh_get_float64(self.table, val)
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_float64(self.table, val, &ret)
- self.table.vals[k] = count
-
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_float64(ud, val)
- labels[i] = count
- count += 1
-
- arr_uniques = uniques.to_array()
-
- return np.asarray(labels), arr_uniques
-
- @cython.boundscheck(False)
- def unique(self, float64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- float64_t val
- khiter_t k
- bint seen_na = 0
- Float64Vector uniques = Float64Vector()
- Float64VectorData *ud
-
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- if val == val:
- k = kh_get_float64(self.table, val)
- if k == self.table.n_buckets:
- kh_put_float64(self.table, val, &ret)
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_float64(ud, val)
- elif not seen_na:
- seen_na = 1
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_float64(ud, NAN)
-
- return uniques.to_array()
-
-cdef class Int64HashTable(HashTable):
-
- def __cinit__(self, size_hint=1):
- self.table = kh_init_int64()
- if size_hint is not None:
- kh_resize_int64(self.table, size_hint)
-
- def __len__(self):
- return self.table.size
-
- def __dealloc__(self):
- kh_destroy_int64(self.table)
-
- def __contains__(self, object key):
- cdef khiter_t k
- k = kh_get_int64(self.table, key)
- return k != self.table.n_buckets
-
- cpdef get_item(self, int64_t val):
- cdef khiter_t k
- k = kh_get_int64(self.table, val)
- if k != self.table.n_buckets:
- return self.table.vals[k]
- else:
- raise KeyError(val)
-
- def get_iter_test(self, int64_t key, Py_ssize_t iterations):
- cdef Py_ssize_t i, val=0
- for i in range(iterations):
- k = kh_get_int64(self.table, val)
- if k != self.table.n_buckets:
- val = self.table.vals[k]
-
- cpdef set_item(self, int64_t key, Py_ssize_t val):
- cdef:
- khiter_t k
- int ret = 0
-
- k = kh_put_int64(self.table, key, &ret)
- self.table.keys[k] = key
- if kh_exist_int64(self.table, k):
- self.table.vals[k] = val
- else:
- raise KeyError(key)
-
- @cython.boundscheck(False)
- def map(self, int64_t[:] keys, int64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- int64_t key
- khiter_t k
-
- with nogil:
- for i in range(n):
- key = keys[i]
- k = kh_put_int64(self.table, key, &ret)
- self.table.vals[k] = <Py_ssize_t> values[i]
-
- @cython.boundscheck(False)
- def map_locations(self, ndarray[int64_t, ndim=1] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- int64_t val
- khiter_t k
-
- with nogil:
- for i in range(n):
- val = values[i]
- k = kh_put_int64(self.table, val, &ret)
- self.table.vals[k] = i
-
- @cython.boundscheck(False)
- def lookup(self, int64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- int64_t val
- khiter_t k
- int64_t[:] locs = np.empty(n, dtype=np.int64)
-
- with nogil:
- for i in range(n):
- val = values[i]
- k = kh_get_int64(self.table, val)
- if k != self.table.n_buckets:
- locs[i] = self.table.vals[k]
- else:
- locs[i] = -1
-
- return np.asarray(locs)
-
- def factorize(self, int64_t values):
- uniques = Int64Vector()
- labels = self.get_labels(values, uniques, 0, 0)
- return uniques.to_array(), labels
-
- @cython.boundscheck(False)
- def get_labels(self, int64_t[:] values, Int64Vector uniques,
- Py_ssize_t count_prior, Py_ssize_t na_sentinel,
- bint check_null=True):
- cdef:
- Py_ssize_t i, n = len(values)
- int64_t[:] labels
- Py_ssize_t idx, count = count_prior
- int ret = 0
- int64_t val
- khiter_t k
- Int64VectorData *ud
-
- labels = np.empty(n, dtype=np.int64)
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- if check_null and val == iNaT:
- labels[i] = na_sentinel
- continue
-
- k = kh_get_int64(self.table, val)
-
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_int64(self.table, val, &ret)
- self.table.vals[k] = count
-
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_int64(ud, val)
- labels[i] = count
- count += 1
-
- return np.asarray(labels)
-
- @cython.boundscheck(False)
- def get_labels_groupby(self, int64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int64_t[:] labels
- Py_ssize_t idx, count = 0
- int ret = 0
- int64_t val
- khiter_t k
- Int64Vector uniques = Int64Vector()
- Int64VectorData *ud
-
- labels = np.empty(n, dtype=np.int64)
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- # specific for groupby
- if val < 0:
- labels[i] = -1
- continue
-
- k = kh_get_int64(self.table, val)
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_int64(self.table, val, &ret)
- self.table.vals[k] = count
-
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_int64(ud, val)
- labels[i] = count
- count += 1
-
- arr_uniques = uniques.to_array()
-
- return np.asarray(labels), arr_uniques
-
- @cython.boundscheck(False)
- def unique(self, int64_t[:] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- int64_t val
- khiter_t k
- bint seen_na = 0
- Int64Vector uniques = Int64Vector()
- Int64VectorData *ud
-
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
-
- k = kh_get_int64(self.table, val)
- if k == self.table.n_buckets:
- kh_put_int64(self.table, val, &ret)
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_int64(ud, val)
-
- return uniques.to_array()
-
-
-cdef class StringHashTable(HashTable):
- cdef kh_str_t *table
-
- def __cinit__(self, int size_hint=1):
- self.table = kh_init_str()
- if size_hint is not None:
- kh_resize_str(self.table, size_hint)
-
- def __dealloc__(self):
- kh_destroy_str(self.table)
-
- cpdef get_item(self, object val):
- cdef khiter_t k
- k = kh_get_str(self.table, util.get_c_string(val))
- if k != self.table.n_buckets:
- return self.table.vals[k]
- else:
- raise KeyError(val)
-
- def get_iter_test(self, object key, Py_ssize_t iterations):
- cdef Py_ssize_t i, val
- for i in range(iterations):
- k = kh_get_str(self.table, util.get_c_string(key))
- if k != self.table.n_buckets:
- val = self.table.vals[k]
-
- cpdef set_item(self, object key, Py_ssize_t val):
- cdef:
- khiter_t k
- int ret = 0
- char* buf
-
- buf = util.get_c_string(key)
-
- k = kh_put_str(self.table, buf, &ret)
- self.table.keys[k] = key
- if kh_exist_str(self.table, k):
- self.table.vals[k] = val
- else:
- raise KeyError(key)
-
- def get_indexer(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- ndarray[int64_t] labels = np.empty(n, dtype=np.int64)
- char *buf
- int64_t *resbuf = <int64_t*> labels.data
- khiter_t k
- kh_str_t *table = self.table
-
- for i in range(n):
- buf = util.get_c_string(values[i])
- k = kh_get_str(table, buf)
- if k != table.n_buckets:
- resbuf[i] = table.vals[k]
- else:
- resbuf[i] = -1
- return labels
-
- def unique(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- object val
- char *buf
- khiter_t k
- ObjectVector uniques = ObjectVector()
-
- for i in range(n):
- val = values[i]
- buf = util.get_c_string(val)
- k = kh_get_str(self.table, buf)
- if k == self.table.n_buckets:
- kh_put_str(self.table, buf, &ret)
- uniques.append(val)
-
- return uniques.to_array()
-
- def factorize(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- ndarray[int64_t] labels = np.empty(n, dtype=np.int64)
- dict reverse = {}
- Py_ssize_t idx, count = 0
- int ret = 0
- object val
- char *buf
- khiter_t k
-
- for i in range(n):
- val = values[i]
- buf = util.get_c_string(val)
- k = kh_get_str(self.table, buf)
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_str(self.table, buf, &ret)
- # print 'putting %s, %s' % (val, count)
-
- self.table.vals[k] = count
- reverse[count] = val
- labels[i] = count
- count += 1
-
- return reverse, labels
-
-
-na_sentinel = object
-
-cdef class PyObjectHashTable(HashTable):
-
- def __init__(self, size_hint=1):
- self.table = kh_init_pymap()
- kh_resize_pymap(self.table, size_hint)
-
- def __dealloc__(self):
- if self.table is not NULL:
- self.destroy()
-
- def __len__(self):
- return self.table.size
-
- def __contains__(self, object key):
- cdef khiter_t k
- hash(key)
- if key != key or key is None:
- key = na_sentinel
- k = kh_get_pymap(self.table, <PyObject*>key)
- return k != self.table.n_buckets
-
- def destroy(self):
- kh_destroy_pymap(self.table)
- self.table = NULL
-
- cpdef get_item(self, object val):
- cdef khiter_t k
- if val != val or val is None:
- val = na_sentinel
- k = kh_get_pymap(self.table, <PyObject*>val)
- if k != self.table.n_buckets:
- return self.table.vals[k]
- else:
- raise KeyError(val)
-
- def get_iter_test(self, object key, Py_ssize_t iterations):
- cdef Py_ssize_t i, val
- if key != key or key is None:
- key = na_sentinel
- for i in range(iterations):
- k = kh_get_pymap(self.table, <PyObject*>key)
- if k != self.table.n_buckets:
- val = self.table.vals[k]
-
- cpdef set_item(self, object key, Py_ssize_t val):
- cdef:
- khiter_t k
- int ret = 0
- char* buf
-
- hash(key)
- if key != key or key is None:
- key = na_sentinel
- k = kh_put_pymap(self.table, <PyObject*>key, &ret)
- # self.table.keys[k] = key
- if kh_exist_pymap(self.table, k):
- self.table.vals[k] = val
- else:
- raise KeyError(key)
-
- def map_locations(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- object val
- khiter_t k
-
- for i in range(n):
- val = values[i]
- hash(val)
- if val != val or val is None:
- val = na_sentinel
-
- k = kh_put_pymap(self.table, <PyObject*>val, &ret)
- self.table.vals[k] = i
-
- def lookup(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- object val
- khiter_t k
- int64_t[:] locs = np.empty(n, dtype=np.int64)
-
- for i in range(n):
- val = values[i]
- hash(val)
- if val != val or val is None:
- val = na_sentinel
-
- k = kh_get_pymap(self.table, <PyObject*>val)
- if k != self.table.n_buckets:
- locs[i] = self.table.vals[k]
- else:
- locs[i] = -1
-
- return np.asarray(locs)
-
- def unique(self, ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- object val
- khiter_t k
- ObjectVector uniques = ObjectVector()
- bint seen_na = 0
-
- for i in range(n):
- val = values[i]
- hash(val)
- if not _checknan(val):
- k = kh_get_pymap(self.table, <PyObject*>val)
- if k == self.table.n_buckets:
- kh_put_pymap(self.table, <PyObject*>val, &ret)
- uniques.append(val)
- elif not seen_na:
- seen_na = 1
- uniques.append(nan)
-
- return uniques.to_array()
-
- def get_labels(self, ndarray[object] values, ObjectVector uniques,
- Py_ssize_t count_prior, int64_t na_sentinel,
- bint check_null=True):
- cdef:
- Py_ssize_t i, n = len(values)
- int64_t[:] labels
- Py_ssize_t idx, count = count_prior
- int ret = 0
- object val
- khiter_t k
-
- labels = np.empty(n, dtype=np.int64)
-
- for i in range(n):
- val = values[i]
- hash(val)
-
- if check_null and val != val or val is None:
- labels[i] = na_sentinel
- continue
-
- k = kh_get_pymap(self.table, <PyObject*>val)
- if k != self.table.n_buckets:
- idx = self.table.vals[k]
- labels[i] = idx
- else:
- k = kh_put_pymap(self.table, <PyObject*>val, &ret)
- self.table.vals[k] = count
- uniques.append(val)
- labels[i] = count
- count += 1
-
- return np.asarray(labels)
\ No newline at end of file
diff --git a/pandas/src/hashtable_func_helper.pxi b/pandas/src/hashtable_func_helper.pxi
deleted file mode 100644
index d05b81acc5dd5..0000000000000
--- a/pandas/src/hashtable_func_helper.pxi
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Template for each `dtype` helper function for hashtable
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# VectorData
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef build_count_table_float64(float64_t[:] values,
- kh_float64_t *table, bint dropna):
- cdef:
- khiter_t k
- Py_ssize_t i, n = len(values)
- float64_t val
- int ret = 0
-
- with nogil:
- kh_resize_float64(table, n)
-
- for i in range(n):
- val = values[i]
- if val == val or not dropna:
- k = kh_get_float64(table, val)
- if k != table.n_buckets:
- table.vals[k] += 1
- else:
- k = kh_put_float64(table, val, &ret)
- table.vals[k] = 1
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef value_count_float64(float64_t[:] values, bint dropna):
- cdef:
- Py_ssize_t i=0
- kh_float64_t *table
- float64_t[:] result_keys
- int64_t[:] result_counts
- int k
-
- table = kh_init_float64()
- build_count_table_float64(values, table, dropna)
-
- result_keys = np.empty(table.n_occupied, dtype=np.float64)
- result_counts = np.zeros(table.n_occupied, dtype=np.int64)
-
- with nogil:
- for k in range(table.n_buckets):
- if kh_exist_float64(table, k):
- result_keys[i] = table.keys[k]
- result_counts[i] = table.vals[k]
- i += 1
- kh_destroy_float64(table)
-
- return np.asarray(result_keys), np.asarray(result_counts)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def duplicated_float64(float64_t[:] values,
- object keep='first'):
- cdef:
- int ret = 0, k
- float64_t value
- Py_ssize_t i, n = len(values)
- kh_float64_t * table = kh_init_float64()
- ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')
-
- kh_resize_float64(table, min(n, _SIZE_HINT_LIMIT))
-
- if keep not in ('last', 'first', False):
- raise ValueError('keep must be either "first", "last" or False')
-
- if keep == 'last':
- with nogil:
- for i from n > i >=0:
- kh_put_float64(table, values[i], &ret)
- out[i] = ret == 0
- elif keep == 'first':
- with nogil:
- for i from 0 <= i < n:
- kh_put_float64(table, values[i], &ret)
- out[i] = ret == 0
- else:
- with nogil:
- for i from 0 <= i < n:
- value = values[i]
- k = kh_get_float64(table, value)
- if k != table.n_buckets:
- out[table.vals[k]] = 1
- out[i] = 1
- else:
- k = kh_put_float64(table, value, &ret)
- table.keys[k] = value
- table.vals[k] = i
- out[i] = 0
- kh_destroy_float64(table)
- return out
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef build_count_table_int64(int64_t[:] values,
- kh_int64_t *table, bint dropna):
- cdef:
- khiter_t k
- Py_ssize_t i, n = len(values)
- int64_t val
- int ret = 0
-
- with nogil:
- kh_resize_int64(table, n)
-
- for i in range(n):
- val = values[i]
- if val == val or not dropna:
- k = kh_get_int64(table, val)
- if k != table.n_buckets:
- table.vals[k] += 1
- else:
- k = kh_put_int64(table, val, &ret)
- table.vals[k] = 1
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cpdef value_count_int64(int64_t[:] values, bint dropna):
- cdef:
- Py_ssize_t i=0
- kh_int64_t *table
- int64_t[:] result_keys
- int64_t[:] result_counts
- int k
-
- table = kh_init_int64()
- build_count_table_int64(values, table, dropna)
-
- result_keys = np.empty(table.n_occupied, dtype=np.int64)
- result_counts = np.zeros(table.n_occupied, dtype=np.int64)
-
- with nogil:
- for k in range(table.n_buckets):
- if kh_exist_int64(table, k):
- result_keys[i] = table.keys[k]
- result_counts[i] = table.vals[k]
- i += 1
- kh_destroy_int64(table)
-
- return np.asarray(result_keys), np.asarray(result_counts)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def duplicated_int64(int64_t[:] values,
- object keep='first'):
- cdef:
- int ret = 0, k
- int64_t value
- Py_ssize_t i, n = len(values)
- kh_int64_t * table = kh_init_int64()
- ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')
-
- kh_resize_int64(table, min(n, _SIZE_HINT_LIMIT))
-
- if keep not in ('last', 'first', False):
- raise ValueError('keep must be either "first", "last" or False')
-
- if keep == 'last':
- with nogil:
- for i from n > i >=0:
- kh_put_int64(table, values[i], &ret)
- out[i] = ret == 0
- elif keep == 'first':
- with nogil:
- for i from 0 <= i < n:
- kh_put_int64(table, values[i], &ret)
- out[i] = ret == 0
- else:
- with nogil:
- for i from 0 <= i < n:
- value = values[i]
- k = kh_get_int64(table, value)
- if k != table.n_buckets:
- out[table.vals[k]] = 1
- out[i] = 1
- else:
- k = kh_put_int64(table, value, &ret)
- table.keys[k] = value
- table.vals[k] = i
- out[i] = 0
- kh_destroy_int64(table)
- return out
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 4fa730eac0fd1..5ac2c70bb1808 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -6,19 +6,9 @@ iNaT = util.get_nat()
cdef bint PY2 = sys.version_info[0] == 2
-cdef extern from "headers/stdint.h":
- enum: UINT8_MAX
- enum: UINT16_MAX
- enum: UINT32_MAX
- enum: UINT64_MAX
- enum: INT8_MIN
- enum: INT8_MAX
- enum: INT16_MIN
- enum: INT16_MAX
- enum: INT32_MAX
- enum: INT32_MIN
- enum: INT64_MAX
- enum: INT64_MIN
+from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX,
+ INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX,
+ INT32_MAX, INT32_MIN, INT64_MAX, INT64_MIN)
# core.common import for fast inference checks
diff --git a/pandas/src/join_helper.pxi b/pandas/src/join_helper.pxi
deleted file mode 100644
index 44b8159351492..0000000000000
--- a/pandas/src/join_helper.pxi
+++ /dev/null
@@ -1,1899 +0,0 @@
-"""
-Template for each `dtype` helper function for join
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# left_join_indexer, inner_join_indexer, outer_join_indexer
-#----------------------------------------------------------------------
-
-# Joins on ordered, unique indices
-
-# right might contain non-unique values
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def left_join_indexer_unique_float64(ndarray[float64_t] left,
- ndarray[float64_t] right):
- cdef:
- Py_ssize_t i, j, nleft, nright
- ndarray[int64_t] indexer
- float64_t lval, rval
-
- i = 0
- j = 0
- nleft = len(left)
- nright = len(right)
-
- indexer = np.empty(nleft, dtype=np.int64)
- while True:
- if i == nleft:
- break
-
- if j == nright:
- indexer[i] = -1
- i += 1
- continue
-
- rval = right[j]
-
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
-
- if left[i] == right[j]:
- indexer[i] = j
- i += 1
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
- j += 1
- elif left[i] > rval:
- indexer[i] = -1
- j += 1
- else:
- indexer[i] = -1
- i += 1
- return indexer
-
-
-# @cython.wraparound(False)
-# @cython.boundscheck(False)
-def left_join_indexer_float64(ndarray[float64_t] left,
- ndarray[float64_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- float64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float64)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- i += 1
- count += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def inner_join_indexer_float64(ndarray[float64_t] left,
- ndarray[float64_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- float64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float64)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = rval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def outer_join_indexer_float64(ndarray[float64_t] left,
- ndarray[float64_t] right):
- cdef:
- Py_ssize_t i, j, nright, nleft, count
- float64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- count = nright
- elif nright == 0:
- count = nleft
- else:
- while True:
- if i == nleft:
- count += nright - j
- break
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- count += 1
- j += 1
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float64)
-
- # do it again, but populate the indexers / result
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- for j in range(nright):
- lindexer[j] = -1
- rindexer[j] = j
- result[j] = right[j]
- elif nright == 0:
- for i in range(nleft):
- lindexer[i] = i
- rindexer[i] = -1
- result[i] = left[i]
- else:
- while True:
- if i == nleft:
- while j < nright:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = right[j]
- count += 1
- j += 1
- break
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = lval
- count += 1
- i += 1
- else:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = rval
- count += 1
- j += 1
-
- return result, lindexer, rindexer
-
-# Joins on ordered, unique indices
-
-# right might contain non-unique values
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def left_join_indexer_unique_float32(ndarray[float32_t] left,
- ndarray[float32_t] right):
- cdef:
- Py_ssize_t i, j, nleft, nright
- ndarray[int64_t] indexer
- float32_t lval, rval
-
- i = 0
- j = 0
- nleft = len(left)
- nright = len(right)
-
- indexer = np.empty(nleft, dtype=np.int64)
- while True:
- if i == nleft:
- break
-
- if j == nright:
- indexer[i] = -1
- i += 1
- continue
-
- rval = right[j]
-
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
-
- if left[i] == right[j]:
- indexer[i] = j
- i += 1
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
- j += 1
- elif left[i] > rval:
- indexer[i] = -1
- j += 1
- else:
- indexer[i] = -1
- i += 1
- return indexer
-
-
-# @cython.wraparound(False)
-# @cython.boundscheck(False)
-def left_join_indexer_float32(ndarray[float32_t] left,
- ndarray[float32_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- float32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float32)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- i += 1
- count += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def inner_join_indexer_float32(ndarray[float32_t] left,
- ndarray[float32_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- float32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float32)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = rval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def outer_join_indexer_float32(ndarray[float32_t] left,
- ndarray[float32_t] right):
- cdef:
- Py_ssize_t i, j, nright, nleft, count
- float32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[float32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- count = nright
- elif nright == 0:
- count = nleft
- else:
- while True:
- if i == nleft:
- count += nright - j
- break
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- count += 1
- j += 1
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.float32)
-
- # do it again, but populate the indexers / result
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- for j in range(nright):
- lindexer[j] = -1
- rindexer[j] = j
- result[j] = right[j]
- elif nright == 0:
- for i in range(nleft):
- lindexer[i] = i
- rindexer[i] = -1
- result[i] = left[i]
- else:
- while True:
- if i == nleft:
- while j < nright:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = right[j]
- count += 1
- j += 1
- break
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = lval
- count += 1
- i += 1
- else:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = rval
- count += 1
- j += 1
-
- return result, lindexer, rindexer
-
-# Joins on ordered, unique indices
-
-# right might contain non-unique values
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def left_join_indexer_unique_object(ndarray[object] left,
- ndarray[object] right):
- cdef:
- Py_ssize_t i, j, nleft, nright
- ndarray[int64_t] indexer
- object lval, rval
-
- i = 0
- j = 0
- nleft = len(left)
- nright = len(right)
-
- indexer = np.empty(nleft, dtype=np.int64)
- while True:
- if i == nleft:
- break
-
- if j == nright:
- indexer[i] = -1
- i += 1
- continue
-
- rval = right[j]
-
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
-
- if left[i] == right[j]:
- indexer[i] = j
- i += 1
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
- j += 1
- elif left[i] > rval:
- indexer[i] = -1
- j += 1
- else:
- indexer[i] = -1
- i += 1
- return indexer
-
-
-# @cython.wraparound(False)
-# @cython.boundscheck(False)
-def left_join_indexer_object(ndarray[object] left,
- ndarray[object] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- object lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[object] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=object)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- i += 1
- count += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def inner_join_indexer_object(ndarray[object] left,
- ndarray[object] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- object lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[object] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=object)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = rval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def outer_join_indexer_object(ndarray[object] left,
- ndarray[object] right):
- cdef:
- Py_ssize_t i, j, nright, nleft, count
- object lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[object] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- count = nright
- elif nright == 0:
- count = nleft
- else:
- while True:
- if i == nleft:
- count += nright - j
- break
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- count += 1
- j += 1
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=object)
-
- # do it again, but populate the indexers / result
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- for j in range(nright):
- lindexer[j] = -1
- rindexer[j] = j
- result[j] = right[j]
- elif nright == 0:
- for i in range(nleft):
- lindexer[i] = i
- rindexer[i] = -1
- result[i] = left[i]
- else:
- while True:
- if i == nleft:
- while j < nright:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = right[j]
- count += 1
- j += 1
- break
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = lval
- count += 1
- i += 1
- else:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = rval
- count += 1
- j += 1
-
- return result, lindexer, rindexer
-
-# Joins on ordered, unique indices
-
-# right might contain non-unique values
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def left_join_indexer_unique_int32(ndarray[int32_t] left,
- ndarray[int32_t] right):
- cdef:
- Py_ssize_t i, j, nleft, nright
- ndarray[int64_t] indexer
- int32_t lval, rval
-
- i = 0
- j = 0
- nleft = len(left)
- nright = len(right)
-
- indexer = np.empty(nleft, dtype=np.int64)
- while True:
- if i == nleft:
- break
-
- if j == nright:
- indexer[i] = -1
- i += 1
- continue
-
- rval = right[j]
-
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
-
- if left[i] == right[j]:
- indexer[i] = j
- i += 1
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
- j += 1
- elif left[i] > rval:
- indexer[i] = -1
- j += 1
- else:
- indexer[i] = -1
- i += 1
- return indexer
-
-
-# @cython.wraparound(False)
-# @cython.boundscheck(False)
-def left_join_indexer_int32(ndarray[int32_t] left,
- ndarray[int32_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- int32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int32)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- i += 1
- count += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def inner_join_indexer_int32(ndarray[int32_t] left,
- ndarray[int32_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- int32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int32)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = rval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def outer_join_indexer_int32(ndarray[int32_t] left,
- ndarray[int32_t] right):
- cdef:
- Py_ssize_t i, j, nright, nleft, count
- int32_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int32_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- count = nright
- elif nright == 0:
- count = nleft
- else:
- while True:
- if i == nleft:
- count += nright - j
- break
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- count += 1
- j += 1
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int32)
-
- # do it again, but populate the indexers / result
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- for j in range(nright):
- lindexer[j] = -1
- rindexer[j] = j
- result[j] = right[j]
- elif nright == 0:
- for i in range(nleft):
- lindexer[i] = i
- rindexer[i] = -1
- result[i] = left[i]
- else:
- while True:
- if i == nleft:
- while j < nright:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = right[j]
- count += 1
- j += 1
- break
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = lval
- count += 1
- i += 1
- else:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = rval
- count += 1
- j += 1
-
- return result, lindexer, rindexer
-
-# Joins on ordered, unique indices
-
-# right might contain non-unique values
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def left_join_indexer_unique_int64(ndarray[int64_t] left,
- ndarray[int64_t] right):
- cdef:
- Py_ssize_t i, j, nleft, nright
- ndarray[int64_t] indexer
- int64_t lval, rval
-
- i = 0
- j = 0
- nleft = len(left)
- nright = len(right)
-
- indexer = np.empty(nleft, dtype=np.int64)
- while True:
- if i == nleft:
- break
-
- if j == nright:
- indexer[i] = -1
- i += 1
- continue
-
- rval = right[j]
-
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
-
- if left[i] == right[j]:
- indexer[i] = j
- i += 1
- while i < nleft - 1 and left[i] == rval:
- indexer[i] = j
- i += 1
- j += 1
- elif left[i] > rval:
- indexer[i] = -1
- j += 1
- else:
- indexer[i] = -1
- i += 1
- return indexer
-
-
-# @cython.wraparound(False)
-# @cython.boundscheck(False)
-def left_join_indexer_int64(ndarray[int64_t] left,
- ndarray[int64_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- int64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int64)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0:
- while i < nleft:
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- i += 1
- count += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def inner_join_indexer_int64(ndarray[int64_t] left,
- ndarray[int64_t] right):
- """
- Two-pass algorithm for monotonic indexes. Handles many-to-one merges
- """
- cdef:
- Py_ssize_t i, j, k, nright, nleft, count
- int64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- # do it again now that result size is known
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int64)
-
- i = 0
- j = 0
- count = 0
- if nleft > 0 and nright > 0:
- while True:
- if i == nleft:
- break
- if j == nright:
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = rval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- i += 1
- else:
- j += 1
-
- return result, lindexer, rindexer
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def outer_join_indexer_int64(ndarray[int64_t] left,
- ndarray[int64_t] right):
- cdef:
- Py_ssize_t i, j, nright, nleft, count
- int64_t lval, rval
- ndarray[int64_t] lindexer, rindexer
- ndarray[int64_t] result
-
- nleft = len(left)
- nright = len(right)
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- count = nright
- elif nright == 0:
- count = nleft
- else:
- while True:
- if i == nleft:
- count += nright - j
- break
- if j == nright:
- count += nleft - i
- break
-
- lval = left[i]
- rval = right[j]
- if lval == rval:
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- count += 1
- i += 1
- else:
- count += 1
- j += 1
-
- lindexer = np.empty(count, dtype=np.int64)
- rindexer = np.empty(count, dtype=np.int64)
- result = np.empty(count, dtype=np.int64)
-
- # do it again, but populate the indexers / result
-
- i = 0
- j = 0
- count = 0
- if nleft == 0:
- for j in range(nright):
- lindexer[j] = -1
- rindexer[j] = j
- result[j] = right[j]
- elif nright == 0:
- for i in range(nleft):
- lindexer[i] = i
- rindexer[i] = -1
- result[i] = left[i]
- else:
- while True:
- if i == nleft:
- while j < nright:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = right[j]
- count += 1
- j += 1
- break
- if j == nright:
- while i < nleft:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = left[i]
- count += 1
- i += 1
- break
-
- lval = left[i]
- rval = right[j]
-
- if lval == rval:
- lindexer[count] = i
- rindexer[count] = j
- result[count] = lval
- count += 1
- if i < nleft - 1:
- if j < nright - 1 and right[j + 1] == rval:
- j += 1
- else:
- i += 1
- if left[i] != rval:
- j += 1
- elif j < nright - 1:
- j += 1
- if lval != right[j]:
- i += 1
- else:
- # end of the road
- break
- elif lval < rval:
- lindexer[count] = i
- rindexer[count] = -1
- result[count] = lval
- count += 1
- i += 1
- else:
- lindexer[count] = -1
- rindexer[count] = j
- result[count] = rval
- count += 1
- j += 1
-
- return result, lindexer, rindexer
diff --git a/pandas/src/joins_func_helper.pxi b/pandas/src/joins_func_helper.pxi
deleted file mode 100644
index 7a59da37c5ced..0000000000000
--- a/pandas/src/joins_func_helper.pxi
+++ /dev/null
@@ -1,373 +0,0 @@
-"""
-Template for each `dtype` helper function for hashtable
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# asof_join_by
-#----------------------------------------------------------------------
-
-
-from hashtable cimport *
-
-
-def asof_join_int64_t_by_object(ndarray[int64_t] left_values,
- ndarray[int64_t] right_values,
- ndarray[object] left_by_values,
- ndarray[object] right_by_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- int64_t tolerance_
- PyObjectHashTable hash_table
- object by_value
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- hash_table = PyObjectHashTable(right_size)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = found_right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and found_right_pos != -1:
- diff = left_values[left_pos] - right_values[found_right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
-
-
-def asof_join_double_by_object(ndarray[double] left_values,
- ndarray[double] right_values,
- ndarray[object] left_by_values,
- ndarray[object] right_by_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- double tolerance_
- PyObjectHashTable hash_table
- object by_value
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- hash_table = PyObjectHashTable(right_size)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = found_right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and found_right_pos != -1:
- diff = left_values[left_pos] - right_values[found_right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
-
-
-def asof_join_int64_t_by_int64_t(ndarray[int64_t] left_values,
- ndarray[int64_t] right_values,
- ndarray[int64_t] left_by_values,
- ndarray[int64_t] right_by_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- int64_t tolerance_
- Int64HashTable hash_table
- int64_t by_value
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- hash_table = Int64HashTable(right_size)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = found_right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and found_right_pos != -1:
- diff = left_values[left_pos] - right_values[found_right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
-
-
-def asof_join_double_by_int64_t(ndarray[double] left_values,
- ndarray[double] right_values,
- ndarray[int64_t] left_by_values,
- ndarray[int64_t] right_by_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- double tolerance_
- Int64HashTable hash_table
- int64_t by_value
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- hash_table = Int64HashTable(right_size)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- hash_table.set_item(right_by_values[right_pos], right_pos)
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = found_right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and found_right_pos != -1:
- diff = left_values[left_pos] - right_values[found_right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
-
-
-#----------------------------------------------------------------------
-# asof_join
-#----------------------------------------------------------------------
-
-
-def asof_join_int64_t(ndarray[int64_t] left_values,
- ndarray[int64_t] right_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- int64_t tolerance_
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and right_pos != -1:
- diff = left_values[left_pos] - right_values[right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
-
-
-def asof_join_double(ndarray[double] left_values,
- ndarray[double] right_values,
- bint allow_exact_matches=1,
- tolerance=None):
-
- cdef:
- Py_ssize_t left_pos, right_pos, left_size, right_size
- ndarray[int64_t] left_indexer, right_indexer
- bint has_tolerance = 0
- double tolerance_
-
- # if we are using tolerance, set our objects
- if tolerance is not None:
- has_tolerance = 1
- tolerance_ = tolerance
-
- left_size = len(left_values)
- right_size = len(right_values)
-
- left_indexer = np.empty(left_size, dtype=np.int64)
- right_indexer = np.empty(left_size, dtype=np.int64)
-
- right_pos = 0
- for left_pos in range(left_size):
- # restart right_pos if it went negative in a previous iteration
- if right_pos < 0:
- right_pos = 0
-
- # find last position in right whose value is less than left's value
- if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
- right_pos += 1
- else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
- right_pos += 1
- right_pos -= 1
-
- # save positions as the desired index
- left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = right_pos
-
- # if needed, verify that tolerance is met
- if has_tolerance and right_pos != -1:
- diff = left_values[left_pos] - right_values[right_pos]
- if diff > tolerance_:
- right_indexer[left_pos] = -1
-
- return left_indexer, right_indexer
diff --git a/pandas/src/joins_func_helper.pxi.in b/pandas/src/joins_func_helper.pxi.in
index 06c35cfb69e53..33926a23f7f41 100644
--- a/pandas/src/joins_func_helper.pxi.in
+++ b/pandas/src/joins_func_helper.pxi.in
@@ -1,3 +1,4 @@
+# cython: boundscheck=False, wraparound=False
"""
Template for each `dtype` helper function for hashtable
@@ -14,7 +15,9 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
by_dtypes = [('PyObjectHashTable', 'object'), ('Int64HashTable', 'int64_t')]
# on_dtype
-on_dtypes = ['int64_t', 'double']
+on_dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
+ 'int8_t', 'int16_t', 'int32_t', 'int64_t',
+ 'float', 'double']
}}
@@ -98,7 +101,9 @@ def asof_join_{{on_dtype}}_by_{{by_dtype}}(ndarray[{{on_dtype}}] left_values,
{{py:
# on_dtype
-dtypes = ['int64_t', 'double']
+dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
+ 'int8_t', 'int16_t', 'int32_t', 'int64_t',
+ 'float', 'double']
}}
diff --git a/pandas/src/msgpack/unpack_template.h b/pandas/src/msgpack/unpack_template.h
index 95af6735520fc..fba372ddcb3e4 100644
--- a/pandas/src/msgpack/unpack_template.h
+++ b/pandas/src/msgpack/unpack_template.h
@@ -17,7 +17,7 @@
*/
#ifndef USE_CASE_RANGE
-#if !defined(_MSC_VER)
+#ifdef __GNUC__
#define USE_CASE_RANGE
#endif
#endif
diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c
index af85b7b894d26..450abcf6c325c 100644
--- a/pandas/src/parser/tokenizer.c
+++ b/pandas/src/parser/tokenizer.c
@@ -478,9 +478,10 @@ static int end_line(parser_t *self) {
}
}
- if (self->state == SKIP_LINE || \
- self->state == QUOTE_IN_SKIP_LINE || \
- self->state == QUOTE_IN_QUOTE_IN_SKIP_LINE
+ if (self->state == START_FIELD_IN_SKIP_LINE || \
+ self->state == IN_FIELD_IN_SKIP_LINE || \
+ self->state == IN_QUOTED_FIELD_IN_SKIP_LINE || \
+ self->state == QUOTE_IN_QUOTED_FIELD_IN_SKIP_LINE
) {
TRACE(("end_line: Skipping row %d\n", self->file_lines));
// increment file line count
@@ -725,16 +726,14 @@ int skip_this_line(parser_t *self, int64_t rownum) {
}
}
-int tokenize_bytes(parser_t *self, size_t line_limit)
+int tokenize_bytes(parser_t *self, size_t line_limit, int start_lines)
{
- int i, slen, start_lines;
+ int i, slen;
long maxstreamsize;
char c;
char *stream;
char *buf = self->data + self->datapos;
- start_lines = self->lines;
-
if (make_stream_space(self, self->datalen - self->datapos) < 0) {
self->error_msg = "out of memory";
return -1;
@@ -761,38 +760,54 @@ int tokenize_bytes(parser_t *self, size_t line_limit)
switch(self->state) {
- case SKIP_LINE:
- TRACE(("tokenize_bytes SKIP_LINE 0x%x, state %d\n", c, self->state));
+ case START_FIELD_IN_SKIP_LINE:
if (IS_TERMINATOR(c)) {
END_LINE();
} else if (IS_CARRIAGE(c)) {
self->file_lines++;
self->state = EAT_CRNL_NOP;
} else if (IS_QUOTE(c)) {
- self->state = QUOTE_IN_SKIP_LINE;
+ self->state = IN_QUOTED_FIELD_IN_SKIP_LINE;
+ } else if (IS_DELIMITER(c)) {
+ // Do nothing, we're starting a new field again.
+ } else {
+ self->state = IN_FIELD_IN_SKIP_LINE;
}
break;
- case QUOTE_IN_SKIP_LINE:
+ case IN_FIELD_IN_SKIP_LINE:
+ if (IS_TERMINATOR(c)) {
+ END_LINE();
+ } else if (IS_CARRIAGE(c)) {
+ self->file_lines++;
+ self->state = EAT_CRNL_NOP;
+ } else if (IS_DELIMITER(c)) {
+ self->state = START_FIELD_IN_SKIP_LINE;
+ }
+ break;
+
+ case IN_QUOTED_FIELD_IN_SKIP_LINE:
if (IS_QUOTE(c)) {
if (self->doublequote) {
- self->state = QUOTE_IN_QUOTE_IN_SKIP_LINE;
+ self->state = QUOTE_IN_QUOTED_FIELD_IN_SKIP_LINE;
} else {
- self->state = SKIP_LINE;
+ self->state = IN_FIELD_IN_SKIP_LINE;
}
}
break;
- case QUOTE_IN_QUOTE_IN_SKIP_LINE:
+ case QUOTE_IN_QUOTED_FIELD_IN_SKIP_LINE:
if (IS_QUOTE(c)) {
- self->state = QUOTE_IN_SKIP_LINE;
+ self->state = IN_QUOTED_FIELD_IN_SKIP_LINE;
} else if (IS_TERMINATOR(c)) {
END_LINE();
} else if (IS_CARRIAGE(c)) {
self->file_lines++;
self->state = EAT_CRNL_NOP;
+ } else if (IS_DELIMITER(c)) {
+ self->state = START_FIELD_IN_SKIP_LINE;
} else {
- self->state = SKIP_LINE;
+ self->state = IN_FIELD_IN_SKIP_LINE;
}
break;
@@ -846,9 +861,9 @@ int tokenize_bytes(parser_t *self, size_t line_limit)
// start of record
if (skip_this_line(self, self->file_lines)) {
if (IS_QUOTE(c)) {
- self->state = QUOTE_IN_SKIP_LINE;
+ self->state = IN_QUOTED_FIELD_IN_SKIP_LINE;
} else {
- self->state = SKIP_LINE;
+ self->state = IN_FIELD_IN_SKIP_LINE;
if (IS_TERMINATOR(c)) {
END_LINE();
@@ -1367,7 +1382,7 @@ int _tokenize_helper(parser_t *self, size_t nrows, int all) {
TRACE(("_tokenize_helper: Trying to process %d bytes, datalen=%d, datapos= %d\n",
self->datalen - self->datapos, self->datalen, self->datapos));
- status = tokenize_bytes(self, nrows);
+ status = tokenize_bytes(self, nrows, start_lines);
if (status < 0) {
// XXX
diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h
index 8f7ae436bb7b7..487c1265d9358 100644
--- a/pandas/src/parser/tokenizer.h
+++ b/pandas/src/parser/tokenizer.h
@@ -123,9 +123,10 @@ typedef enum {
EAT_COMMENT,
EAT_LINE_COMMENT,
WHITESPACE_LINE,
- SKIP_LINE,
- QUOTE_IN_SKIP_LINE,
- QUOTE_IN_QUOTE_IN_SKIP_LINE,
+ START_FIELD_IN_SKIP_LINE,
+ IN_FIELD_IN_SKIP_LINE,
+ IN_QUOTED_FIELD_IN_SKIP_LINE,
+ QUOTE_IN_QUOTED_FIELD_IN_SKIP_LINE,
FINISHED
} ParserState;
diff --git a/pandas/src/sparse_op_helper.pxi b/pandas/src/sparse_op_helper.pxi
deleted file mode 100644
index 8462c31c84679..0000000000000
--- a/pandas/src/sparse_op_helper.pxi
+++ /dev/null
@@ -1,5864 +0,0 @@
-"""
-Template for each `dtype` helper function for sparse ops
-
-WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
-"""
-
-#----------------------------------------------------------------------
-# Sparse op
-#----------------------------------------------------------------------
-
-cdef inline float64_t __div_float64(float64_t a, float64_t b):
- if b == 0:
- if a > 0:
- return INF
- elif a < 0:
- return -INF
- else:
- return NaN
- else:
- return float(a) / b
-
-cdef inline float64_t __truediv_float64(float64_t a, float64_t b):
- return __div_float64(a, b)
-
-cdef inline float64_t __floordiv_float64(float64_t a, float64_t b):
- if b == 0:
- # numpy >= 1.11 returns NaN
- # for a // 0, rather than +-inf
- if _np_version_under1p11:
- if a > 0:
- return INF
- elif a < 0:
- return -INF
- return NaN
- else:
- return a // b
-
-cdef inline float64_t __mod_float64(float64_t a, float64_t b):
- if b == 0:
- return NaN
- else:
- return a % b
-
-cdef inline float64_t __div_int64(int64_t a, int64_t b):
- if b == 0:
- if a > 0:
- return INF
- elif a < 0:
- return -INF
- else:
- return NaN
- else:
- return float(a) / b
-
-cdef inline float64_t __truediv_int64(int64_t a, int64_t b):
- return __div_int64(a, b)
-
-cdef inline int64_t __floordiv_int64(int64_t a, int64_t b):
- if b == 0:
- return 0
- else:
- return a // b
-
-cdef inline int64_t __mod_int64(int64_t a, int64_t b):
- if b == 0:
- return 0
- else:
- return a % b
-
-#----------------------------------------------------------------------
-# sparse array op
-#----------------------------------------------------------------------
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_add_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] + y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill + yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_add_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] + y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- return out, out_index, xfill + yfill
-
-
-cpdef sparse_add_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_add_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_add_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_add_float64(float64_t xfill,
- float64_t yfill):
- return xfill + yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_add_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] + y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill + yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_add_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] + y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] + yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill + y[yi]
- yi += 1
-
- return out, out_index, xfill + yfill
-
-
-cpdef sparse_add_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_add_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_add_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_add_int64(int64_t xfill,
- int64_t yfill):
- return xfill + yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_sub_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] - y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill - yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_sub_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] - y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- return out, out_index, xfill - yfill
-
-
-cpdef sparse_sub_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_sub_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_sub_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_sub_float64(float64_t xfill,
- float64_t yfill):
- return xfill - yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_sub_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] - y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill - yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_sub_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] - y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] - yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill - y[yi]
- yi += 1
-
- return out, out_index, xfill - yfill
-
-
-cpdef sparse_sub_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_sub_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_sub_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_sub_int64(int64_t xfill,
- int64_t yfill):
- return xfill - yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_mul_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] * y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill * yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_mul_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] * y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- return out, out_index, xfill * yfill
-
-
-cpdef sparse_mul_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_mul_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_mul_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_mul_float64(float64_t xfill,
- float64_t yfill):
- return xfill * yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_mul_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] * y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill * yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_mul_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] * y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] * yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill * y[yi]
- yi += 1
-
- return out, out_index, xfill * yfill
-
-
-cpdef sparse_mul_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_mul_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_mul_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_mul_int64(int64_t xfill,
- int64_t yfill):
- return xfill * yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_div_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __div_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __div_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __div_float64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __div_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __div_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __div_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_div_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __div_float64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __div_float64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __div_float64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __div_float64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __div_float64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __div_float64(xfill, yfill)
-
-
-cpdef sparse_div_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_div_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_div_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_div_float64(float64_t xfill,
- float64_t yfill):
- return __div_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_div_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __div_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __div_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __div_int64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __div_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __div_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __div_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_div_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __div_int64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __div_int64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __div_int64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __div_int64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __div_int64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __div_int64(xfill, yfill)
-
-
-cpdef sparse_div_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_div_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_div_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_div_int64(int64_t xfill,
- int64_t yfill):
- return __div_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_mod_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __mod_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __mod_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __mod_float64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __mod_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __mod_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __mod_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_mod_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __mod_float64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __mod_float64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __mod_float64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __mod_float64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __mod_float64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __mod_float64(xfill, yfill)
-
-
-cpdef sparse_mod_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_mod_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_mod_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_mod_float64(float64_t xfill,
- float64_t yfill):
- return __mod_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_mod_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __mod_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __mod_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __mod_int64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __mod_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __mod_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __mod_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_mod_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __mod_int64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __mod_int64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __mod_int64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __mod_int64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __mod_int64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __mod_int64(xfill, yfill)
-
-
-cpdef sparse_mod_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_mod_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_mod_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_mod_int64(int64_t xfill,
- int64_t yfill):
- return __mod_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_truediv_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __truediv_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __truediv_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __truediv_float64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __truediv_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __truediv_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __truediv_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_truediv_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __truediv_float64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __truediv_float64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __truediv_float64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __truediv_float64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __truediv_float64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __truediv_float64(xfill, yfill)
-
-
-cpdef sparse_truediv_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_truediv_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_truediv_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_truediv_float64(float64_t xfill,
- float64_t yfill):
- return __truediv_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_truediv_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __truediv_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __truediv_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __truediv_int64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __truediv_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __truediv_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __truediv_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_truediv_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __truediv_int64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __truediv_int64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __truediv_int64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __truediv_int64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __truediv_int64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __truediv_int64(xfill, yfill)
-
-
-cpdef sparse_truediv_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_truediv_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_truediv_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_truediv_int64(int64_t xfill,
- int64_t yfill):
- return __truediv_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_floordiv_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __floordiv_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __floordiv_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __floordiv_float64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __floordiv_float64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __floordiv_float64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __floordiv_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_floordiv_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __floordiv_float64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __floordiv_float64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __floordiv_float64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __floordiv_float64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __floordiv_float64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __floordiv_float64(xfill, yfill)
-
-
-cpdef sparse_floordiv_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_floordiv_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_floordiv_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_floordiv_float64(float64_t xfill,
- float64_t yfill):
- return __floordiv_float64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_floordiv_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = __floordiv_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = __floordiv_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __floordiv_int64(x[xi], y[yi])
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __floordiv_int64(x[xi], yfill)
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = __floordiv_int64(xfill, y[yi])
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, __floordiv_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_floordiv_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = __floordiv_int64(xfill, y[yi])
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = __floordiv_int64(x[xi], yfill)
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = __floordiv_int64(x[xi], y[yi])
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = __floordiv_int64(x[xi], yfill)
- xi += 1
- else:
- # use x fill value
- out[out_i] = __floordiv_int64(xfill, y[yi])
- yi += 1
-
- return out, out_index, __floordiv_int64(xfill, yfill)
-
-
-cpdef sparse_floordiv_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_floordiv_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_floordiv_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_floordiv_int64(int64_t xfill,
- int64_t yfill):
- return __floordiv_int64(xfill, yfill)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_pow_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] ** y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill ** yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_pow_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[float64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.float64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] ** y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- return out, out_index, xfill ** yfill
-
-
-cpdef sparse_pow_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_pow_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_pow_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_pow_float64(float64_t xfill,
- float64_t yfill):
- return xfill ** yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_pow_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] ** y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill ** yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_pow_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[int64_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.int64)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] ** y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] ** yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill ** y[yi]
- yi += 1
-
- return out, out_index, xfill ** yfill
-
-
-cpdef sparse_pow_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_pow_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_pow_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_pow_int64(int64_t xfill,
- int64_t yfill):
- return xfill ** yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_eq_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] == y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill == yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_eq_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] == y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- return out, out_index, xfill == yfill
-
-
-cpdef sparse_eq_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_eq_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_eq_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_eq_float64(float64_t xfill,
- float64_t yfill):
- return xfill == yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_eq_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] == y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill == yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_eq_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] == y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] == yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill == y[yi]
- yi += 1
-
- return out, out_index, xfill == yfill
-
-
-cpdef sparse_eq_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_eq_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_eq_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_eq_int64(int64_t xfill,
- int64_t yfill):
- return xfill == yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_ne_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] != y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill != yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_ne_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] != y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- return out, out_index, xfill != yfill
-
-
-cpdef sparse_ne_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_ne_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_ne_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_ne_float64(float64_t xfill,
- float64_t yfill):
- return xfill != yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_ne_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] != y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill != yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_ne_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] != y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] != yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill != y[yi]
- yi += 1
-
- return out, out_index, xfill != yfill
-
-
-cpdef sparse_ne_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_ne_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_ne_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_ne_int64(int64_t xfill,
- int64_t yfill):
- return xfill != yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_lt_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] < y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill < yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_lt_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] < y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- return out, out_index, xfill < yfill
-
-
-cpdef sparse_lt_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_lt_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_lt_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_lt_float64(float64_t xfill,
- float64_t yfill):
- return xfill < yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_lt_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] < y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill < yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_lt_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] < y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] < yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill < y[yi]
- yi += 1
-
- return out, out_index, xfill < yfill
-
-
-cpdef sparse_lt_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_lt_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_lt_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_lt_int64(int64_t xfill,
- int64_t yfill):
- return xfill < yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_gt_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] > y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill > yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_gt_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] > y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- return out, out_index, xfill > yfill
-
-
-cpdef sparse_gt_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_gt_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_gt_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_gt_float64(float64_t xfill,
- float64_t yfill):
- return xfill > yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_gt_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] > y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill > yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_gt_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] > y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] > yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill > y[yi]
- yi += 1
-
- return out, out_index, xfill > yfill
-
-
-cpdef sparse_gt_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_gt_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_gt_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_gt_int64(int64_t xfill,
- int64_t yfill):
- return xfill > yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_le_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] <= y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill <= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_le_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] <= y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- return out, out_index, xfill <= yfill
-
-
-cpdef sparse_le_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_le_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_le_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_le_float64(float64_t xfill,
- float64_t yfill):
- return xfill <= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_le_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] <= y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill <= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_le_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] <= y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] <= yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill <= y[yi]
- yi += 1
-
- return out, out_index, xfill <= yfill
-
-
-cpdef sparse_le_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_le_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_le_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_le_int64(int64_t xfill,
- int64_t yfill):
- return xfill <= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_ge_float64(ndarray x_,
- BlockIndex xindex,
- float64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- float64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] >= y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill >= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_ge_float64(ndarray x_, IntIndex xindex,
- float64_t xfill,
- ndarray y_, IntIndex yindex,
- float64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[float64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] >= y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- return out, out_index, xfill >= yfill
-
-
-cpdef sparse_ge_float64(ndarray[float64_t, ndim=1] x,
- SparseIndex xindex, float64_t xfill,
- ndarray[float64_t, ndim=1] y,
- SparseIndex yindex, float64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_ge_float64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_ge_float64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_ge_float64(float64_t xfill,
- float64_t yfill):
- return xfill >= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_ge_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] >= y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill >= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_ge_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] >= y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] >= yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill >= y[yi]
- yi += 1
-
- return out, out_index, xfill >= yfill
-
-
-cpdef sparse_ge_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_ge_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_ge_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_ge_int64(int64_t xfill,
- int64_t yfill):
- return xfill >= yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_and_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] & y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill & yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_and_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] & y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- return out, out_index, xfill & yfill
-
-
-cpdef sparse_and_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_and_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_and_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_and_int64(int64_t xfill,
- int64_t yfill):
- return xfill & yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_and_uint8(ndarray x_,
- BlockIndex xindex,
- uint8_t xfill,
- ndarray y_,
- BlockIndex yindex,
- uint8_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[uint8_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] & y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill & yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_and_uint8(ndarray x_, IntIndex xindex,
- uint8_t xfill,
- ndarray y_, IntIndex yindex,
- uint8_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[uint8_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] & y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] & yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill & y[yi]
- yi += 1
-
- return out, out_index, xfill & yfill
-
-
-cpdef sparse_and_uint8(ndarray[uint8_t, ndim=1] x,
- SparseIndex xindex, uint8_t xfill,
- ndarray[uint8_t, ndim=1] y,
- SparseIndex yindex, uint8_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_and_uint8(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_and_uint8(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_and_uint8(uint8_t xfill,
- uint8_t yfill):
- return xfill & yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_or_int64(ndarray x_,
- BlockIndex xindex,
- int64_t xfill,
- ndarray y_,
- BlockIndex yindex,
- int64_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] | y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill | yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_or_int64(ndarray x_, IntIndex xindex,
- int64_t xfill,
- ndarray y_, IntIndex yindex,
- int64_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[int64_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] | y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- return out, out_index, xfill | yfill
-
-
-cpdef sparse_or_int64(ndarray[int64_t, ndim=1] x,
- SparseIndex xindex, int64_t xfill,
- ndarray[int64_t, ndim=1] y,
- SparseIndex yindex, int64_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_or_int64(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_or_int64(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_or_int64(int64_t xfill,
- int64_t yfill):
- return xfill | yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple block_op_or_uint8(ndarray x_,
- BlockIndex xindex,
- uint8_t xfill,
- ndarray y_,
- BlockIndex yindex,
- uint8_t yfill):
- '''
- Binary operator on BlockIndex objects with fill values
- '''
-
- cdef:
- BlockIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xbp = 0, ybp = 0 # block positions
- int32_t xloc, yloc
- Py_ssize_t xblock = 0, yblock = 0 # block numbers
-
- ndarray[uint8_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # to suppress Cython warning
- x = x_
- y = y_
-
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- # Wow, what a hack job. Need to do something about this
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if yblock == yindex.nblocks:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- continue
-
- if xblock == xindex.nblocks:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
- continue
-
- yloc = yindex.locbuf[yblock] + ybp
- xloc = xindex.locbuf[xblock] + xbp
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] | y[yi]
- xi += 1
- yi += 1
-
- # advance both locations
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
-
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
-
- # advance x location
- xbp += 1
- if xbp == xindex.lenbuf[xblock]:
- xblock += 1
- xbp = 0
- else:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- # advance y location
- ybp += 1
- if ybp == yindex.lenbuf[yblock]:
- yblock += 1
- ybp = 0
-
- return out, out_index, xfill | yfill
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline tuple int_op_or_uint8(ndarray x_, IntIndex xindex,
- uint8_t xfill,
- ndarray y_, IntIndex yindex,
- uint8_t yfill):
- cdef:
- IntIndex out_index
- Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices
- int32_t xloc, yloc
- ndarray[int32_t, ndim=1] xindices, yindices, out_indices
- ndarray[uint8_t, ndim=1] x, y
- ndarray[uint8_t, ndim=1] out
-
- # suppress Cython compiler warnings due to inlining
- x = x_
- y = y_
-
- # need to do this first to know size of result array
- out_index = xindex.make_union(yindex)
- out = np.empty(out_index.npoints, dtype=np.uint8)
-
- xindices = xindex.indices
- yindices = yindex.indices
- out_indices = out_index.indices
-
- # walk the two SparseVectors, adding matched locations...
- for out_i from 0 <= out_i < out_index.npoints:
- if xi == xindex.npoints:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
- continue
-
- if yi == yindex.npoints:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
- continue
-
- xloc = xindices[xi]
- yloc = yindices[yi]
-
- # each index in the out_index had to come from either x, y, or both
- if xloc == yloc:
- out[out_i] = x[xi] | y[yi]
- xi += 1
- yi += 1
- elif xloc < yloc:
- # use y fill value
- out[out_i] = x[xi] | yfill
- xi += 1
- else:
- # use x fill value
- out[out_i] = xfill | y[yi]
- yi += 1
-
- return out, out_index, xfill | yfill
-
-
-cpdef sparse_or_uint8(ndarray[uint8_t, ndim=1] x,
- SparseIndex xindex, uint8_t xfill,
- ndarray[uint8_t, ndim=1] y,
- SparseIndex yindex, uint8_t yfill):
-
- if isinstance(xindex, BlockIndex):
- return block_op_or_uint8(x, xindex.to_block_index(), xfill,
- y, yindex.to_block_index(), yfill)
- elif isinstance(xindex, IntIndex):
- return int_op_or_uint8(x, xindex.to_int_index(), xfill,
- y, yindex.to_int_index(), yfill)
- else:
- raise NotImplementedError
-
-
-cpdef sparse_fill_or_uint8(uint8_t xfill,
- uint8_t yfill):
- return xfill | yfill
diff --git a/pandas/src/util.pxd b/pandas/src/util.pxd
index fcb5583a0a6e7..be8d0d4aa6302 100644
--- a/pandas/src/util.pxd
+++ b/pandas/src/util.pxd
@@ -38,6 +38,20 @@ ctypedef fused numeric:
cnp.float32_t
cnp.float64_t
+cdef extern from "headers/stdint.h":
+ enum: UINT8_MAX
+ enum: UINT16_MAX
+ enum: UINT32_MAX
+ enum: UINT64_MAX
+ enum: INT8_MIN
+ enum: INT8_MAX
+ enum: INT16_MIN
+ enum: INT16_MAX
+ enum: INT32_MAX
+ enum: INT32_MIN
+ enum: INT64_MAX
+ enum: INT64_MIN
+
cdef inline object get_value_at(ndarray arr, object loc):
cdef:
Py_ssize_t i, sz
@@ -56,7 +70,12 @@ cdef inline object get_value_at(ndarray arr, object loc):
return get_value_1d(arr, i)
-cdef inline set_value_at(ndarray arr, object loc, object value):
+cdef inline set_value_at_unsafe(ndarray arr, object loc, object value):
+ """Sets a value into the array without checking the writeable flag.
+
+ This should be used when setting values in a loop, check the writeable
+ flag above the loop and then eschew the check on each iteration.
+ """
cdef:
Py_ssize_t i, sz
if is_float_object(loc):
@@ -73,6 +92,14 @@ cdef inline set_value_at(ndarray arr, object loc, object value):
assign_value_1d(arr, i, value)
+cdef inline set_value_at(ndarray arr, object loc, object value):
+ """Sets a value into the array after checking that the array is mutable.
+ """
+ if not cnp.PyArray_ISWRITEABLE(arr):
+ raise ValueError('assignment destination is read-only')
+
+ set_value_at_unsafe(arr, loc, value)
+
cdef inline int is_contiguous(ndarray arr):
return cnp.PyArray_CHKFLAGS(arr, cnp.NPY_C_CONTIGUOUS)
diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index 3bbfd621d2342..e7c32a4baa4ea 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -89,7 +89,7 @@ def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
- if re.match('^[\.\ ]+$', row):
+ if re.match(r'^[\.\ ]+$', row):
only_dot_row = True
return only_dot_row
@@ -834,7 +834,7 @@ def check_with_width(df, col_space):
# check that col_space affects HTML generation
# and be very brittle about it.
html = df.to_html(col_space=col_space)
- hdrs = [x for x in html.split("\n") if re.search("<th[>\s]", x)]
+ hdrs = [x for x in html.split("\n") if re.search(r"<th[>\s]", x)]
self.assertTrue(len(hdrs) > 0)
for h in hdrs:
self.assertTrue("min-width" in h)
@@ -1940,7 +1940,7 @@ def test_to_string(self):
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
- joined = '\n'.join([re.sub('\s+', ' ', x).strip() for x in lines[1:]])
+ joined = '\n'.join([re.sub(r'\s+', ' ', x).strip() for x in lines[1:]])
recons = read_table(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
@@ -3782,7 +3782,7 @@ def chck_ncols(self, s):
res = repr(s)
lines = res.split('\n')
lines = [line for line in repr(s).split('\n')
- if not re.match('[^\.]*\.+', line)][:-1]
+ if not re.match(r'[^\.]*\.+', line)][:-1]
ncolsizes = len(set(len(line.strip()) for line in lines))
self.assertEqual(ncolsizes, 1)
@@ -3823,7 +3823,7 @@ def test_max_rows_eq_one(self):
def test_truncate_ndots(self):
def getndots(s):
- return len(re.match('[^\.]*(\.*)', s).groups()[0])
+ return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 390d796ced006..f6081e14d4081 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -806,7 +806,7 @@ def test_sem(self):
def test_sort_invalid_kwargs(self):
df = DataFrame([1, 2, 3], columns=['a'])
- msg = "sort\(\) got an unexpected keyword argument 'foo'"
+ msg = r"sort\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, df.sort, foo=2)
# Neither of these should raise an error because they
@@ -1323,6 +1323,35 @@ def test_nsmallest_multiple_columns(self):
expected = df.sort_values(['a', 'c']).head(5)
tm.assert_frame_equal(result, expected)
+ def test_nsmallest_nlargest_duplicate_index(self):
+ # GH 13412
+ df = pd.DataFrame({'a': [1, 2, 3, 4],
+ 'b': [4, 3, 2, 1],
+ 'c': [0, 1, 2, 3]},
+ index=[0, 0, 1, 1])
+ result = df.nsmallest(4, 'a')
+ expected = df.sort_values('a').head(4)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nlargest(4, 'a')
+ expected = df.sort_values('a', ascending=False).head(4)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nsmallest(4, ['a', 'c'])
+ expected = df.sort_values(['a', 'c']).head(4)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nsmallest(4, ['c', 'a'])
+ expected = df.sort_values(['c', 'a']).head(4)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nlargest(4, ['a', 'c'])
+ expected = df.sort_values(['a', 'c'], ascending=False).head(4)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nlargest(4, ['c', 'a'])
+ expected = df.sort_values(['c', 'a'], ascending=False).head(4)
+ tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Isin
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index b7cd8a1c01224..c6b69dad3e6b5 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -57,6 +57,24 @@ def test_concat_multiple_tzs(self):
expected = DataFrame(dict(time=[ts2, ts3]))
assert_frame_equal(results, expected)
+ def test_concat_tuple_keys(self):
+ # GH 14438
+ df1 = pd.DataFrame(np.ones((2, 2)), columns=list('AB'))
+ df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list('AB'))
+ results = pd.concat((df1, df2), keys=[('bee', 'bah'), ('bee', 'boo')])
+ expected = pd.DataFrame(
+ {'A': {('bee', 'bah', 0): 1.0,
+ ('bee', 'bah', 1): 1.0,
+ ('bee', 'boo', 0): 2.0,
+ ('bee', 'boo', 1): 2.0,
+ ('bee', 'boo', 2): 2.0},
+ 'B': {('bee', 'bah', 0): 1.0,
+ ('bee', 'bah', 1): 1.0,
+ ('bee', 'boo', 0): 2.0,
+ ('bee', 'boo', 1): 2.0,
+ ('bee', 'boo', 2): 2.0}})
+ assert_frame_equal(results, expected)
+
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
@@ -347,6 +365,65 @@ def test_concat_named_keys(self):
names=[None, None]))
assert_frame_equal(concatted_unnamed, expected_unnamed)
+ def test_concat_axis_parameter(self):
+ # GH 14369
+ df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2))
+ df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2))
+
+ # Index/row/0 DataFrame
+ expected_index = pd.DataFrame(
+ {'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
+
+ concatted_index = pd.concat([df1, df2], axis='index')
+ assert_frame_equal(concatted_index, expected_index)
+
+ concatted_row = pd.concat([df1, df2], axis='rows')
+ assert_frame_equal(concatted_row, expected_index)
+
+ concatted_0 = pd.concat([df1, df2], axis=0)
+ assert_frame_equal(concatted_0, expected_index)
+
+ # Columns/1 DataFrame
+ expected_columns = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A'])
+
+ concatted_columns = pd.concat([df1, df2], axis='columns')
+ assert_frame_equal(concatted_columns, expected_columns)
+
+ concatted_1 = pd.concat([df1, df2], axis=1)
+ assert_frame_equal(concatted_1, expected_columns)
+
+ series1 = pd.Series([0.1, 0.2])
+ series2 = pd.Series([0.3, 0.4])
+
+ # Index/row/0 Series
+ expected_index_series = pd.Series(
+ [0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+
+ concatted_index_series = pd.concat([series1, series2], axis='index')
+ assert_series_equal(concatted_index_series, expected_index_series)
+
+ concatted_row_series = pd.concat([series1, series2], axis='rows')
+ assert_series_equal(concatted_row_series, expected_index_series)
+
+ concatted_0_series = pd.concat([series1, series2], axis=0)
+ assert_series_equal(concatted_0_series, expected_index_series)
+
+ # Columns/1 Series
+ expected_columns_series = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1])
+
+ concatted_columns_series = pd.concat(
+ [series1, series2], axis='columns')
+ assert_frame_equal(concatted_columns_series, expected_columns_series)
+
+ concatted_1_series = pd.concat([series1, series2], axis=1)
+ assert_frame_equal(concatted_1_series, expected_columns_series)
+
+ # Testing ValueError
+ with assertRaisesRegexp(ValueError, 'No axis named'):
+ pd.concat([series1, series2], axis='something')
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
@@ -648,3 +725,13 @@ def test_combine_first_period(self):
exp = pd.DataFrame({'P': exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
self.assertEqual(res['P'].dtype, 'object')
+
+ def test_combine_first_int(self):
+ # GH14687 - integer series that do no align exactly
+
+ df1 = pd.DataFrame({'a': [0, 1, 3, 5]}, dtype='int64')
+ df2 = pd.DataFrame({'a': [1, 4]}, dtype='int64')
+
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ self.assertEqual(res['a'].dtype, 'int64')
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d21db5ba52a45..489c85a7234b8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -259,6 +259,14 @@ def test_constructor_dict(self):
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assert_index_equal(frame.index, Index([], dtype=np.int64))
+ # GH 14381
+ # Dict with None value
+ frame_none = DataFrame(dict(a=None), index=[0])
+ frame_none_list = DataFrame(dict(a=[None]), index=[0])
+ tm.assert_equal(frame_none.get_value(0, 'a'), None)
+ tm.assert_equal(frame_none_list.get_value(0, 'a'), None)
+ tm.assert_frame_equal(frame_none, frame_none_list)
+
# GH10856
# dict with scalar values should raise error, even if columns passed
with tm.assertRaises(ValueError):
@@ -296,7 +304,7 @@ def test_constructor_error_msgs(self):
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
- msg = "Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
+ msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
@@ -308,11 +316,11 @@ def test_constructor_error_msgs(self):
# wrong size axis labels
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
- "\(3, 2\), indices imply \(3, 1\)"):
+ r"\(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
- "\(3, 2\), indices imply \(2, 2\)"):
+ r"\(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
with tm.assertRaisesRegexp(ValueError, 'If using all scalar values, '
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index 77974718714f8..220d29f624942 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -468,3 +468,13 @@ def test_set_value_by_index(self):
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
+
+ def test_insert_with_columns_dups(self):
+ # GH 14291
+ df = pd.DataFrame()
+ df.insert(0, 'A', ['g', 'h', 'i'], allow_duplicates=True)
+ df.insert(0, 'A', ['d', 'e', 'f'], allow_duplicates=True)
+ df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
+ exp = pd.DataFrame([['a', 'd', 'g'], ['b', 'e', 'h'],
+ ['c', 'f', 'i']], columns=['A', 'A', 'A'])
+ assert_frame_equal(df, exp)
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 52e8697abe850..22414a6ba8a53 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -262,6 +262,11 @@ def test_quantile_datetime(self):
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
+ # empty when numeric_only=True
+ # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
+ # result = df[['a', 'c']].quantile(.5)
+ # result = df[['a', 'c']].quantile([.5])
+
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
@@ -340,3 +345,95 @@ def test_quantile_box(self):
pd.Timedelta('2 days')]],
index=[0.5], columns=list('AaBbCc'))
tm.assert_frame_equal(res, exp)
+
+ def test_quantile_nan(self):
+
+ # GH 14357 - float block where some cols have missing values
+ df = DataFrame({'a': np.arange(1, 6.0), 'b': np.arange(1, 6.0)})
+ df.iloc[-1, 1] = np.nan
+
+ res = df.quantile(0.5)
+ exp = Series([3.0, 2.5], index=['a', 'b'], name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5, 0.75])
+ exp = DataFrame({'a': [3.0, 4.0], 'b': [2.5, 3.25]}, index=[0.5, 0.75])
+ tm.assert_frame_equal(res, exp)
+
+ res = df.quantile(0.5, axis=1)
+ exp = Series(np.arange(1.0, 6.0), name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5, 0.75], axis=1)
+ exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
+ tm.assert_frame_equal(res, exp)
+
+ # full-nan column
+ df['b'] = np.nan
+
+ res = df.quantile(0.5)
+ exp = Series([3.0, np.nan], index=['a', 'b'], name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5, 0.75])
+ exp = DataFrame({'a': [3.0, 4.0], 'b': [np.nan, np.nan]},
+ index=[0.5, 0.75])
+ tm.assert_frame_equal(res, exp)
+
+ def test_quantile_nat(self):
+
+ # full NaT column
+ df = DataFrame({'a': [pd.NaT, pd.NaT, pd.NaT]})
+
+ res = df.quantile(0.5, numeric_only=False)
+ exp = Series([pd.NaT], index=['a'], name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5], numeric_only=False)
+ exp = DataFrame({'a': [pd.NaT]}, index=[0.5])
+ tm.assert_frame_equal(res, exp)
+
+ # mixed non-null / full null column
+ df = DataFrame({'a': [pd.Timestamp('2012-01-01'),
+ pd.Timestamp('2012-01-02'),
+ pd.Timestamp('2012-01-03')],
+ 'b': [pd.NaT, pd.NaT, pd.NaT]})
+
+ res = df.quantile(0.5, numeric_only=False)
+ exp = Series([pd.Timestamp('2012-01-02'), pd.NaT], index=['a', 'b'],
+ name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5], numeric_only=False)
+ exp = DataFrame([[pd.Timestamp('2012-01-02'), pd.NaT]], index=[0.5],
+ columns=['a', 'b'])
+ tm.assert_frame_equal(res, exp)
+
+ def test_quantile_empty(self):
+
+ # floats
+ df = DataFrame(columns=['a', 'b'], dtype='float64')
+
+ res = df.quantile(0.5)
+ exp = Series([np.nan, np.nan], index=['a', 'b'], name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5])
+ exp = DataFrame([[np.nan, np.nan]], columns=['a', 'b'], index=[0.5])
+ tm.assert_frame_equal(res, exp)
+
+ # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
+ # res = df.quantile(0.5, axis=1)
+ # res = df.quantile([0.5], axis=1)
+
+ # ints
+ df = DataFrame(columns=['a', 'b'], dtype='int64')
+
+ # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
+ # res = df.quantile(0.5)
+
+ # datetimes
+ df = DataFrame(columns=['a', 'b'], dtype='datetime64')
+
+ # FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
+ # res = df.quantile(0.5, numeric_only=False)
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 85159de64d83e..36ae5dac733a5 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -147,6 +147,14 @@ def test_query_non_str(self):
with tm.assertRaisesRegexp(ValueError, msg):
df.query(111)
+ def test_query_empty_string(self):
+ # GH 13139
+ df = pd.DataFrame({'A': [1, 2, 3]})
+
+ msg = "expr cannot be an empty string"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ df.query('')
+
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
@@ -1116,8 +1124,8 @@ def test_invalid_type_for_operator_raises(self):
ops = '+', '-', '*', '/'
for op in ops:
with tm.assertRaisesRegexp(TypeError,
- "unsupported operand type\(s\) for "
- ".+: '.+' and '.+'"):
+ r"unsupported operand type\(s\) for "
+ r".+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=self.engine,
parser=self.parser)
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index bed0e0623ace0..3bc388da5bec8 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -550,7 +550,7 @@ def test_regex_replace_numeric_to_object_conversion(self):
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
- metachars = '[]', '()', '\d', '\w', '\s'
+ metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
@@ -889,7 +889,7 @@ def test_replace_doesnt_replace_without_regex(self):
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
- res = df.replace({'\D': 1})
+ res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 5e5e9abda1200..12cd62f8b4cc0 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -405,3 +405,11 @@ def memory_usage(f):
# high upper bound
self.assertTrue(memory_usage(unstacked) - memory_usage(df) < 2000)
+
+ def test_info_categorical(self):
+ # GH14298
+ idx = pd.CategoricalIndex(['a', 'b'])
+ df = pd.DataFrame(np.zeros((2, 2)), index=idx, columns=idx)
+
+ buf = StringIO()
+ df.info(buf=buf)
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 8b1b1130dc2fc..6b0dd38cdb82c 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -282,6 +282,46 @@ def test_unstack_fill_frame_categorical(self):
index=list('xyz'))
assert_frame_equal(result, expected)
+ def test_unstack_preserve_dtypes(self):
+ # Checks fix for #11847
+ df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
+ index=['a', 'b', 'c'],
+ some_categories=pd.Series(['a', 'b', 'c']
+ ).astype('category'),
+ A=np.random.rand(3),
+ B=1,
+ C='foo',
+ D=pd.Timestamp('20010102'),
+ E=pd.Series([1.0, 50.0, 100.0]
+ ).astype('float32'),
+ F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
+ G=False,
+ H=pd.Series([1, 200, 923442], dtype='int8')))
+
+ def unstack_and_compare(df, column_name):
+ unstacked1 = df.unstack([column_name])
+ unstacked2 = df.unstack(column_name)
+ assert_frame_equal(unstacked1, unstacked2)
+
+ df1 = df.set_index(['state', 'index'])
+ unstack_and_compare(df1, 'index')
+
+ df1 = df.set_index(['state', 'some_categories'])
+ unstack_and_compare(df1, 'some_categories')
+
+ df1 = df.set_index(['F', 'C'])
+ unstack_and_compare(df1, 'F')
+
+ df1 = df.set_index(['G', 'B', 'state'])
+ unstack_and_compare(df1, 'B')
+
+ df1 = df.set_index(['E', 'A'])
+ unstack_and_compare(df1, 'E')
+
+ df1 = df.set_index(['state', 'index'])
+ s = df1['A']
+ unstack_and_compare(s, 'index')
+
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 6d09378ca864e..d888a5d334099 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -587,7 +587,7 @@ def _make_frame(names=None):
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False)
- for i in [5, 6, 7]:
+ for i in [6, 7]:
msg = 'len of {i}, but only 5 lines in file'.format(i=i)
with assertRaisesRegexp(CParserError, msg):
read_csv(path, tupleize_cols=False,
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 773f20532e4ff..1b373baf9b3c1 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -431,7 +431,7 @@ def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 21471b1883209..afcd6889bf4d9 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -6,8 +6,8 @@
from pandas.indexes.api import Index, MultiIndex
from .common import Base
-from pandas.compat import (is_platform_windows, range, lrange, lzip, u,
- zip, PY3)
+from pandas.compat import (range, lrange, lzip, u,
+ zip, PY3, PY36)
import operator
import os
@@ -913,12 +913,13 @@ def test_summary(self):
def test_format(self):
self._check_method_works(Index.format)
- index = Index([datetime.now()])
-
+ # GH 14626
# windows has different precision on datetime.datetime.now (it doesn't
# include us since the default for Timestamp shows these but Index
- # formating does not we are skipping
- if not is_platform_windows():
+    # formatting does not, so we are skipping)
+ now = datetime.now()
+ if not str(now).endswith("000"):
+ index = Index([now])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
@@ -1763,7 +1764,12 @@ def create_index(self):
def test_order(self):
idx = self.create_index()
# 9816 deprecated
- if PY3:
+ if PY36:
+ with tm.assertRaisesRegexp(TypeError, "'>' not supported "
+ "between instances of 'str' and 'int'"):
+ with tm.assert_produces_warning(FutureWarning):
+ idx.order()
+ elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
with tm.assert_produces_warning(FutureWarning):
idx.order()
@@ -1773,7 +1779,11 @@ def test_order(self):
def test_argsort(self):
idx = self.create_index()
- if PY3:
+ if PY36:
+ with tm.assertRaisesRegexp(TypeError, "'>' not supported "
+ "between instances of 'str' and 'int'"):
+ result = idx.argsort()
+ elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = idx.argsort()
else:
@@ -1783,7 +1793,11 @@ def test_argsort(self):
def test_numpy_argsort(self):
idx = self.create_index()
- if PY3:
+ if PY36:
+ with tm.assertRaisesRegexp(TypeError, "'>' not supported "
+ "between instances of 'str' and 'int'"):
+ result = np.argsort(idx)
+ elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = np.argsort(idx)
else:
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 9f8405bcc2e1e..819b88bf4c5d3 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -278,6 +278,11 @@ def test_append(self):
# invalid objects
self.assertRaises(TypeError, lambda: ci.append(Index(['a', 'd'])))
+ # GH14298 - if base object is not categorical -> coerce to object
+ result = Index(['c', 'a']).append(ci)
+ expected = Index(list('caaabbca'))
+ tm.assert_index_equal(result, expected, exact=True)
+
def test_insert(self):
ci = self.create_index()
@@ -885,7 +890,7 @@ def test_take_invalid_kwargs(self):
idx = pd.CategoricalIndex([1, 2, 3], name='foo')
indices = [1, 0, -1]
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
diff --git a/pandas/tests/indexes/test_datetimelike.py b/pandas/tests/indexes/test_datetimelike.py
index 7502a4ce26b04..b04e840ffc849 100644
--- a/pandas/tests/indexes/test_datetimelike.py
+++ b/pandas/tests/indexes/test_datetimelike.py
@@ -732,6 +732,31 @@ def test_fillna_datetime64(self):
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of DatetimeIndex does not preserve frequency,
+ # so a differencing operation should not retain the freq field of the
+ # original index.
+ i = pd.date_range("20160920", "20160925", freq="D")
+
+ a = pd.date_range("20160921", "20160924", freq="D")
+ expected = pd.DatetimeIndex(["20160920", "20160925"], freq=None)
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.date_range("20160922", "20160925", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.DatetimeIndex(["20160920", "20160921"], freq=None)
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_diff = a_diff.union(b_diff)
+ expected = pd.DatetimeIndex(["20160920", "20160921", "20160925"],
+ freq=None)
+ tm.assert_index_equal(union_of_diff, expected)
+ tm.assert_attr_equal('freq', union_of_diff, expected)
+
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
@@ -938,6 +963,30 @@ def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of Period MUST preserve frequency, but the ability
+ # to union results must be preserved
+ i = pd.period_range("20160920", "20160925", freq="D")
+
+ a = pd.period_range("20160921", "20160924", freq="D")
+ expected = pd.PeriodIndex(["20160920", "20160925"], freq='D')
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.period_range("20160922", "20160925", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.PeriodIndex(["20160920", "20160921"], freq='D')
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_diff = a_diff.union(b_diff)
+ expected = pd.PeriodIndex(["20160920", "20160921", "20160925"],
+ freq='D')
+ tm.assert_index_equal(union_of_diff, expected)
+ tm.assert_attr_equal('freq', union_of_diff, expected)
+
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
@@ -1149,3 +1198,28 @@ def test_fillna_timedelta(self):
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
+
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of TimedeltaIndex does not preserve frequency,
+ # so a differencing operation should not retain the freq field of the
+ # original index.
+ i = pd.timedelta_range("0 days", "5 days", freq="D")
+
+ a = pd.timedelta_range("1 days", "4 days", freq="D")
+ expected = pd.TimedeltaIndex(["0 days", "5 days"], freq=None)
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.timedelta_range("2 days", "5 days", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.TimedeltaIndex(["0 days", "1 days"], freq=None)
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_difference = a_diff.union(b_diff)
+ expected = pd.TimedeltaIndex(["0 days", "1 days", "5 days"],
+ freq=None)
+ tm.assert_index_equal(union_of_difference, expected)
+ tm.assert_attr_equal('freq', union_of_difference, expected)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index fdc5a2eaec812..61a4ea53f06fb 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -1870,7 +1870,7 @@ def take_invalid_kwargs(self):
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index b0b8864521666..38e715fce2720 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -29,12 +29,7 @@ def setUp(self):
def create_index(self):
return RangeIndex(5)
- def test_binops(self):
- ops = [operator.add, operator.sub, operator.mul, operator.floordiv,
- operator.truediv, pow]
- scalars = [-1, 1, 2]
- idxs = [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2),
- RangeIndex(-10, 10, 2), RangeIndex(5, -5, -1)]
+ def check_binop(self, ops, scalars, idxs):
for op in ops:
for a, b in combinations(idxs, 2):
result = op(a, b)
@@ -46,6 +41,23 @@ def test_binops(self):
expected = op(Int64Index(idx), scalar)
tm.assert_index_equal(result, expected)
+ def test_binops(self):
+ ops = [operator.add, operator.sub, operator.mul, operator.floordiv,
+ operator.truediv]
+ scalars = [-1, 1, 2]
+ idxs = [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2),
+ RangeIndex(-10, 10, 2), RangeIndex(5, -5, -1)]
+ self.check_binop(ops, scalars, idxs)
+
+ def test_binops_pow(self):
+ # later versions of numpy don't allow powers of negative integers
+ # so test separately
+ # https://github.com/numpy/numpy/pull/8127
+ ops = [pow]
+ scalars = [1, 2]
+ idxs = [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]
+ self.check_binop(ops, scalars, idxs)
+
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
@@ -587,6 +599,35 @@ def test_intersection(self):
other.values)))
self.assert_index_equal(result, expected)
+ index = RangeIndex(5)
+
+ # intersect of non-overlapping indices
+ other = RangeIndex(5, 10, 1)
+ result = index.intersection(other)
+ expected = RangeIndex(0, 0, 1)
+ self.assert_index_equal(result, expected)
+
+ other = RangeIndex(-1, -5, -1)
+ result = index.intersection(other)
+ expected = RangeIndex(0, 0, 1)
+ self.assert_index_equal(result, expected)
+
+ # intersection of empty indices
+ other = RangeIndex(0, 0, 1)
+ result = index.intersection(other)
+ expected = RangeIndex(0, 0, 1)
+ self.assert_index_equal(result, expected)
+
+ result = other.intersection(index)
+ self.assert_index_equal(result, expected)
+
+ # intersection of non-overlapping values based on start value and gcd
+ index = RangeIndex(1, 10, 2)
+ other = RangeIndex(0, 10, 4)
+ result = index.intersection(other)
+ expected = RangeIndex(0, 0, 1)
+ self.assert_index_equal(result, expected)
+
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index fa406a27bef69..9ca1fd2a76817 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1338,7 +1338,7 @@ def test_at_to_fail(self):
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
- self.assertRaisesRegexp(KeyError, "\['y'\] not in index",
+ self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
@@ -2232,7 +2232,7 @@ def f():
with tm.assertRaisesRegexp(
KeyError,
'MultiIndex Slicing requires the index to be fully '
- 'lexsorted tuple len \(2\), lexsort depth \(0\)'):
+ r'lexsorted tuple len \(2\), lexsort depth \(0\)'):
df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
def test_multiindex_slicers_non_unique(self):
@@ -3613,6 +3613,27 @@ def test_iloc_non_unique_indexing(self):
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
+ def test_string_slice(self):
+ # GH 14424
+ # string indexing against datetimelike with object
+ # dtype should properly raises KeyError
+ df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
+ dtype=object))
+ self.assertTrue(df.index.is_all_dates)
+ with tm.assertRaises(KeyError):
+ df['2011']
+
+ with tm.assertRaises(KeyError):
+ df.loc['2011', 0]
+
+ df = pd.DataFrame()
+ self.assertFalse(df.index.is_all_dates)
+ with tm.assertRaises(KeyError):
+ df['2011']
+
+ with tm.assertRaises(KeyError):
+ df.loc['2011', 0]
+
def test_mi_access(self):
# GH 4145
@@ -3625,7 +3646,7 @@ def test_mi_access(self):
5 f B 6 A2 6
"""
- df = pd.read_csv(StringIO(data), sep='\s+', index_col=0)
+ df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 0f7bc02e24915..f07aadba175f2 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -778,6 +778,41 @@ def test_mixed_freq_irreg_period(self):
irreg.plot()
ps.plot()
+ def test_mixed_freq_shared_ax(self):
+
+ # GH13341, using sharex=True
+ idx1 = date_range('2015-01-01', periods=3, freq='M')
+ idx2 = idx1[:1].union(idx1[2:])
+ s1 = Series(range(len(idx1)), idx1)
+ s2 = Series(range(len(idx2)), idx2)
+
+ fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
+ s1.plot(ax=ax1)
+ s2.plot(ax=ax2)
+
+ self.assertEqual(ax1.freq, 'M')
+ self.assertEqual(ax2.freq, 'M')
+ self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ ax2.lines[0].get_xydata()[0, 0])
+
+ # using twinx
+ fig, ax1 = self.plt.subplots()
+ ax2 = ax1.twinx()
+ s1.plot(ax=ax1)
+ s2.plot(ax=ax2)
+
+ self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ ax2.lines[0].get_xydata()[0, 0])
+
+ # TODO (GH14330, GH14322)
+ # plotting the irregular first does not yet work
+ # fig, ax1 = plt.subplots()
+ # ax2 = ax1.twinx()
+ # s2.plot(ax=ax1)
+ # s1.plot(ax=ax2)
+ # self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ # ax2.lines[0].get_xydata()[0, 0])
+
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index e752197c6ad77..6878ca0e1bc06 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -216,15 +216,22 @@ def test_bar_log(self):
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
+ if self.mpl_ge_2_0_0:
+ expected = np.hstack((1.0e-05, expected))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
+ ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
- self.assertEqual(ax.get_ylim(), (0.001, ymax))
+ res = ax.get_ylim()
+ self.assertAlmostEqual(res[0], ymin)
+ self.assertAlmostEqual(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
- self.assertEqual(ax.get_xlim(), (0.001, ymax))
+ res = ax.get_xlim()
+ self.assertAlmostEqual(res[0], ymin)
+ self.assertAlmostEqual(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
@@ -569,7 +576,11 @@ def test_kde_missing_vals(self):
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
- _check_plot_works(s.plot.kde)
+ axes = _check_plot_works(s.plot.kde)
+        # check that the missing values were dropped from the plotted data
+ # GH14821
+ self.assertTrue(any(~np.isnan(axes.lines[0].get_xdata())),
+ msg='Missing Values not dropped')
@slow
def test_hist_kwargs(self):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 24e3a0ff5f325..26c2220c4811a 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1515,6 +1515,15 @@ def test_nsmallest_nlargest(self):
with tm.assertRaisesRegexp(ValueError, msg):
s.nlargest(keep='invalid')
+ # GH 13412
+ s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
+ result = s.nlargest(3)
+ expected = s.sort_values(ascending=False).head(3)
+ assert_series_equal(result, expected)
+ result = s.nsmallest(3)
+ expected = s.sort_values().head(3)
+ assert_series_equal(result, expected)
+
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
@@ -1618,7 +1627,7 @@ def test_reshape_bad_kwarg(self):
tm.assertRaisesRegexp(TypeError, msg, a.reshape, (2, 2), foo=2)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- msg = "reshape\(\) got an unexpected keyword argument 'foo'"
+ msg = r"reshape\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, a.reshape, a.shape, foo=2)
def test_numpy_reshape(self):
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 7c16fd060b181..c44a7a898bb8d 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1947,6 +1947,40 @@ def test_multilevel_preserve_name(self):
self.assertEqual(result.name, s.name)
self.assertEqual(result2.name, s.name)
+ def test_setitem_scalar_into_readonly_backing_data(self):
+ # GH14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ for n in range(len(series)):
+ with self.assertRaises(ValueError):
+ series[n] = 1
+
+ self.assertEqual(
+ array[n],
+ 0,
+ msg='even though the ValueError was raised, the underlying'
+ ' array was still mutated!',
+ )
+
+ def test_setitem_slice_into_readonly_backing_data(self):
+ # GH14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ with self.assertRaises(ValueError):
+ series[1:3] = 1
+
+ self.assertTrue(
+ not array.any(),
+ msg='even though the ValueError was raised, the underlying'
+ ' array was still mutated!',
+ )
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 4e6c58df54dfd..27bc04ac043be 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -1,7 +1,8 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from datetime import timedelta
+import pytz
+from datetime import timedelta, datetime
from numpy import nan
import numpy as np
@@ -10,7 +11,6 @@
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
-
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
@@ -250,6 +250,24 @@ def test_datetime64_tz_fillna(self):
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
+ def test_datetime64tz_fillna_round_issue(self):
+ # GH 14872
+
+ data = pd.Series([pd.NaT, pd.NaT,
+ datetime(2016, 12, 12, 22, 24, 6, 100001,
+ tzinfo=pytz.utc)])
+
+ filled = data.fillna(method='bfill')
+
+ expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
+ 100001, tzinfo=pytz.utc),
+ datetime(2016, 12, 12, 22, 24, 6,
+ 100001, tzinfo=pytz.utc),
+ datetime(2016, 12, 12, 22, 24, 6,
+ 100001, tzinfo=pytz.utc)])
+
+ assert_series_equal(filled, expected)
+
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index 7d2517987e526..76db6c90a685f 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -184,3 +184,35 @@ def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
+
+ def test_quantile_empty(self):
+
+ # floats
+ s = Series([], dtype='float64')
+
+ res = s.quantile(0.5)
+ self.assertTrue(np.isnan(res))
+
+ res = s.quantile([0.5])
+ exp = Series([np.nan], index=[0.5])
+ tm.assert_series_equal(res, exp)
+
+ # int
+ s = Series([], dtype='int64')
+
+ res = s.quantile(0.5)
+ self.assertTrue(np.isnan(res))
+
+ res = s.quantile([0.5])
+ exp = Series([np.nan], index=[0.5])
+ tm.assert_series_equal(res, exp)
+
+ # datetime
+ s = Series([], dtype='datetime64[ns]')
+
+ res = s.quantile(0.5)
+ self.assertTrue(res is pd.NaT)
+
+ res = s.quantile([0.5])
+ exp = Series([pd.NaT], index=[0.5])
+ tm.assert_series_equal(res, exp)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index eaa316bfd8157..da8cf120b8ed4 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -900,7 +900,7 @@ def test_duplicated_drop_duplicates_index(self):
tm.assert_index_equal(result, idx[~expected])
with tm.assertRaisesRegexp(
- TypeError, "drop_duplicates\(\) got an unexpected "
+ TypeError, r"drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index cdcd8b1bcba60..84df82db69f77 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1740,8 +1740,8 @@ def test_numpy_squeeze(self):
np.squeeze, s, axis=0)
def test_transpose(self):
- msg = ("transpose\(\) got multiple values for "
- "keyword argument 'axes'")
+ msg = (r"transpose\(\) got multiple values for "
+ r"keyword argument 'axes'")
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
@@ -1831,7 +1831,7 @@ def test_take_invalid_kwargs(self):
p4d = tm.makePanel4D()
for obj in (s, df, p, p4d):
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, obj.take,
indices, foo=2)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f3791ee1d5c91..6d23d4f152d3e 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -442,6 +442,36 @@ def test_grouper_creation_bug(self):
result = g.sum()
assert_frame_equal(result, expected)
+ # GH14334
+ # pd.Grouper(key=...) may be passed in a list
+ df = DataFrame({'A': [0, 0, 0, 1, 1, 1],
+ 'B': [1, 1, 2, 2, 3, 3],
+ 'C': [1, 2, 3, 4, 5, 6]})
+ # Group by single column
+ expected = df.groupby('A').sum()
+ g = df.groupby([pd.Grouper(key='A')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group by two columns
+ # using a combination of strings and Grouper objects
+ expected = df.groupby(['A', 'B']).sum()
+
+ # Group with two Grouper objects
+ g = df.groupby([pd.Grouper(key='A'), pd.Grouper(key='B')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group with a string and a Grouper object
+ g = df.groupby(['A', pd.Grouper(key='B')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group with a Grouper object and a string
+ g = df.groupby([pd.Grouper(key='A'), 'B'])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
# GH8866
s = Series(np.arange(8, dtype='int64'),
index=pd.MultiIndex.from_product(
@@ -1336,6 +1366,18 @@ def nsum(x):
for result in results:
assert_series_equal(result, expected, check_names=False)
+ def test_transform_coercion(self):
+
+ # 14457
+ # when we are transforming be sure to not coerce
+ # via assignment
+ df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
+ g = df.groupby('A')
+
+ expected = g.transform(np.mean)
+ result = g.transform(lambda x: np.mean(x))
+ assert_frame_equal(result, expected)
+
def test_with_na(self):
index = Index(np.arange(10))
@@ -4694,6 +4736,25 @@ def test_groupby_multiindex_not_lexsorted(self):
result = not_lexsorted_df.groupby('a').mean()
tm.assert_frame_equal(expected, result)
+ # a transforming function should work regardless of sort
+ # GH 14776
+ df = DataFrame({'x': ['a', 'a', 'b', 'a'],
+ 'y': [1, 1, 2, 2],
+ 'z': [1, 2, 3, 4]}).set_index(['x', 'y'])
+ self.assertFalse(df.index.is_lexsorted())
+
+ for level in [0, 1, [0, 1]]:
+ for sort in [False, True]:
+ result = df.groupby(level=level, sort=sort).apply(
+ DataFrame.drop_duplicates)
+ expected = df
+ tm.assert_frame_equal(expected, result)
+
+ result = df.sort_index().groupby(level=level, sort=sort).apply(
+ DataFrame.drop_duplicates)
+ expected = df.sort_index()
+ tm.assert_frame_equal(expected, result)
+
def test_groupby_levels_and_columns(self):
# GH9344, GH9049
idx_names = ['x', 'y']
@@ -6712,6 +6773,13 @@ def test_nunique_with_object(self):
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
+ def test_nunique_with_empty_series(self):
+ # GH 12553
+ data = pd.Series(name='name')
+ result = data.groupby(level=0).nunique()
+ expected = pd.Series(name='name', dtype='int64')
+ tm.assert_series_equal(result, expected)
+
def test_transform_with_non_scalar_group(self):
# GH 10165
cols = pd.MultiIndex.from_tuples([
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 6a97f195abba7..db1c8da4cae73 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -82,7 +82,7 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
- m = re.search('M8\[ns,\s*(\w+\/?\w*)\]', typestr)
+ m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 516c406f8d54f..4e7ace4173227 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -554,7 +554,7 @@ def test_xs_level_multiple(self):
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
- df = read_table(StringIO(text), sep='\s+', engine='python')
+ df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
@@ -588,7 +588,7 @@ def test_xs_level0(self):
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
- df = read_table(StringIO(text), sep='\s+', engine='python')
+ df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index f00fdd196abea..be634228b1b6e 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1004,13 +1004,20 @@ def prng(self):
def test_int64_add_overflow():
# see gh-14068
- msg = "too (big|large) to convert"
+ msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
+ n = np.iinfo(np.int64).min
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), m)
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), np.array([m, m]))
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([n, n]), n)
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([n, n]), np.array([n, n]))
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assertRaisesRegexp(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
nanops._checked_add_with_arr(np.array([m, m]),
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index a197037789fd2..9cb2dd5a40ac4 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -496,8 +496,8 @@ def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
- "shape of value must be \(3, 2\), "
- "shape of given object was \(4, 2\)"):
+ r"shape of value must be \(3, 2\), "
+ r"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
@@ -1128,24 +1128,24 @@ def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
- "Shape of passed values is \(3, 4, 5\), "
- "indices imply \(4, 5, 5\)",
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
- "Shape of passed values is \(3, 4, 5\), "
- "indices imply \(5, 4, 5\)",
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
- "Shape of passed values is \(3, 4, 5\), "
- "indices imply \(5, 5, 4\)",
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 9a3505c3421e0..bbcd856250c51 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -426,7 +426,7 @@ def test_replace(self):
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
- result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
+ result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
@@ -670,12 +670,12 @@ def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
- result = s.str.extract('(\d)', expand=False)
+ result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
- '(?P<letter>\D)(?P<number>\d)?', expand=False)
+ r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
@@ -828,12 +828,13 @@ def test_extract_optional_groups(self):
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
- result = Series(data, index=index).str.extract('(\d)', expand=True)
+ result = Series(data, index=index).str.extract(
+ r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
- '(?P<letter>\D)(?P<number>\d)?', expand=True)
+ r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
@@ -1023,7 +1024,7 @@ def test_extractall_no_matches(self):
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
- res = s.str.extractall("[ab](?P<digit>\d)")
+ res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
@@ -1034,12 +1035,12 @@ def test_extractall_stringindex(self):
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
- res = idx.str.extractall("[ab](?P<digit>\d)")
+ res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
- res = s.str.extractall("[ab](?P<digit>\d)")
+ res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index c242213ee226f..7a217ed9dbd86 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -319,10 +319,10 @@ def test_numpy_array_equal_copy_flag(self):
a = np.array([1, 2, 3])
b = a.copy()
c = a.view()
- expected = 'array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)'
+ expected = r'array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)'
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(a, b, check_same='same')
- expected = 'array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)'
+ expected = r'array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)'
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(a, c, check_same='copy')
diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py
index 9193880df7feb..cb12048676d26 100644
--- a/pandas/tests/test_util.py
+++ b/pandas/tests/test_util.py
@@ -2,7 +2,10 @@
import nose
from collections import OrderedDict
-from pandas.util._move import move_into_mutable_buffer, BadMove
+import sys
+import unittest
+from uuid import uuid4
+from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
from pandas.util.decorators import deprecate_kwarg
from pandas.util.validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
@@ -94,8 +97,8 @@ def test_bad_arg_length_max_value_single(self):
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
- msg = ("{fname}\(\) takes at most {max_length} "
- "argument \({actual_length} given\)"
+ msg = (r"{fname}\(\) takes at most {max_length} "
+ r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
@@ -111,8 +114,8 @@ def test_bad_arg_length_max_value_multiple(self):
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
- msg = ("{fname}\(\) takes at most {max_length} "
- "arguments \({actual_length} given\)"
+ msg = (r"{fname}\(\) takes at most {max_length} "
+ r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
@@ -124,7 +127,7 @@ def test_bad_arg_length_max_value_multiple(self):
def test_not_all_defaults(self):
bad_arg = 'foo'
msg = ("the '{arg}' parameter is not supported "
- "in the pandas implementation of {func}\(\)".
+ r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
@@ -160,8 +163,8 @@ def test_bad_kwarg(self):
compat_args[goodarg] = 'foo'
compat_args[badarg + 'o'] = 'bar'
kwargs = {goodarg: 'foo', badarg: 'bar'}
- msg = ("{fname}\(\) got an unexpected "
- "keyword argument '{arg}'".format(
+ msg = (r"{fname}\(\) got an unexpected "
+ r"keyword argument '{arg}'".format(
fname=self.fname, arg=badarg))
with tm.assertRaisesRegexp(TypeError, msg):
@@ -169,8 +172,8 @@ def test_bad_kwarg(self):
def test_not_all_none(self):
bad_arg = 'foo'
- msg = ("the '{arg}' parameter is not supported "
- "in the pandas implementation of {func}\(\)".
+ msg = (r"the '{arg}' parameter is not supported "
+ r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
@@ -209,8 +212,8 @@ def test_invalid_total_length_max_length_one(self):
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
- msg = ("{fname}\(\) takes at most {max_length} "
- "argument \({actual_length} given\)"
+ msg = (r"{fname}\(\) takes at most {max_length} "
+ r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
@@ -227,8 +230,8 @@ def test_invalid_total_length_max_length_multiple(self):
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
- msg = ("{fname}\(\) takes at most {max_length} "
- "arguments \({actual_length} given\)"
+ msg = (r"{fname}\(\) takes at most {max_length} "
+ r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
@@ -245,8 +248,8 @@ def test_no_args_with_kwargs(self):
compat_args['foo'] = -5
compat_args[bad_arg] = 1
- msg = ("the '{arg}' parameter is not supported "
- "in the pandas implementation of {func}\(\)".
+ msg = (r"the '{arg}' parameter is not supported "
+ r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
args = ()
@@ -272,8 +275,8 @@ def test_duplicate_argument(self):
kwargs = {'foo': None, 'bar': None}
args = (None,) # duplicate value for 'foo'
- msg = ("{fname}\(\) got multiple values for keyword "
- "argument '{arg}'".format(fname=self.fname, arg='foo'))
+ msg = (r"{fname}\(\) got multiple values for keyword "
+ r"argument '{arg}'".format(fname=self.fname, arg='foo'))
with tm.assertRaisesRegexp(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
@@ -296,6 +299,14 @@ def test_validation(self):
class TestMove(tm.TestCase):
+ def test_cannot_create_instance_of_stolenbuffer(self):
+ """Stolen buffers need to be created through the smart constructor
+ ``move_into_mutable_buffer`` which has a bunch of checks in it.
+ """
+ msg = "cannot create 'pandas.util._move.stolenbuf' instances"
+ with tm.assertRaisesRegexp(TypeError, msg):
+ stolenbuf()
+
def test_more_than_one_ref(self):
"""Test case for when we try to use ``move_into_mutable_buffer`` when
the object being moved has other references.
@@ -325,6 +336,46 @@ def test_exactly_one_ref(self):
# materialize as bytearray to show that it is mutable
self.assertEqual(bytearray(as_stolen_buf), b'test')
+ @unittest.skipIf(
+ sys.version_info[0] > 2,
+ 'bytes objects cannot be interned in py3',
+ )
+ def test_interned(self):
+ salt = uuid4().hex
+
+ def make_string():
+ # We need to actually create a new string so that it has refcount
+ # one. We use a uuid so that we know the string could not already
+ # be in the intern table.
+ return ''.join(('testing: ', salt))
+
+ # This should work, the string has one reference on the stack.
+ move_into_mutable_buffer(make_string())
+
+ refcount = [None] # nonlocal
+
+ def ref_capture(ob):
+ # Subtract two because those are the references owned by this
+ # frame:
+ # 1. The local variables of this stack frame.
+ # 2. The python data stack of this stack frame.
+ refcount[0] = sys.getrefcount(ob) - 2
+ return ob
+
+ with tm.assertRaises(BadMove):
+ # If we intern the string it will still have one reference but now
+ # it is in the intern table so if other people intern the same
+ # string while the mutable buffer holds the first string they will
+ # be the same instance.
+ move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa
+
+ self.assertEqual(
+ refcount[0],
+ 1,
+ msg='The BadMove was probably raised for refcount reasons instead'
+ ' of interning reasons',
+ )
+
def test_numpy_errstate_is_default():
# The defaults since numpy 1.6.0
diff --git a/pandas/tools/hashing.py b/pandas/tools/hashing.py
new file mode 100644
index 0000000000000..aa18b8bc70c37
--- /dev/null
+++ b/pandas/tools/hashing.py
@@ -0,0 +1,137 @@
+"""
+data hash pandas / numpy objects
+"""
+
+import numpy as np
+from pandas import _hash, Series, factorize, Categorical, Index
+from pandas.lib import infer_dtype
+from pandas.types.generic import ABCIndexClass, ABCSeries, ABCDataFrame
+from pandas.types.common import is_categorical_dtype
+
+# 16 byte long hashing key
+_default_hash_key = '0123456789123456'
+
+
+def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None):
+ """
+ Return a data hash of the Index/Series/DataFrame
+
+ .. versionadded:: 0.19.2
+
+ Parameters
+ ----------
+ index : boolean, default True
+ include the index in the hash (if Series/DataFrame)
+ encoding : string, default 'utf8'
+ encoding for data & key when strings
+ hash_key : string key to encode, default to _default_hash_key
+
+ Returns
+ -------
+ Series of uint64, same length as the object
+
+ """
+ if hash_key is None:
+ hash_key = _default_hash_key
+
+ def adder(h, hashed_to_add):
+ h = np.multiply(h, np.uint(3), h)
+ return np.add(h, hashed_to_add, h)
+
+ if isinstance(obj, ABCIndexClass):
+ h = hash_array(obj.values, encoding, hash_key).astype('uint64')
+ h = Series(h, index=obj, dtype='uint64')
+ elif isinstance(obj, ABCSeries):
+ h = hash_array(obj.values, encoding, hash_key).astype('uint64')
+ if index:
+ h = adder(h, hash_pandas_object(obj.index,
+ index=False,
+ encoding=encoding,
+ hash_key=hash_key).values)
+ h = Series(h, index=obj.index, dtype='uint64')
+ elif isinstance(obj, ABCDataFrame):
+ cols = obj.iteritems()
+ first_series = next(cols)[1]
+ h = hash_array(first_series.values, encoding,
+ hash_key).astype('uint64')
+ for _, col in cols:
+ h = adder(h, hash_array(col.values, encoding, hash_key))
+ if index:
+ h = adder(h, hash_pandas_object(obj.index,
+ index=False,
+ encoding=encoding,
+ hash_key=hash_key).values)
+
+ h = Series(h, index=obj.index, dtype='uint64')
+ else:
+ raise TypeError("Unexpected type for hashing %s" % type(obj))
+ return h
+
+
+def hash_array(vals, encoding='utf8', hash_key=None):
+ """
+ Given a 1d array, return an array of deterministic integers.
+
+ .. versionadded:: 0.19.2
+
+ Parameters
+ ----------
+ vals : ndarray
+ encoding : string, default 'utf8'
+ encoding for data & key when strings
+ hash_key : string key to encode, default to _default_hash_key
+
+ Returns
+ -------
+ 1d uint64 numpy array of hash values, same length as the vals
+
+ """
+
+    # work with categoricals as ints. (This check is above the complex
+    # check so that we don't ask numpy if categorical is a subdtype of
+    # complex, as it will choke.)
+ if hash_key is None:
+ hash_key = _default_hash_key
+
+ if is_categorical_dtype(vals.dtype):
+ vals = vals.codes
+
+ # we'll be working with everything as 64-bit values, so handle this
+ # 128-bit value early
+ if np.issubdtype(vals.dtype, np.complex128):
+ return hash_array(vals.real) + 23 * hash_array(vals.imag)
+
+ # MAIN LOGIC:
+ inferred = infer_dtype(vals)
+
+ # First, turn whatever array this is into unsigned 64-bit ints, if we can
+ # manage it.
+ if inferred == 'boolean':
+ vals = vals.astype('u8')
+
+ if (np.issubdtype(vals.dtype, np.datetime64) or
+ np.issubdtype(vals.dtype, np.timedelta64) or
+ np.issubdtype(vals.dtype, np.number)) and vals.dtype.itemsize <= 8:
+
+ vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
+ else:
+
+ # its MUCH faster to categorize object dtypes, then hash and rename
+ codes, categories = factorize(vals, sort=False)
+ categories = Index(categories)
+ c = Series(Categorical(codes, categories,
+ ordered=False, fastpath=True))
+ vals = _hash.hash_object_array(categories.values,
+ hash_key,
+ encoding)
+
+ # rename & extract
+ vals = c.cat.rename_categories(Index(vals)).astype(np.uint64).values
+
+ # Then, redistribute these 64-bit ints within the space of 64-bit ints
+ vals ^= vals >> 30
+ vals *= np.uint64(0xbf58476d1ce4e5b9)
+ vals ^= vals >> 27
+ vals *= np.uint64(0x94d049bb133111eb)
+ vals ^= vals >> 31
+ return vals
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8c43195f5552..36f88074bbbc6 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -5,6 +5,8 @@
import copy
import warnings
+import string
+
import numpy as np
from pandas.compat import range, lrange, lzip, zip, map, filter
import pandas.compat as compat
@@ -28,7 +30,8 @@
is_list_like,
_ensure_int64,
_ensure_float64,
- _ensure_object)
+ _ensure_object,
+ _get_dtype)
from pandas.types.missing import na_value_for_dtype
from pandas.core.generic import NDFrame
@@ -259,7 +262,8 @@ def _merger(x, y):
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
- by=None,
+ left_index=False, right_index=False,
+ by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True):
@@ -270,8 +274,8 @@ def merge_asof(left, right, on=None,
DataFrame whose 'on' key is less than or equal to the left's key. Both
DataFrames must be sorted by the key.
- Optionally perform group-wise merge. This searches for the nearest match
- on the 'on' key within the same group according to 'by'.
+ Optionally match on equivalent keys with 'by' before searching for nearest
+ match with 'on'.
.. versionadded:: 0.19.0
@@ -288,9 +292,28 @@ def merge_asof(left, right, on=None,
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
- by : column name
- Group both the left and right DataFrames by the group column; perform
- the merge operation on these pieces and recombine.
+ left_index : boolean
+ Use the index of the left DataFrame as the join key.
+
+ .. versionadded:: 0.19.2
+
+ right_index : boolean
+ Use the index of the right DataFrame as the join key.
+
+ .. versionadded:: 0.19.2
+
+ by : column name or list of column names
+ Match on these columns before performing merge operation.
+    left_by : column name or list of column names
+        Field names to match on in the left DataFrame.
+
+ .. versionadded:: 0.19.2
+
+    right_by : column name or list of column names
+        Field names to match on in the right DataFrame.
+
+ .. versionadded:: 0.19.2
+
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
@@ -348,6 +371,28 @@ def merge_asof(left, right, on=None,
3 5 b 3.0
6 10 c 7.0
+ We can use indexed DataFrames as well.
+
+ >>> left
+ left_val
+ 1 a
+ 5 b
+ 10 c
+
+ >>> right
+ right_val
+ 1 1
+ 2 2
+ 3 3
+ 6 6
+ 7 7
+
+ >>> pd.merge_asof(left, right, left_index=True, right_index=True)
+ left_val right_val
+ 1 a 1
+ 5 b 3
+ 10 c 7
+
Here is a real-world times-series example
>>> quotes
@@ -371,7 +416,7 @@ def merge_asof(left, right, on=None,
By default we are taking the asof of the quotes
- >>> pd.asof_merge(trades, quotes,
+ >>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
@@ -383,7 +428,7 @@ def merge_asof(left, right, on=None,
We only asof within 2ms betwen the quote time and the trade time
- >>> pd.asof_merge(trades, quotes,
+ >>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
@@ -398,7 +443,7 @@ def merge_asof(left, right, on=None,
and we exclude exact matches on time. However *prior* data will
propogate forward
- >>> pd.asof_merge(trades, quotes,
+ >>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
@@ -418,7 +463,9 @@ def merge_asof(left, right, on=None,
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
- by=by, suffixes=suffixes,
+ left_index=left_index, right_index=right_index,
+ by=by, left_by=left_by, right_by=right_by,
+ suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches)
return op.get_result()
@@ -472,6 +519,15 @@ def __init__(self, left, right, how='inner', on=None,
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
+ if not is_bool(left_index):
+ raise ValueError(
+ 'left_index parameter must be of type bool, not '
+ '{0}'.format(type(left_index)))
+ if not is_bool(right_index):
+ raise ValueError(
+ 'right_index parameter must be of type bool, not '
+ '{0}'.format(type(right_index)))
+
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
@@ -641,7 +697,7 @@ def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
- if self.left_index and self.right_index:
+ if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True)
elif self.right_index and self.how == 'left':
@@ -722,6 +778,16 @@ def _get_merge_keys(self):
is_rkey = lambda x: isinstance(
x, (np.ndarray, ABCSeries)) and len(x) == len(right)
+ # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
+ # user could, for example, request 'left_index' and 'left_by'. In a
+ # regular pd.merge(), users cannot specify both 'left_index' and
+ # 'left_on'. (Instead, users have a MultiIndex). That means the
+ # self.left_on in this function is always empty in a pd.merge(), but
+ # a pd.merge_asof(left_index=True, left_by=...) will result in a
+ # self.left_on array with a None in the middle of it. This requires
+ # a work-around as designated in the code below.
+ # See _validate_specification() for where this happens.
+
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
@@ -731,12 +797,21 @@ def _get_merge_keys(self):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
- right_keys.append(right[rk]._values)
- join_names.append(rk)
+ if rk is not None:
+ right_keys.append(right[rk]._values)
+ join_names.append(rk)
+ else:
+ # work-around for merge_asof(right_index=True)
+ right_keys.append(right.index)
+ join_names.append(right.index.name)
else:
if not is_rkey(rk):
- right_keys.append(right[rk]._values)
- if lk == rk:
+ if rk is not None:
+ right_keys.append(right[rk]._values)
+ else:
+ # work-around for merge_asof(right_index=True)
+ right_keys.append(right.index)
+ if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
@@ -744,8 +819,13 @@ def _get_merge_keys(self):
left_drop.append(lk)
else:
right_keys.append(rk)
- left_keys.append(left[lk]._values)
- join_names.append(lk)
+ if lk is not None:
+ left_keys.append(left[lk]._values)
+ join_names.append(lk)
+ else:
+ # work-around for merge_asof(left_index=True)
+ left_keys.append(left.index)
+ join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
@@ -870,13 +950,15 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
- def __init__(self, left, right, on=None, left_on=None,
- right_on=None, axis=1,
+ def __init__(self, left, right, on=None, left_on=None, right_on=None,
+ left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
+ left_index=left_index,
+ right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
@@ -917,17 +999,13 @@ def get_result(self):
return result
-_asof_functions = {
- 'int64_t': _join.asof_join_int64_t,
- 'double': _join.asof_join_double,
-}
+def _asof_function(on_type):
+ return getattr(_join, 'asof_join_%s' % on_type, None)
+
+
+def _asof_by_function(on_type, by_type):
+ return getattr(_join, 'asof_join_%s_by_%s' % (on_type, by_type), None)
-_asof_by_functions = {
- ('int64_t', 'int64_t'): _join.asof_join_int64_t_by_int64_t,
- ('double', 'int64_t'): _join.asof_join_double_by_int64_t,
- ('int64_t', 'object'): _join.asof_join_int64_t_by_object,
- ('double', 'object'): _join.asof_join_double_by_object,
-}
_type_casters = {
'int64_t': _ensure_int64,
@@ -935,9 +1013,32 @@ def get_result(self):
'object': _ensure_object,
}
+_cython_types = {
+ 'uint8': 'uint8_t',
+ 'uint32': 'uint32_t',
+ 'uint16': 'uint16_t',
+ 'uint64': 'uint64_t',
+ 'int8': 'int8_t',
+ 'int32': 'int32_t',
+ 'int16': 'int16_t',
+ 'int64': 'int64_t',
+ 'float16': 'error',
+ 'float32': 'float',
+ 'float64': 'double',
+}
+
def _get_cython_type(dtype):
- """ Given a dtype, return 'int64_t', 'double', or 'object' """
+ """ Given a dtype, return a C name like 'int64_t' or 'double' """
+ type_name = _get_dtype(dtype).name
+ ctype = _cython_types.get(type_name, 'object')
+ if ctype == 'error':
+ raise MergeError('unsupported type: ' + type_name)
+ return ctype
+
+
+def _get_cython_type_upcast(dtype):
+ """ Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
@@ -949,19 +1050,23 @@ def _get_cython_type(dtype):
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
- def __init__(self, left, right, on=None, by=None, left_on=None,
- right_on=None, axis=1,
- suffixes=('_x', '_y'), copy=True,
+ def __init__(self, left, right, on=None, left_on=None, right_on=None,
+ left_index=False, right_index=False,
+ by=None, left_by=None, right_by=None,
+ axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True):
self.by = by
+ self.left_by = left_by
+ self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
- right_on=right_on, axis=axis,
+ right_on=right_on, left_index=left_index,
+ right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
@@ -969,23 +1074,39 @@ def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
# we only allow on to be a single item for on
- if len(self.left_on) != 1:
+ if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
- if len(self.right_on) != 1:
+ if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
- # add by to our key-list so we can have it in the
- # output as a key
+ if self.left_index and isinstance(self.left.index, MultiIndex):
+ raise MergeError("left can only have one index")
+
+ if self.right_index and isinstance(self.right.index, MultiIndex):
+ raise MergeError("right can only have one index")
+
+ # set 'by' columns
if self.by is not None:
- if not is_list_like(self.by):
- self.by = [self.by]
+ if self.left_by is not None or self.right_by is not None:
+ raise MergeError('Can only pass by OR left_by '
+ 'and right_by')
+ self.left_by = self.right_by = self.by
+ if self.left_by is None and self.right_by is not None:
+ raise MergeError('missing left_by')
+ if self.left_by is not None and self.right_by is None:
+ raise MergeError('missing right_by')
- if len(self.by) != 1:
- raise MergeError("can only asof by a single key")
+ # add by to our key-list so we can have it in the
+ # output as a key
+ if self.left_by is not None:
+ if not is_list_like(self.left_by):
+ self.left_by = [self.left_by]
+ if not is_list_like(self.right_by):
+ self.right_by = [self.right_by]
- self.left_on = self.by + list(self.left_on)
- self.right_on = self.by + list(self.right_on)
+ self.left_on = self.left_by + list(self.left_on)
+ self.right_on = self.right_by + list(self.right_on)
@property
def _asof_key(self):
@@ -1008,11 +1129,11 @@ def _get_merge_keys(self):
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
- lt = left_join_keys[self.left_on.index(self._asof_key)]
+ lt = left_join_keys[-1]
msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
- if is_datetime64_dtype(lt):
+ if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
@@ -1025,7 +1146,7 @@ def _get_merge_keys(self):
raise MergeError("tolerance must be positive")
else:
- raise MergeError(msg)
+ raise MergeError("key must be integer or timestamp")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
@@ -1037,9 +1158,18 @@ def _get_merge_keys(self):
def _get_join_indexers(self):
""" return the join indexers """
+ def flip(xs):
+ """ unlike np.transpose, this returns an array of tuples """
+ labels = list(string.ascii_lowercase[:len(xs)])
+ dtypes = [x.dtype for x in xs]
+ labeled_dtypes = list(zip(labels, dtypes))
+ return np.array(lzip(*xs), labeled_dtypes)
+
# values to compare
- left_values = self.left_join_keys[-1]
- right_values = self.right_join_keys[-1]
+ left_values = (self.left.index.values if self.left_index else
+ self.left_join_keys[-1])
+ right_values = (self.right.index.values if self.right_index else
+ self.right_join_keys[-1])
tolerance = self.tolerance
# we required sortedness in the join keys
@@ -1057,23 +1187,24 @@ def _get_join_indexers(self):
tolerance = tolerance.value
# a "by" parameter requires special handling
- if self.by is not None:
- left_by_values = self.left_join_keys[0]
- right_by_values = self.right_join_keys[0]
-
- # choose appropriate function by type
- on_type = _get_cython_type(left_values.dtype)
- by_type = _get_cython_type(left_by_values.dtype)
+ if self.left_by is not None:
+ if len(self.left_join_keys) > 2:
+ # get tuple representation of values if more than one
+ left_by_values = flip(self.left_join_keys[0:-1])
+ right_by_values = flip(self.right_join_keys[0:-1])
+ else:
+ left_by_values = self.left_join_keys[0]
+ right_by_values = self.right_join_keys[0]
- on_type_caster = _type_casters[on_type]
+ # upcast 'by' parameter because HashTable is limited
+ by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
- func = _asof_by_functions[(on_type, by_type)]
-
- left_values = on_type_caster(left_values)
- right_values = on_type_caster(right_values)
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
+ # choose appropriate function by type
+ on_type = _get_cython_type(left_values.dtype)
+ func = _asof_by_function(on_type, by_type)
return func(left_values,
right_values,
left_by_values,
@@ -1083,12 +1214,7 @@ def _get_join_indexers(self):
else:
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
- type_caster = _type_casters[on_type]
- func = _asof_functions[on_type]
-
- left_values = type_caster(left_values)
- right_values = type_caster(right_values)
-
+ func = _asof_function(on_type)
return func(left_values,
right_values,
self.allow_exact_matches,
@@ -1283,7 +1409,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
- axis : {0, 1, ...}, default 0
+ axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
@@ -1411,6 +1537,12 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
sample = objs[0]
self.objs = objs
+ # Standardize axis parameter to int
+ if isinstance(sample, Series):
+ axis = DataFrame()._get_axis_number(axis)
+ else:
+ axis = sample._get_axis_number(axis)
+
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 94b464f6fca6c..9e064a1d1fc99 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -101,10 +101,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
else:
values_multi = False
values = [values]
- else:
- values = list(data.columns.drop(keys))
- if values_passed:
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
@@ -117,6 +114,15 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
if len(to_filter) < len(data.columns):
data = data[to_filter]
+ else:
+ values = data.columns
+ for key in keys:
+ try:
+ values = values.drop(key)
+ except (TypeError, ValueError):
+ pass
+ values = list(values)
+
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index d46dc4d355b4c..484270ecbda0b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2136,9 +2136,10 @@ def _args_adjust(self):
def _get_ind(self, y):
if self.ind is None:
- sample_range = max(y) - min(y)
- ind = np.linspace(min(y) - 0.5 * sample_range,
- max(y) + 0.5 * sample_range, 1000)
+ # np.nanmax() and np.nanmin() ignore the missing values
+ sample_range = np.nanmax(y) - np.nanmin(y)
+ ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
+ np.nanmax(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
diff --git a/pandas/tools/tests/test_hashing.py b/pandas/tools/tests/test_hashing.py
new file mode 100644
index 0000000000000..6e5f30fb7a52d
--- /dev/null
+++ b/pandas/tools/tests/test_hashing.py
@@ -0,0 +1,177 @@
+import numpy as np
+import pandas as pd
+
+from pandas import DataFrame, Series, Index
+from pandas.tools.hashing import hash_array, hash_pandas_object
+import pandas.util.testing as tm
+
+
+class TestHashing(tm.TestCase):
+
+ _multiprocess_can_split_ = True
+
+ def setUp(self):
+ self.df = DataFrame(
+ {'i32': np.array([1, 2, 3] * 3, dtype='int32'),
+ 'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),
+ 'cat': Series(['a', 'b', 'c'] * 3).astype('category'),
+ 'obj': Series(['d', 'e', 'f'] * 3),
+ 'bool': np.array([True, False, True] * 3),
+ 'dt': Series(pd.date_range('20130101', periods=9)),
+ 'dt_tz': Series(pd.date_range('20130101', periods=9,
+ tz='US/Eastern')),
+ 'td': Series(pd.timedelta_range('2000', periods=9))})
+
+ def test_consistency(self):
+ # check that our hash doesn't change because of a mistake
+ # in the actual code; this is the ground truth
+ result = hash_pandas_object(Index(['foo', 'bar', 'baz']))
+ expected = Series(np.array([3600424527151052760, 1374399572096150070,
+ 477881037637427054], dtype='uint64'),
+ index=['foo', 'bar', 'baz'])
+ tm.assert_series_equal(result, expected)
+
+ def test_hash_array(self):
+ for name, s in self.df.iteritems():
+ a = s.values
+ tm.assert_numpy_array_equal(hash_array(a), hash_array(a))
+
+ def check_equal(self, obj, **kwargs):
+ a = hash_pandas_object(obj, **kwargs)
+ b = hash_pandas_object(obj, **kwargs)
+ tm.assert_series_equal(a, b)
+
+ kwargs.pop('index', None)
+ a = hash_pandas_object(obj, **kwargs)
+ b = hash_pandas_object(obj, **kwargs)
+ tm.assert_series_equal(a, b)
+
+ def check_not_equal_with_index(self, obj):
+
+ # check that we are not hashing the same if
+ # we include the index
+ if not isinstance(obj, Index):
+ a = hash_pandas_object(obj, index=True)
+ b = hash_pandas_object(obj, index=False)
+ self.assertFalse((a == b).all())
+
+ def test_hash_pandas_object(self):
+
+ for obj in [Series([1, 2, 3]),
+ Series([1.0, 1.5, 3.2]),
+ Series([1.0, 1.5, np.nan]),
+ Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
+ Series(['a', 'b', 'c']),
+ Series(['a', np.nan, 'c']),
+ Series(['a', None, 'c']),
+ Series([True, False, True]),
+ Index([1, 2, 3]),
+ Index([True, False, True]),
+ DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),
+ tm.makeMissingDataframe(),
+ tm.makeMixedDataFrame(),
+ tm.makeTimeDataFrame(),
+ tm.makeTimeSeries(),
+ tm.makeTimedeltaIndex()]:
+ self.check_equal(obj)
+ self.check_not_equal_with_index(obj)
+
+ def test_hash_pandas_object2(self):
+ for name, s in self.df.iteritems():
+ self.check_equal(s)
+ self.check_not_equal_with_index(s)
+
+ def test_hash_pandas_empty_object(self):
+ for obj in [Series([], dtype='float64'),
+ Series([], dtype='object'),
+ Index([])]:
+ self.check_equal(obj)
+
+ # these are by-definition the same with
+ # or w/o the index as the data is empty
+
+ def test_errors(self):
+
+ for obj in [pd.Timestamp('20130101'), tm.makePanel()]:
+ def f():
+ hash_pandas_object(f)
+
+ self.assertRaises(TypeError, f)
+
+ def test_hash_keys(self):
+ # using different hash keys, should have different hashes
+ # for the same data
+
+ # this only matters for object dtypes
+ obj = Series(list('abc'))
+ a = hash_pandas_object(obj, hash_key='9876543210123456')
+ b = hash_pandas_object(obj, hash_key='9876543210123465')
+ self.assertTrue((a != b).all())
+
+ def test_invalid_key(self):
+ # this only matters for object dtypes
+ def f():
+ hash_pandas_object(Series(list('abc')), hash_key='foo')
+ self.assertRaises(ValueError, f)
+
+ def test_unsupported_objects(self):
+
+ # mixed objects are not supported
+ obj = Series(['1', 2, 3])
+
+ def f():
+ hash_pandas_object(obj)
+ self.assertRaises(TypeError, f)
+
+ # a MultiIndex is represented as tuples
+ obj = Series([1, 2, 3], index=pd.MultiIndex.from_tuples(
+ [('a', 1), ('a', 2), ('b', 1)]))
+
+ def f():
+ hash_pandas_object(obj)
+ self.assertRaises(TypeError, f)
+
+ def test_alread_encoded(self):
+ # if already encoded then ok
+
+ obj = Series(list('abc')).str.encode('utf8')
+ self.check_equal(obj)
+
+ def test_alternate_encoding(self):
+
+ obj = Series(list('abc'))
+ self.check_equal(obj, encoding='ascii')
+
+ def test_same_len_hash_collisions(self):
+
+ for l in range(8):
+ length = 2**(l + 8) + 1
+ s = tm.rands_array(length, 2)
+ result = hash_array(s, 'utf8')
+ self.assertFalse(result[0] == result[1])
+
+ for l in range(8):
+ length = 2**(l + 8)
+ s = tm.rands_array(length, 2)
+ result = hash_array(s, 'utf8')
+ self.assertFalse(result[0] == result[1])
+
+ def test_hash_collisions(self):
+
+ # hash collisions are bad
+ # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
+ L = ['Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9', # noqa
+ 'Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe'] # noqa
+
+ # these should be different!
+ result1 = hash_array(np.asarray(L[0:1], dtype=object), 'utf8')
+ expected1 = np.array([14963968704024874985], dtype=np.uint64)
+ self.assert_numpy_array_equal(result1, expected1)
+
+ result2 = hash_array(np.asarray(L[1:2], dtype=object), 'utf8')
+ expected2 = np.array([16428432627716348016], dtype=np.uint64)
+ self.assert_numpy_array_equal(result2, expected2)
+
+ result = hash_array(np.asarray(L, dtype=object), 'utf8')
+ self.assert_numpy_array_equal(
+ result, np.concatenate([expected1, expected2], axis=0))
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 6e36100ddd0b4..f078959608f91 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -109,6 +109,15 @@ def test_merge_misspecified(self):
self.assertRaises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
+ def test_index_and_on_parameters_confusion(self):
+ self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
+ left_index=False, right_index=['key1', 'key2'])
+ self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
+ left_index=['key1', 'key2'], right_index=False)
+ self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
+ left_index=['key1', 'key2'],
+ right_index=['key1', 'key2'])
+
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
diff --git a/pandas/tools/tests/test_merge_asof.py b/pandas/tools/tests/test_merge_asof.py
index f413618624592..bbbf1a3bdfff9 100644
--- a/pandas/tools/tests/test_merge_asof.py
+++ b/pandas/tools/tests/test_merge_asof.py
@@ -1,6 +1,7 @@
import nose
import os
+import pytz
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
@@ -117,6 +118,96 @@ def test_basic_categorical(self):
by='ticker')
assert_frame_equal(result, expected)
+ def test_basic_left_index(self):
+
+ # GH14253
+ expected = self.asof
+ trades = self.trades.set_index('time')
+ quotes = self.quotes
+
+ result = merge_asof(trades, quotes,
+ left_index=True,
+ right_on='time',
+ by='ticker')
+ # left-only index uses right's index, oddly
+ expected.index = result.index
+ # time column appears after left's columns
+ expected = expected[result.columns]
+ assert_frame_equal(result, expected)
+
+ def test_basic_right_index(self):
+
+ expected = self.asof
+ trades = self.trades
+ quotes = self.quotes.set_index('time')
+
+ result = merge_asof(trades, quotes,
+ left_on='time',
+ right_index=True,
+ by='ticker')
+ assert_frame_equal(result, expected)
+
+ def test_basic_left_index_right_index(self):
+
+ expected = self.asof.set_index('time')
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index('time')
+
+ result = merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True,
+ by='ticker')
+ assert_frame_equal(result, expected)
+
+ def test_multi_index(self):
+
+ # MultiIndex is prohibited
+ trades = self.trades.set_index(['time', 'price'])
+ quotes = self.quotes.set_index('time')
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True)
+
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index(['time', 'bid'])
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True)
+
+ def test_on_and_index(self):
+
+ # 'on' parameter and index together is prohibited
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index('time')
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ left_on='price',
+ left_index=True,
+ right_index=True)
+
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index('time')
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ right_on='bid',
+ left_index=True,
+ right_index=True)
+
+ def test_basic_left_by_right_by(self):
+
+ # GH14253
+ expected = self.asof
+ trades = self.trades
+ quotes = self.quotes
+
+ result = merge_asof(trades, quotes,
+ on='time',
+ left_by='ticker',
+ right_by='ticker')
+ assert_frame_equal(result, expected)
+
def test_missing_right_by(self):
expected = self.asof
@@ -130,6 +221,117 @@ def test_missing_right_by(self):
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
+ def test_multiby(self):
+ # GH13936
+ trades = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.046',
+ '20160525 13:30:00.048',
+ '20160525 13:30:00.050']),
+ 'ticker': ['MSFT', 'MSFT',
+ 'GOOG', 'GOOG', 'AAPL'],
+ 'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
+ 'price': [51.95, 51.95,
+ 720.77, 720.92, 98.00],
+ 'quantity': [75, 155,
+ 100, 100, 100]},
+ columns=['time', 'ticker', 'exch',
+ 'price', 'quantity'])
+
+ quotes = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.030',
+ '20160525 13:30:00.041',
+ '20160525 13:30:00.045',
+ '20160525 13:30:00.049']),
+ 'ticker': ['GOOG', 'MSFT', 'MSFT',
+ 'MSFT', 'GOOG', 'AAPL'],
+ 'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
+ 'NSDQ', 'ARCA'],
+ 'bid': [720.51, 51.95, 51.97, 51.99,
+ 720.50, 97.99],
+ 'ask': [720.92, 51.96, 51.98, 52.00,
+ 720.93, 98.01]},
+ columns=['time', 'ticker', 'exch', 'bid', 'ask'])
+
+ expected = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.046',
+ '20160525 13:30:00.048',
+ '20160525 13:30:00.050']),
+ 'ticker': ['MSFT', 'MSFT',
+ 'GOOG', 'GOOG', 'AAPL'],
+ 'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
+ 'price': [51.95, 51.95,
+ 720.77, 720.92, 98.00],
+ 'quantity': [75, 155,
+ 100, 100, 100],
+ 'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
+ 'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
+ columns=['time', 'ticker', 'exch',
+ 'price', 'quantity', 'bid', 'ask'])
+
+ result = pd.merge_asof(trades, quotes, on='time',
+ by=['ticker', 'exch'])
+ assert_frame_equal(result, expected)
+
+ def test_multiby_heterogeneous_types(self):
+ # GH13936
+ trades = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.046',
+ '20160525 13:30:00.048',
+ '20160525 13:30:00.050']),
+ 'ticker': [0, 0, 1, 1, 2],
+ 'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
+ 'price': [51.95, 51.95,
+ 720.77, 720.92, 98.00],
+ 'quantity': [75, 155,
+ 100, 100, 100]},
+ columns=['time', 'ticker', 'exch',
+ 'price', 'quantity'])
+
+ quotes = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.030',
+ '20160525 13:30:00.041',
+ '20160525 13:30:00.045',
+ '20160525 13:30:00.049']),
+ 'ticker': [1, 0, 0, 0, 1, 2],
+ 'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
+ 'NSDQ', 'ARCA'],
+ 'bid': [720.51, 51.95, 51.97, 51.99,
+ 720.50, 97.99],
+ 'ask': [720.92, 51.96, 51.98, 52.00,
+ 720.93, 98.01]},
+ columns=['time', 'ticker', 'exch', 'bid', 'ask'])
+
+ expected = pd.DataFrame({
+ 'time': pd.to_datetime(['20160525 13:30:00.023',
+ '20160525 13:30:00.023',
+ '20160525 13:30:00.046',
+ '20160525 13:30:00.048',
+ '20160525 13:30:00.050']),
+ 'ticker': [0, 0, 1, 1, 2],
+ 'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
+ 'price': [51.95, 51.95,
+ 720.77, 720.92, 98.00],
+ 'quantity': [75, 155,
+ 100, 100, 100],
+ 'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
+ 'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
+ columns=['time', 'ticker', 'exch',
+ 'price', 'quantity', 'bid', 'ask'])
+
+ result = pd.merge_asof(trades, quotes, on='time',
+ by=['ticker', 'exch'])
+ assert_frame_equal(result, expected)
+
def test_basic2(self):
expected = self.read_data('asof2.csv')
@@ -293,6 +495,29 @@ def test_tolerance(self):
expected = self.tolerance
assert_frame_equal(result, expected)
+ def test_tolerance_tz(self):
+ # GH 14844
+ left = pd.DataFrame(
+ {'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
+ freq='D', periods=5,
+ tz=pytz.timezone('UTC')),
+ 'value1': np.arange(5)})
+ right = pd.DataFrame(
+ {'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-01'),
+ freq='D', periods=5,
+ tz=pytz.timezone('UTC')),
+ 'value2': list("ABCDE")})
+ result = pd.merge_asof(left, right, on='date',
+ tolerance=pd.Timedelta('1 day'))
+
+ expected = pd.DataFrame(
+ {'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
+ freq='D', periods=5,
+ tz=pytz.timezone('UTC')),
+ 'value1': np.arange(5),
+ 'value2': list("BCDEE")})
+ assert_frame_equal(result, expected)
+
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
@@ -428,6 +653,78 @@ def test_on_float(self):
assert_frame_equal(result, expected)
+ def test_on_specialized_type(self):
+ # GH13936
+ for dtype in [np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.float16, np.float32, np.float64]:
+ df1 = pd.DataFrame({
+ 'value': [5, 2, 25, 100, 78, 120, 79],
+ 'symbol': list("ABCDEFG")},
+ columns=['symbol', 'value'])
+ df1.value = dtype(df1.value)
+
+ df2 = pd.DataFrame({
+ 'value': [0, 80, 120, 125],
+ 'result': list('xyzw')},
+ columns=['value', 'result'])
+ df2.value = dtype(df2.value)
+
+ df1 = df1.sort_values('value').reset_index(drop=True)
+
+ if dtype == np.float16:
+ with self.assertRaises(MergeError):
+ pd.merge_asof(df1, df2, on='value')
+ continue
+
+ result = pd.merge_asof(df1, df2, on='value')
+
+ expected = pd.DataFrame(
+ {'symbol': list("BACEGDF"),
+ 'value': [2, 5, 25, 78, 79, 100, 120],
+ 'result': list('xxxxxyz')
+ }, columns=['symbol', 'value', 'result'])
+ expected.value = dtype(expected.value)
+
+ assert_frame_equal(result, expected)
+
+ def test_on_specialized_type_by_int(self):
+ # GH13936
+ for dtype in [np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.float16, np.float32, np.float64]:
+ df1 = pd.DataFrame({
+ 'value': [5, 2, 25, 100, 78, 120, 79],
+ 'key': [1, 2, 3, 2, 3, 1, 2],
+ 'symbol': list("ABCDEFG")},
+ columns=['symbol', 'key', 'value'])
+ df1.value = dtype(df1.value)
+
+ df2 = pd.DataFrame({
+ 'value': [0, 80, 120, 125],
+ 'key': [1, 2, 2, 3],
+ 'result': list('xyzw')},
+ columns=['value', 'key', 'result'])
+ df2.value = dtype(df2.value)
+
+ df1 = df1.sort_values('value').reset_index(drop=True)
+
+ if dtype == np.float16:
+ with self.assertRaises(MergeError):
+ pd.merge_asof(df1, df2, on='value', by='key')
+ else:
+ result = pd.merge_asof(df1, df2, on='value', by='key')
+
+ expected = pd.DataFrame({
+ 'symbol': list("BACEGDF"),
+ 'key': [2, 1, 3, 3, 2, 2, 1],
+ 'value': [2, 5, 25, 78, 79, 100, 120],
+ 'result': [np.nan, 'x', np.nan, np.nan, np.nan, 'y', 'x']},
+ columns=['symbol', 'key', 'value', 'result'])
+ expected.value = dtype(expected.value)
+
+ assert_frame_equal(result, expected)
+
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame({
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 75c6db23b4bc7..5944fa1b34611 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -131,6 +131,39 @@ def test_pivot_dtypes(self):
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
+ def test_pivot_no_values(self):
+ # GH 14380
+ idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
+ '2011-01-01', '2011-01-02'])
+ df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
+ index=idx)
+ res = df.pivot_table(index=df.index.month, columns=df.index.day)
+
+ exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
+ exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
+ index=[1, 2], columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
+ df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
+ 'dt': pd.date_range('2011-01-01', freq='D',
+ periods=5)},
+ index=idx)
+ res = df.pivot_table(index=df.index.month,
+ columns=pd.Grouper(key='dt', freq='M'))
+ exp_columns = pd.MultiIndex.from_tuples([('A',
+ pd.Timestamp('2011-01-31'))])
+ exp_columns.names = [None, 'dt']
+ exp = pd.DataFrame([3.25, 2.0],
+ index=[1, 2], columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
+ res = df.pivot_table(index=pd.Grouper(freq='A'),
+ columns=pd.Grouper(key='dt', freq='M'))
+ exp = pd.DataFrame([3],
+ index=pd.DatetimeIndex(['2011-12-31']),
+ columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py
index 16731620a1dcd..e5b9c65b515d6 100644
--- a/pandas/tools/tests/test_tile.py
+++ b/pandas/tools/tests/test_tile.py
@@ -271,6 +271,18 @@ def test_series_retbins(self):
np.array([0, 0, 1, 1], dtype=np.int8))
tm.assert_numpy_array_equal(bins, np.array([0, 1.5, 3]))
+ def test_single_bin(self):
+ # issue 14652
+ expected = Series([0, 0])
+
+ s = Series([9., 9.])
+ result = cut(s, 1, labels=False)
+ tm.assert_series_equal(result, expected)
+
+ s = Series([-9., -9.])
+ result = cut(s, 1, labels=False)
+ tm.assert_series_equal(result, expected)
+
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py
index 8c16308d79a31..f9647721e3c5b 100644
--- a/pandas/tools/tests/test_util.py
+++ b/pandas/tools/tests/test_util.py
@@ -4,9 +4,10 @@
import nose
import numpy as np
+from numpy import iinfo
import pandas as pd
-from pandas import date_range, Index
+from pandas import (date_range, Index, _np_version_under1p9)
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
@@ -401,6 +402,41 @@ def test_downcast(self):
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
+ def test_downcast_limits(self):
+ # Test the limits of each downcast. Bug: #14401.
+ # Check to make sure numpy is new enough to run this test.
+ if _np_version_under1p9:
+ raise nose.SkipTest("Numpy version is under 1.9")
+
+ i = 'integer'
+ u = 'unsigned'
+ dtype_downcast_min_max = [
+ ('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
+ ('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
+ ('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
+ ('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
+ ('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
+ ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
+ ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
+ # Test will be skipped until there is more uint64 support.
+ # ('uint64', u, [iinfo(uint64).min, iinfo(uint64).max]),
+ ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
+ ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
+ ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
+ ('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
+ ('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
+ ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
+ ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
+ ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
+ # Test will be skipped until there is more uint64 support.
+ # ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
+ ]
+
+ for dtype, downcast, min_max in dtype_downcast_min_max:
+ series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
+ tm.assert_equal(series.dtype, dtype)
+
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 62bbfc2f630a5..ef75f2f84779b 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -98,8 +98,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
- mn -= .001 * mn
- mx += .001 * mx
+ mn -= .001 * abs(mn)
+ mx += .001 * abs(mx)
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index fec56328c1721..b50bf9dc448bc 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -205,7 +205,7 @@ def to_numeric(arg, errors='raise', downcast=None):
if downcast in ('integer', 'signed'):
typecodes = np.typecodes['Integer']
- elif downcast == 'unsigned' and np.min(values) > 0:
+ elif downcast == 'unsigned' and np.min(values) >= 0:
typecodes = np.typecodes['UnsignedInteger']
elif downcast == 'float':
typecodes = np.typecodes['Float']
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 96213a4aec34d..4645ae24684ff 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -100,7 +100,7 @@ def round(self, freq, *args, **kwargs):
def floor(self, freq):
return self._round(freq, np.floor)
- @Appender(_round_doc % "floor")
+ @Appender(_round_doc % "ceil")
def ceil(self, freq):
return self._round(freq, np.ceil)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index f68750e242f1f..024306edef2d8 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -439,7 +439,7 @@ def _generate(cls, start, end, periods, name, offset,
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
- if not inferred_tz == tz:
+ if not tslib.get_timezone(inferred_tz) == tslib.get_timezone(tz):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
@@ -1453,8 +1453,9 @@ def _maybe_cast_slice_bound(self, label, side, kind):
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
# because label may be passed to searchsorted
- # the bounds need swapped if index is reverse sorted
- if self.is_monotonic_decreasing:
+ # the bounds need swapped if index is reverse sorted and has a
+ # length (is_monotonic_decreasing gives True for empty index)
+ if self.is_monotonic_decreasing and len(self):
return upper if side == 'left' else lower
return lower if side == 'left' else upper
else:
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 051cc8aa4d018..efcde100d1ce7 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -68,6 +68,7 @@ def wrapper(self, other):
other = other.tz_localize(None)
result = func(self, other)
+
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
@@ -552,6 +553,32 @@ def _repr_attrs(self):
out += ': ' + ', '.join(attrs)
return out
+ def __getstate__(self):
+ """Return a pickleable state"""
+ state = self.__dict__.copy()
+
+ # we don't want to actually pickle the calendar object
+ # as it's an np.busdaycalendar; we recreate it on deserialization
+ if 'calendar' in state:
+ del state['calendar']
+ try:
+ state['kwds'].pop('calendar')
+ except KeyError:
+ pass
+
+ return state
+
+ def __setstate__(self, state):
+ """Reconstruct an instance from a pickled state"""
+ self.__dict__ = state
+ if 'weekmask' in state and 'holidays' in state:
+ calendar, holidays = self.get_calendar(weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=None)
+ self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['weekmask'] = state['weekmask']
+
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
@@ -991,30 +1018,6 @@ def get_calendar(self, weekmask, holidays, calendar):
busdaycalendar = np.busdaycalendar(**kwargs)
return busdaycalendar, holidays
- def __getstate__(self):
- """Return a pickleable state"""
- state = self.__dict__.copy()
- del state['calendar']
-
- # we don't want to actually pickle the calendar object
- # as its a np.busyday; we recreate on deserilization
- try:
- state['kwds'].pop('calendar')
- except:
- pass
-
- return state
-
- def __setstate__(self, state):
- """Reconstruct an instance from a pickled state"""
- self.__dict__ = state
- calendar, holidays = self.get_calendar(weekmask=self.weekmask,
- holidays=self.holidays,
- calendar=None)
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['weekmask'] = state['weekmask']
-
@apply_wraps
def apply(self, other):
if self.n <= 0:
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index d5d89c8dc2614..bff665833fbf5 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -64,6 +64,7 @@ def dt64arr_to_periodarr(data, freq, tz):
# --- Period index sketch
+
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
@@ -304,7 +305,7 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
- return PeriodIndex(values, name=name, freq=freq, **kwargs)
+ return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
@@ -325,6 +326,8 @@ def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
+ if values is None:
+ values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
@@ -355,9 +358,8 @@ def __contains__(self, key):
def asi8(self):
return self._values.view('i8')
- @property
+ @cache_readonly
def _int64index(self):
- # do not cache, same as .asi8
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py
index fe64af67af0ed..89aecf2acc07e 100644
--- a/pandas/tseries/plotting.py
+++ b/pandas/tseries/plotting.py
@@ -162,18 +162,37 @@ def _decorate_axes(ax, freq, kwargs):
ax.date_axis_info = None
-def _get_freq(ax, series):
- # get frequency from data
- freq = getattr(series.index, 'freq', None)
- if freq is None:
- freq = getattr(series.index, 'inferred_freq', None)
-
+def _get_ax_freq(ax):
+ """
+ Get the freq attribute of the ax object if set.
+ Also checks shared axes (eg when using secondary yaxis, sharex=True
+ or twinx)
+ """
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
+ # check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
+ if ax_freq is None:
+ # check if a shared ax (sharex/twinx) has already freq set
+ shared_axes = ax.get_shared_x_axes().get_siblings(ax)
+ if len(shared_axes) > 1:
+ for shared_ax in shared_axes:
+ ax_freq = getattr(shared_ax, 'freq', None)
+ if ax_freq is not None:
+ break
+ return ax_freq
+
+
+def _get_freq(ax, series):
+ # get frequency from data
+ freq = getattr(series.index, 'freq', None)
+ if freq is None:
+ freq = getattr(series.index, 'inferred_freq', None)
+
+ ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
@@ -191,7 +210,7 @@ def _get_freq(ax, series):
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
- ax_freq = getattr(ax, 'freq', None)
+ ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
@@ -244,7 +263,7 @@ def _maybe_convert_index(ax, data):
freq = freq.rule_code
if freq is None:
- freq = getattr(ax, 'freq', None)
+ freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
old mode 100644
new mode 100755
index d02c403cb3c66..31781eb3fc131
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -1283,9 +1283,18 @@ def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
#
# See https://github.com/pandas-dev/pandas/issues/8683
+ # 14682 - Since we need to drop the TZ information to perform
+ # the adjustment in the presence of a DST change,
+ # save TZ Info and the DST state of the first and last parameters
+ # so that we can accurately rebuild them at the end.
first_tzinfo = first.tzinfo
+ last_tzinfo = last.tzinfo
+ first_dst = bool(first.dst())
+ last_dst = bool(last.dst())
+
first = first.tz_localize(None)
last = last.tz_localize(None)
+
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
@@ -1320,11 +1329,8 @@ def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
else:
lresult = last.value + offset.nanos
-# return (Timestamp(fresult, tz=first.tz),
-# Timestamp(lresult, tz=last.tz))
-
- return (Timestamp(fresult).tz_localize(first_tzinfo),
- Timestamp(lresult).tz_localize(first_tzinfo))
+ return (Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
+ Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst))
def asfreq(obj, freq, method=None, how=None, normalize=False):
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index a6d58fa3e7ef3..bca50237081e1 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -807,7 +807,7 @@ def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
@@ -1639,7 +1639,7 @@ def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
- msg = "take\(\) got an unexpected keyword argument 'foo'"
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 1735ac4e2efa5..768e9212e6c42 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -1,4 +1,5 @@
import os
+from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from pandas.compat import range, iteritems
@@ -4851,6 +4852,7 @@ def _test_all_offsets(self, n, **kwds):
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
+
t = tstart + offset
if expected_utc_offset is not None:
self.assertTrue(get_utc_offset_hours(t) == expected_utc_offset)
@@ -4890,17 +4892,23 @@ def _make_timestamp(self, string, hrs_offset, tz):
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
- """test moving from daylight savings to standard time"""
+ # test moving from daylight savings to standard time
+ import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
- self._test_all_offsets(
- n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
- hrs_pre, tz),
- expected_utc_offset=hrs_post)
+
+ if dateutil.__version__ != LooseVersion('2.6.0'):
+ # buggy ambiguous behavior in 2.6.0
+ # GH 14621
+ # https://github.com/dateutil/dateutil/issues/321
+ self._test_all_offsets(
+ n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
+ hrs_pre, tz),
+ expected_utc_offset=hrs_post)
def test_springforward_plural(self):
- """test moving from standard to daylight savings"""
+ # test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index e314081eac373..48f9d82e481d0 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2101,14 +2101,6 @@ def test_comp_period(self):
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
- def test_getitem_ndim2(self):
- idx = period_range('2007-01', periods=3, freq='M')
-
- result = idx[:, None]
- # MPL kludge, internally has incorrect shape
- tm.assertIsInstance(result, PeriodIndex)
- self.assertEqual(result.shape, (len(idx), ))
-
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
@@ -3730,11 +3722,11 @@ def test_add_raises(self):
# GH 4731
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
- msg = "unsupported operand type\(s\)"
+ msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
- msg = "unsupported operand type\(s\)"
+ msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
"str" + dt1
@@ -3748,7 +3740,7 @@ def test_sub(self):
self.assertEqual(dt1 - dt2, -14)
self.assertEqual(dt2 - dt1, 14)
- msg = "Input has different freq=M from Period\(freq=D\)"
+ msg = r"Input has different freq=M from Period\(freq=D\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
dt1 - pd.Period('2011-02', freq='M')
@@ -4112,7 +4104,7 @@ def test_period_ops_offset(self):
exp = pd.Period('2011-03-30', freq='D')
self.assertEqual(result, exp)
- msg = "Input cannot be converted to Period\(freq=D\)"
+ msg = r"Input cannot be converted to Period\(freq=D\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
p + offsets.Hour(2)
@@ -4161,7 +4153,7 @@ def test_pi_ops_errors(self):
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
- msg = "unsupported operand type\(s\)"
+ msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
@@ -4265,8 +4257,8 @@ def test_pi_offset_errors(self):
# Series op is applied per Period instance, thus error is raised
# from Period
- msg_idx = "Input has different freq from PeriodIndex\(freq=D\)"
- msg_s = "Input cannot be converted to Period\(freq=D\)"
+ msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
+ msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
old mode 100644
new mode 100755
index 9d3d27f3224b4..b8c060c024867
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1912,7 +1912,33 @@ def test_resample_size(self):
right = Series(val, index=ix)
assert_series_equal(left, right)
- def test_resmaple_dst_anchor(self):
+ def test_resample_across_dst(self):
+ # The test resamples a DatetimeIndex with values before and after a
+ # DST change
+ # Issue: 14682
+
+ # The DatetimeIndex we will start with
+ # (note that DST happens at 03:00+02:00 -> 02:00+01:00)
+ # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
+ df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
+ dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
+ .dt.tz_localize('UTC')
+ .dt.tz_convert('Europe/Madrid'))
+
+ # The expected DatetimeIndex after resampling.
+ # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
+ df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
+ dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
+ .dt.tz_localize('UTC')
+ .dt.tz_convert('Europe/Madrid'))
+ df = DataFrame([5, 5], index=dti1)
+
+ result = df.resample(rule='H').sum()
+ expected = DataFrame([5, 5], index=dti2)
+
+ assert_frame_equal(result, expected)
+
+ def test_resample_dst_anchor(self):
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
df = DataFrame([5], index=dti)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 38e210d698035..f0d14014d6559 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -1957,6 +1957,8 @@ def test_add_overflow(self):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
+
+ msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index ac48fcc2551ea..906c0bbb7a479 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -3911,6 +3911,18 @@ def test_slice_with_zero_step_raises(self):
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
+ def test_slice_bounds_empty(self):
+ # GH 14354
+ empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
+
+ right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
+ exp = Timestamp('2015-01-02 23:59:59.999999999')
+ self.assertEqual(right, exp)
+
+ left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
+ exp = Timestamp('2015-01-02 00:00:00')
+ self.assertEqual(left, exp)
+
class TestDatetime64(tm.TestCase):
"""
@@ -4463,6 +4475,15 @@ def test_basics_nanos(self):
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
+ # GH 14415
+ val = np.iinfo(np.int64).min + 80000000000000
+ stamp = Timestamp(val)
+ self.assertEqual(stamp.year, 1677)
+ self.assertEqual(stamp.month, 9)
+ self.assertEqual(stamp.day, 21)
+ self.assertEqual(stamp.microsecond, 145224)
+ self.assertEqual(stamp.nanosecond, 192)
+
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
@@ -5139,11 +5160,13 @@ def test_partial_slice_doesnt_require_monotonicity(self):
timestamp = pd.Timestamp('2014-01-10')
assert_series_equal(nonmonotonic['2014-01-10':], expected)
- self.assertRaisesRegexp(KeyError, "Timestamp\('2014-01-10 00:00:00'\)",
+ self.assertRaisesRegexp(KeyError,
+ r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic[timestamp:])
assert_series_equal(nonmonotonic.ix['2014-01-10':], expected)
- self.assertRaisesRegexp(KeyError, "Timestamp\('2014-01-10 00:00:00'\)",
+ self.assertRaisesRegexp(KeyError,
+ r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic.ix[timestamp:])
@@ -5263,7 +5286,7 @@ def test_to_datetime_with_non_exact(self):
s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00',
'19MAY11 00:00:00Z'])
result = to_datetime(s, format='%d%b%y', exact=False)
- expected = to_datetime(s.str.extract('(\d+\w+\d+)', expand=False),
+ expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False),
format='%d%b%y')
assert_series_equal(result, expected)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 00e8ee631f463..db8cda5c76479 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -4,7 +4,7 @@
import numpy as np
import pytz
-
+from distutils.version import LooseVersion
from pandas.types.dtypes import DatetimeTZDtype
from pandas import (Index, Series, DataFrame, isnull, Timestamp)
@@ -518,8 +518,12 @@ def f():
times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H",
tz=tz, ambiguous='infer')
- self.assertEqual(times[0], Timestamp('2013-10-26 23:00', tz=tz))
- self.assertEqual(times[-1], Timestamp('2013-10-27 01:00', tz=tz))
+ self.assertEqual(times[0], Timestamp('2013-10-26 23:00', tz=tz,
+ freq="H"))
+ if dateutil.__version__ != LooseVersion('2.6.0'):
+ # GH 14621
+ self.assertEqual(times[-1], Timestamp('2013-10-27 01:00', tz=tz,
+ freq="H"))
def test_ambiguous_nat(self):
tz = self.tz('US/Eastern')
@@ -1163,6 +1167,85 @@ class TestTimeZones(tm.TestCase):
def setUp(self):
tm._skip_if_no_pytz()
+ def test_replace(self):
+ # GH 14621
+ # GH 7825
+ # replacing datetime components with and w/o presence of a timezone
+ dt = Timestamp('2016-01-01 09:00:00')
+ result = dt.replace(hour=0)
+ expected = Timestamp('2016-01-01 00:00:00')
+ self.assertEqual(result, expected)
+
+ for tz in self.timezones:
+ dt = Timestamp('2016-01-01 09:00:00', tz=tz)
+ result = dt.replace(hour=0)
+ expected = Timestamp('2016-01-01 00:00:00', tz=tz)
+ self.assertEqual(result, expected)
+
+ # we preserve nanoseconds
+ dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
+ result = dt.replace(hour=0)
+ expected = Timestamp('2016-01-01 00:00:00.000000123', tz=tz)
+ self.assertEqual(result, expected)
+
+ # test all
+ dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
+ result = dt.replace(year=2015, month=2, day=2, hour=0, minute=5,
+ second=5, microsecond=5, nanosecond=5)
+ expected = Timestamp('2015-02-02 00:05:05.000005005', tz=tz)
+ self.assertEqual(result, expected)
+
+ # error
+ def f():
+ dt.replace(foo=5)
+ self.assertRaises(ValueError, f)
+
+ def f():
+ dt.replace(hour=0.1)
+ self.assertRaises(ValueError, f)
+
+ # assert conversion to naive is the same as replacing tzinfo with None
+ dt = Timestamp('2013-11-03 01:59:59.999999-0400', tz='US/Eastern')
+ self.assertEqual(dt.tz_localize(None), dt.replace(tzinfo=None))
+
+ def test_ambiguous_compat(self):
+ # validate that pytz and dateutil are compat for dst
+ # when the transition happens
+ tm._skip_if_no_dateutil()
+ tm._skip_if_no_pytz()
+
+ pytz_zone = 'Europe/London'
+ dateutil_zone = 'dateutil/Europe/London'
+ result_pytz = (Timestamp('2013-10-27 01:00:00')
+ .tz_localize(pytz_zone, ambiguous=0))
+ result_dateutil = (Timestamp('2013-10-27 01:00:00')
+ .tz_localize(dateutil_zone, ambiguous=0))
+ self.assertEqual(result_pytz.value, result_dateutil.value)
+ self.assertEqual(result_pytz.value, 1382835600000000000)
+
+ # dateutil 2.6 buggy w.r.t. ambiguous=0
+ if dateutil.__version__ != LooseVersion('2.6.0'):
+ # GH 14621
+ # https://github.com/dateutil/dateutil/issues/321
+ self.assertEqual(result_pytz.to_pydatetime().tzname(),
+ result_dateutil.to_pydatetime().tzname())
+ self.assertEqual(str(result_pytz), str(result_dateutil))
+
+ # 1 hour difference
+ result_pytz = (Timestamp('2013-10-27 01:00:00')
+ .tz_localize(pytz_zone, ambiguous=1))
+ result_dateutil = (Timestamp('2013-10-27 01:00:00')
+ .tz_localize(dateutil_zone, ambiguous=1))
+ self.assertEqual(result_pytz.value, result_dateutil.value)
+ self.assertEqual(result_pytz.value, 1382832000000000000)
+
+ # dateutil < 2.6 is buggy w.r.t. ambiguous timezones
+ if dateutil.__version__ > LooseVersion('2.5.3'):
+ # GH 14621
+ self.assertEqual(str(result_pytz), str(result_dateutil))
+ self.assertEqual(result_pytz.to_pydatetime().tzname(),
+ result_dateutil.to_pydatetime().tzname())
+
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 21cfe84f153fa..b45f867be65dd 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -327,8 +327,9 @@ def test_repr(self):
# dateutil zone change (only matters for repr)
import dateutil
- if dateutil.__version__ >= LooseVersion(
- '2.3') and dateutil.__version__ <= LooseVersion('2.4.0'):
+ if (dateutil.__version__ >= LooseVersion('2.3') and
+ (dateutil.__version__ <= LooseVersion('2.4.0') or
+ dateutil.__version__ >= LooseVersion('2.6.0'))):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Pacific']
else:
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 93d35ff964e69..7f567affc87c5 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -43,6 +43,7 @@ def _infer(a, b):
raise AssertionError('Inputs must both have the same timezone,'
' {0} != {1}'.format(tz, b.tzinfo))
return tz
+
tz = None
if start is not None:
tz = _infer(start, end)
@@ -267,10 +268,15 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
1 2016-03-05
dtype: datetime64[ns]
- If a date that does not meet timestamp limitations, passing errors='coerce'
- will force to NaT. Furthermore this will force non-dates to NaT as well.
+ If a date does not meet the `timestamp limitations
+ <http://pandas.pydata.org/pandas-docs/stable/timeseries.html
+ #timeseries-timestamp-limits>`_, passing errors='ignore'
+ will return the original input instead of raising any exception.
+
+ Passing errors='coerce' will force an out-of-bounds date to NaT,
+ in addition to forcing non-dates (or non-parseable dates) to NaT.
- >>> pd.to_datetime('13000101', format='%Y%m%d')
+ >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
@@ -423,6 +429,7 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
return _convert_listlike(np.array([arg]), box, format)[0]
+
# mappings for assembling units
_unit_map = {'year': 'year',
'years': 'year',
@@ -555,7 +562,7 @@ def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslib.iNaT
- result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).\
+ result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \
astype('M8[ns]')
return result
@@ -640,7 +647,6 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
DateParseError = tslib.DateParseError
normalize_date = tslib.normalize_date
-
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
@@ -766,6 +772,7 @@ def format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime('%Y%m%d')
+
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index bab45595cd60f..61ea7794971e2 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -24,6 +24,7 @@ from cpython cimport (
PyUnicode_AsUTF8String,
)
+
# Cython < 0.17 doesn't have this in cpython
cdef extern from "Python.h":
cdef PyTypeObject *Py_TYPE(object)
@@ -37,7 +38,7 @@ from datetime cimport cmp_pandas_datetimestruct
from libc.stdlib cimport free
from util cimport (is_integer_object, is_float_object, is_datetime64_object,
- is_timedelta64_object)
+ is_timedelta64_object, INT64_MAX)
cimport util
from datetime cimport *
@@ -97,6 +98,7 @@ except NameError: # py3
cdef inline object create_timestamp_from_ts(
int64_t value, pandas_datetimestruct dts,
object tz, object freq):
+ """ convenience routine to construct a Timestamp from its parts """
cdef _Timestamp ts_base
ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month,
dts.day, dts.hour, dts.min,
@@ -111,6 +113,7 @@ cdef inline object create_timestamp_from_ts(
cdef inline object create_datetime_from_ts(
int64_t value, pandas_datetimestruct dts,
object tz, object freq):
+ """ convenience routine to construct a datetime.datetime from its parts """
return datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
@@ -377,7 +380,6 @@ class Timestamp(_Timestamp):
# Mixing pydatetime positional and keyword arguments is forbidden!
cdef _TSObject ts
- cdef _Timestamp ts_base
if offset is not None:
# deprecate offset kwd in 0.19.0, GH13593
@@ -411,17 +413,7 @@ class Timestamp(_Timestamp):
from pandas.tseries.frequencies import to_offset
freq = to_offset(freq)
- # make datetime happy
- ts_base = _Timestamp.__new__(cls, ts.dts.year, ts.dts.month,
- ts.dts.day, ts.dts.hour, ts.dts.min,
- ts.dts.sec, ts.dts.us, ts.tzinfo)
-
- # fill out rest of data
- ts_base.value = ts.value
- ts_base.freq = freq
- ts_base.nanosecond = ts.dts.ps / 1000
-
- return ts_base
+ return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq)
def _round(self, freq, rounder):
@@ -659,8 +651,81 @@ class Timestamp(_Timestamp):
astimezone = tz_convert
def replace(self, **kwds):
- return Timestamp(datetime.replace(self, **kwds),
- freq=self.freq)
+ """
+ implements datetime.replace, handles nanoseconds
+
+ Parameters
+ ----------
+ kwargs: key-value dict
+
+ accepted keywords are:
+ year, month, day, hour, minute, second, microsecond, nanosecond, tzinfo
+
+ values must be integer, or for tzinfo, a tz-convertible
+
+ Returns
+ -------
+ Timestamp with fields replaced
+ """
+
+ cdef:
+ pandas_datetimestruct dts
+ int64_t value
+ object tzinfo, result, k, v
+ _TSObject ts
+
+ # set to naive if needed
+ tzinfo = self.tzinfo
+ value = self.value
+ if tzinfo is not None:
+ value = tz_convert_single(value, 'UTC', tzinfo)
+
+ # setup components
+ pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts)
+ dts.ps = self.nanosecond * 1000
+
+ # replace
+ def validate(k, v):
+ """ validate integers """
+ if not isinstance(v, int):
+ raise ValueError("value must be an integer, received "
+ "{v} for {k}".format(v=type(v), k=k))
+ return v
+
+ for k, v in kwds.items():
+ if k == 'year':
+ dts.year = validate(k, v)
+ elif k == 'month':
+ dts.month = validate(k, v)
+ elif k == 'day':
+ dts.day = validate(k, v)
+ elif k == 'hour':
+ dts.hour = validate(k, v)
+ elif k == 'minute':
+ dts.min = validate(k, v)
+ elif k == 'second':
+ dts.sec = validate(k, v)
+ elif k == 'microsecond':
+ dts.us = validate(k, v)
+ elif k == 'nanosecond':
+ dts.ps = validate(k, v) * 1000
+ elif k == 'tzinfo':
+ tzinfo = v
+ else:
+ raise ValueError("invalid name {} passed".format(k))
+
+ # reconstruct & check bounds
+ value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ if value != NPY_NAT:
+ _check_dts_bounds(&dts)
+
+ # set tz if needed
+ if tzinfo is not None:
+ value = tz_convert_single(value, tzinfo, 'UTC')
+
+ result = create_timestamp_from_ts(value, dts, tzinfo, self.freq)
+
+ return result
def isoformat(self, sep='T'):
base = super(_Timestamp, self).isoformat(sep=sep)
@@ -738,7 +803,8 @@ class NaTType(_NaT):
cdef _NaT base
base = _NaT.__new__(cls, 1, 1, 1)
- mangle_nat(base)
+ base._day = -1
+ base._month = -1
base.value = NPY_NAT
return base
@@ -904,10 +970,12 @@ cpdef object get_value_box(ndarray arr, object loc):
# Add the min and max fields at the class level
-# These are defined as magic numbers due to strange
-# wraparound behavior when using the true int64 lower boundary
-cdef int64_t _NS_LOWER_BOUND = -9223285636854775000LL
-cdef int64_t _NS_UPPER_BOUND = 9223372036854775807LL
+cdef int64_t _NS_UPPER_BOUND = INT64_MAX
+# the smallest value we could actually represent is
+# INT64_MIN + 1 == -9223372036854775807
+# but to allow overflow free conversion with a microsecond resolution
+# use the smallest value with a 0 nanosecond unit (0s in last 3 digits)
+cdef int64_t _NS_LOWER_BOUND = -9223372036854775000
cdef pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS
pandas_datetime_to_datetimestruct(_NS_LOWER_BOUND, PANDAS_FR_ns, &_NS_MIN_DTS)
@@ -1038,6 +1106,12 @@ cdef class _Timestamp(datetime):
self._assert_tzawareness_compat(other)
return _cmp_scalar(self.value, ots.value, op)
+ def __reduce_ex__(self, protocol):
+ # python 3.6 compat
+ # http://bugs.python.org/issue28730
+ # now __reduce_ex__ is defined and higher priority than __reduce__
+ return self.__reduce__()
+
def __repr__(self):
stamp = self._repr_base
zone = None
@@ -1472,7 +1546,8 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
"Cannot convert Period to Timestamp "
"unambiguously. Use to_timestamp")
else:
- raise TypeError('Cannot convert input to Timestamp')
+ raise TypeError('Cannot convert input [{}] of type {} to '
+ 'Timestamp'.format(ts, type(ts)))
if obj.value != NPY_NAT:
_check_dts_bounds(&obj.dts)
@@ -5038,7 +5113,10 @@ cpdef normalize_date(object dt):
-------
normalized : datetime.datetime or Timestamp
"""
- if PyDateTime_Check(dt):
+ if is_timestamp(dt):
+ return dt.replace(hour=0, minute=0, second=0, microsecond=0,
+ nanosecond=0)
+ elif PyDateTime_Check(dt):
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif PyDate_Check(dt):
return datetime(dt.year, dt.month, dt.day)
diff --git a/pandas/types/common.py b/pandas/types/common.py
index e0e4501738745..5d161efa838de 100644
--- a/pandas/types/common.py
+++ b/pandas/types/common.py
@@ -1,7 +1,8 @@
""" common type operations """
import numpy as np
-from pandas.compat import string_types, text_type, binary_type
+from pandas.compat import (string_types, text_type, binary_type,
+ PY3, PY36)
from pandas import lib, algos
from .dtypes import (CategoricalDtype, CategoricalDtypeType,
DatetimeTZDtype, DatetimeTZDtypeType,
@@ -21,6 +22,7 @@
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
+
_DATELIKE_DTYPES = set([np.dtype(t)
for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
'm8[ns]', '<m8[ns]', '>m8[ns]']])
@@ -188,6 +190,20 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
return issubclass(tipo, (np.datetime64, np.timedelta64))
+def _is_unorderable_exception(e):
+ """
+ return a boolean if we an unorderable exception error message
+
+ These are different error message for PY>=3<=3.5 and PY>=3.6
+ """
+ if PY36:
+ return "'>' not supported between instances of" in str(e)
+
+ elif PY3:
+ return 'unorderable' in str(e)
+ return False
+
+
def is_numeric_v_string_like(a, b):
"""
numpy doesn't like to compare numeric arrays vs scalar string-likes
diff --git a/pandas/util/clipboard.py b/pandas/util/clipboard.py
deleted file mode 100644
index 02da0d5b8159f..0000000000000
--- a/pandas/util/clipboard.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Pyperclip v1.5.15
-# A cross-platform clipboard module for Python.
-# By Al Sweigart al@inventwithpython.com
-
-# Usage:
-# import pyperclip
-# pyperclip.copy('The text to be copied to the clipboard.')
-# spam = pyperclip.paste()
-
-# On Windows, no additional modules are needed.
-# On Mac, this module makes use of the pbcopy and pbpaste commands, which
-# should come with the os.
-# On Linux, this module makes use of the xclip or xsel commands, which should
-# come with the os. Otherwise run "sudo apt-get install xclip" or
-# "sudo apt-get install xsel"
-# Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
-# The gtk module is not available for Python 3, and this module does not work
-# with PyGObject yet.
-
-
-# Copyright (c) 2015, Albert Sweigart
-# All rights reserved.
-#
-# BSD-style license:
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of the pyperclip nor the
-# names of its contributors may be used to endorse or promote products
-# derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# flake8: noqa
-
-import platform
-import os
-from subprocess import call, Popen, PIPE
-
-PY2 = '2' == platform.python_version_tuple()[0]
-text_type = unicode if PY2 else str
-
-
-class NoClipboardProgramError(OSError):
- pass
-
-
-def _pasteWindows():
- CF_UNICODETEXT = 13
- d = ctypes.windll
- d.user32.OpenClipboard(0)
- handle = d.user32.GetClipboardData(CF_UNICODETEXT)
- data = ctypes.c_wchar_p(handle).value
- d.user32.CloseClipboard()
- return data
-
-
-def _copyWindows(text):
- GMEM_DDESHARE = 0x2000
- CF_UNICODETEXT = 13
- d = ctypes.windll # cdll expects 4 more bytes in user32.OpenClipboard(0)
- if not isinstance(text, text_type):
- text = text.decode('mbcs')
-
- d.user32.OpenClipboard(0)
-
- d.user32.EmptyClipboard()
- hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE,
- len(text.encode('utf-16-le')) + 2)
- pchData = d.kernel32.GlobalLock(hCd)
- ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
- d.kernel32.GlobalUnlock(hCd)
- d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
- d.user32.CloseClipboard()
-
-
-def _pasteCygwin():
- CF_UNICODETEXT = 13
- d = ctypes.cdll
- d.user32.OpenClipboard(0)
- handle = d.user32.GetClipboardData(CF_UNICODETEXT)
- data = ctypes.c_wchar_p(handle).value
- d.user32.CloseClipboard()
- return data
-
-
-def _copyCygwin(text):
- GMEM_DDESHARE = 0x2000
- CF_UNICODETEXT = 13
- d = ctypes.cdll
- if not isinstance(text, text_type):
- text = text.decode('mbcs')
- d.user32.OpenClipboard(0)
- d.user32.EmptyClipboard()
- hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE,
- len(text.encode('utf-16-le')) + 2)
- pchData = d.kernel32.GlobalLock(hCd)
- ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
- d.kernel32.GlobalUnlock(hCd)
- d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
- d.user32.CloseClipboard()
-
-
-def _copyOSX(text):
- p = Popen(['pbcopy', 'w'], stdin=PIPE, close_fds=True)
- p.communicate(input=text.encode('utf-8'))
-
-
-def _pasteOSX():
- p = Popen(['pbpaste', 'r'], stdout=PIPE, close_fds=True)
- stdout, stderr = p.communicate()
- return stdout.decode('utf-8')
-
-
-def _pasteGtk():
- return gtk.Clipboard().wait_for_text()
-
-
-def _copyGtk(text):
- global cb
- cb = gtk.Clipboard()
- cb.set_text(text)
- cb.store()
-
-
-def _pasteQt():
- return str(cb.text())
-
-
-def _copyQt(text):
- cb.setText(text)
-
-
-def _copyXclip(text):
- p = Popen(['xclip', '-selection', 'c'], stdin=PIPE, close_fds=True)
- p.communicate(input=text.encode('utf-8'))
-
-
-def _pasteXclip():
- p = Popen(['xclip', '-selection', 'c', '-o'], stdout=PIPE, close_fds=True)
- stdout, stderr = p.communicate()
- return stdout.decode('utf-8')
-
-
-def _copyXsel(text):
- p = Popen(['xsel', '-b', '-i'], stdin=PIPE, close_fds=True)
- p.communicate(input=text.encode('utf-8'))
-
-
-def _pasteXsel():
- p = Popen(['xsel', '-b', '-o'], stdout=PIPE, close_fds=True)
- stdout, stderr = p.communicate()
- return stdout.decode('utf-8')
-
-
-def _copyKlipper(text):
- p = Popen(['qdbus', 'org.kde.klipper', '/klipper',
- 'setClipboardContents', text.encode('utf-8')],
- stdin=PIPE, close_fds=True)
- p.communicate(input=None)
-
-
-def _pasteKlipper():
- p = Popen(['qdbus', 'org.kde.klipper', '/klipper',
- 'getClipboardContents'], stdout=PIPE, close_fds=True)
- stdout, stderr = p.communicate()
- return stdout.decode('utf-8')
-
-
-# Determine the OS/platform and set the copy() and paste() functions
-# accordingly.
-if 'cygwin' in platform.system().lower():
- _functions = 'Cygwin' # for debugging
- import ctypes
- paste = _pasteCygwin
- copy = _copyCygwin
-elif os.name == 'nt' or platform.system() == 'Windows':
- _functions = 'Windows' # for debugging
- import ctypes
- paste = _pasteWindows
- copy = _copyWindows
-elif os.name == 'mac' or platform.system() == 'Darwin':
- _functions = 'OS X pbcopy/pbpaste' # for debugging
- paste = _pasteOSX
- copy = _copyOSX
-elif os.name == 'posix' or platform.system() == 'Linux':
- # Determine which command/module is installed, if any.
- xclipExists = call(['which', 'xclip'],
- stdout=PIPE, stderr=PIPE) == 0
-
- xselExists = call(['which', 'xsel'],
- stdout=PIPE, stderr=PIPE) == 0
-
- xklipperExists = (
- call(['which', 'klipper'], stdout=PIPE, stderr=PIPE) == 0 and
- call(['which', 'qdbus'], stdout=PIPE, stderr=PIPE) == 0
- )
-
- gtkInstalled = False
- try:
- # Check it gtk is installed.
- import gtk
- gtkInstalled = True
- except ImportError:
- pass
-
- if not gtkInstalled:
- # Check for either PyQt4 or PySide
- qtBindingInstalled = True
- try:
- from PyQt4 import QtGui
- except ImportError:
- try:
- from PySide import QtGui
- except ImportError:
- qtBindingInstalled = False
-
- # Set one of the copy & paste functions.
- if xclipExists:
- _functions = 'xclip command' # for debugging
- paste = _pasteXclip
- copy = _copyXclip
- elif xklipperExists:
- _functions = '(KDE Klipper) - qdbus (external)' # for debugging
- paste = _pasteKlipper
- copy = _copyKlipper
- elif gtkInstalled:
- _functions = 'gtk module' # for debugging
- paste = _pasteGtk
- copy = _copyGtk
- elif qtBindingInstalled:
- _functions = 'PyQt4 module' # for debugging
- app = QtGui.QApplication([])
- cb = QtGui.QApplication.clipboard()
- paste = _pasteQt
- copy = _copyQt
- elif xselExists:
- # TODO: xsel doesn't seem to work on Raspberry Pi (my test Linux
- # environment). Putting this as the last method tried.
- _functions = 'xsel command' # for debugging
- paste = _pasteXsel
- copy = _copyXsel
- else:
- raise NoClipboardProgramError('Pyperclip requires the gtk, PyQt4, or '
- 'PySide module installed, or either the '
- 'xclip or xsel command.')
-else:
- raise RuntimeError('pyperclip does not support your system.')
-
-# pandas aliases
-clipboard_get = paste
-clipboard_set = copy
diff --git a/pandas/util/clipboard/__init__.py b/pandas/util/clipboard/__init__.py
new file mode 100644
index 0000000000000..358c9b5f8035a
--- /dev/null
+++ b/pandas/util/clipboard/__init__.py
@@ -0,0 +1,110 @@
+"""
+Pyperclip
+
+A cross-platform clipboard module for Python. (only handles plain text for now)
+By Al Sweigart al@inventwithpython.com
+BSD License
+
+Usage:
+ import pyperclip
+ pyperclip.copy('The text to be copied to the clipboard.')
+ spam = pyperclip.paste()
+
+ if not pyperclip.copy:
+ print("Copy functionality unavailable!")
+
+On Windows, no additional modules are needed.
+On Mac, the module uses pbcopy and pbpaste, which should come with the os.
+On Linux, install xclip or xsel via package manager. For example, in Debian:
+sudo apt-get install xclip
+
+Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
+
+gtk and PyQt4 modules are not available for Python 3,
+and this module does not work with PyGObject yet.
+"""
+__version__ = '1.5.27'
+
+# flake8: noqa
+
+import platform
+import os
+import subprocess
+from .clipboards import (init_osx_clipboard,
+ init_gtk_clipboard, init_qt_clipboard,
+ init_xclip_clipboard, init_xsel_clipboard,
+ init_klipper_clipboard, init_no_clipboard)
+from .windows import init_windows_clipboard
+
+# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
+# Thus, we need to detect the presence of $DISPLAY manually
+# and not load PyQt4 if it is absent.
+HAS_DISPLAY = os.getenv("DISPLAY", False)
+CHECK_CMD = "where" if platform.system() == "Windows" else "which"
+
+
+def _executable_exists(name):
+ return subprocess.call([CHECK_CMD, name],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
+
+
+def determine_clipboard():
+ # Determine the OS/platform and set
+ # the copy() and paste() functions accordingly.
+ if 'cygwin' in platform.system().lower():
+ # FIXME: pyperclip currently does not support Cygwin,
+ # see https://github.com/asweigart/pyperclip/issues/55
+ pass
+ elif os.name == 'nt' or platform.system() == 'Windows':
+ return init_windows_clipboard()
+ if os.name == 'mac' or platform.system() == 'Darwin':
+ return init_osx_clipboard()
+ if HAS_DISPLAY:
+ # Determine which command/module is installed, if any.
+ try:
+ import gtk # check if gtk is installed
+ except ImportError:
+ pass
+ else:
+ return init_gtk_clipboard()
+
+ try:
+ import PyQt4 # check if PyQt4 is installed
+ except ImportError:
+ pass
+ else:
+ return init_qt_clipboard()
+
+ if _executable_exists("xclip"):
+ return init_xclip_clipboard()
+ if _executable_exists("xsel"):
+ return init_xsel_clipboard()
+ if _executable_exists("klipper") and _executable_exists("qdbus"):
+ return init_klipper_clipboard()
+
+ return init_no_clipboard()
+
+
+def set_clipboard(clipboard):
+ global copy, paste
+
+ clipboard_types = {'osx': init_osx_clipboard,
+ 'gtk': init_gtk_clipboard,
+ 'qt': init_qt_clipboard,
+ 'xclip': init_xclip_clipboard,
+ 'xsel': init_xsel_clipboard,
+ 'klipper': init_klipper_clipboard,
+ 'windows': init_windows_clipboard,
+ 'no': init_no_clipboard}
+
+ copy, paste = clipboard_types[clipboard]()
+
+
+copy, paste = determine_clipboard()
+
+__all__ = ["copy", "paste"]
+
+
+# pandas aliases
+clipboard_get = paste
+clipboard_set = copy
\ No newline at end of file
diff --git a/pandas/util/clipboard/clipboards.py b/pandas/util/clipboard/clipboards.py
new file mode 100644
index 0000000000000..182a685f956e6
--- /dev/null
+++ b/pandas/util/clipboard/clipboards.py
@@ -0,0 +1,136 @@
+# flake8: noqa
+
+import sys
+import subprocess
+from .exceptions import PyperclipException
+
+EXCEPT_MSG = """
+ Pyperclip could not find a copy/paste mechanism for your system.
+ For more information, please visit https://pyperclip.readthedocs.org """
+PY2 = sys.version_info[0] == 2
+text_type = unicode if PY2 else str
+
+
+def init_osx_clipboard():
+ def copy_osx(text):
+ p = subprocess.Popen(['pbcopy', 'w'],
+ stdin=subprocess.PIPE, close_fds=True)
+ p.communicate(input=text.encode('utf-8'))
+
+ def paste_osx():
+ p = subprocess.Popen(['pbpaste', 'r'],
+ stdout=subprocess.PIPE, close_fds=True)
+ stdout, stderr = p.communicate()
+ return stdout.decode('utf-8')
+
+ return copy_osx, paste_osx
+
+
+def init_gtk_clipboard():
+ import gtk
+
+ def copy_gtk(text):
+ global cb
+ cb = gtk.Clipboard()
+ cb.set_text(text)
+ cb.store()
+
+ def paste_gtk():
+ clipboardContents = gtk.Clipboard().wait_for_text()
+ # for python 2, returns None if the clipboard is blank.
+ if clipboardContents is None:
+ return ''
+ else:
+ return clipboardContents
+
+ return copy_gtk, paste_gtk
+
+
+def init_qt_clipboard():
+ # $DISPLAY should exist
+ from PyQt4.QtGui import QApplication
+
+ app = QApplication([])
+
+ def copy_qt(text):
+ cb = app.clipboard()
+ cb.setText(text)
+
+ def paste_qt():
+ cb = app.clipboard()
+ return text_type(cb.text())
+
+ return copy_qt, paste_qt
+
+
+def init_xclip_clipboard():
+ def copy_xclip(text):
+ p = subprocess.Popen(['xclip', '-selection', 'c'],
+ stdin=subprocess.PIPE, close_fds=True)
+ p.communicate(input=text.encode('utf-8'))
+
+ def paste_xclip():
+ p = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
+ stdout=subprocess.PIPE, close_fds=True)
+ stdout, stderr = p.communicate()
+ return stdout.decode('utf-8')
+
+ return copy_xclip, paste_xclip
+
+
+def init_xsel_clipboard():
+ def copy_xsel(text):
+ p = subprocess.Popen(['xsel', '-b', '-i'],
+ stdin=subprocess.PIPE, close_fds=True)
+ p.communicate(input=text.encode('utf-8'))
+
+ def paste_xsel():
+ p = subprocess.Popen(['xsel', '-b', '-o'],
+ stdout=subprocess.PIPE, close_fds=True)
+ stdout, stderr = p.communicate()
+ return stdout.decode('utf-8')
+
+ return copy_xsel, paste_xsel
+
+
+def init_klipper_clipboard():
+ def copy_klipper(text):
+ p = subprocess.Popen(
+ ['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
+ text.encode('utf-8')],
+ stdin=subprocess.PIPE, close_fds=True)
+ p.communicate(input=None)
+
+ def paste_klipper():
+ p = subprocess.Popen(
+ ['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
+ stdout=subprocess.PIPE, close_fds=True)
+ stdout, stderr = p.communicate()
+
+ # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
+ # TODO: https://github.com/asweigart/pyperclip/issues/43
+ clipboardContents = stdout.decode('utf-8')
+ # even if blank, Klipper will append a newline at the end
+ assert len(clipboardContents) > 0
+ # make sure that newline is there
+ assert clipboardContents.endswith('\n')
+ if clipboardContents.endswith('\n'):
+ clipboardContents = clipboardContents[:-1]
+ return clipboardContents
+
+ return copy_klipper, paste_klipper
+
+
+def init_no_clipboard():
+ class ClipboardUnavailable(object):
+ def __call__(self, *args, **kwargs):
+ raise PyperclipException(EXCEPT_MSG)
+
+ if PY2:
+ def __nonzero__(self):
+ return False
+ else:
+ def __bool__(self):
+ return False
+
+ return ClipboardUnavailable(), ClipboardUnavailable()
diff --git a/pandas/util/clipboard/exceptions.py b/pandas/util/clipboard/exceptions.py
new file mode 100644
index 0000000000000..615335f3a58da
--- /dev/null
+++ b/pandas/util/clipboard/exceptions.py
@@ -0,0 +1,12 @@
+# flake8: noqa
+import ctypes
+
+
+class PyperclipException(RuntimeError):
+ pass
+
+
+class PyperclipWindowsException(PyperclipException):
+ def __init__(self, message):
+ message += " (%s)" % ctypes.WinError()
+ super(PyperclipWindowsException, self).__init__(message)
diff --git a/pandas/util/clipboard/windows.py b/pandas/util/clipboard/windows.py
new file mode 100644
index 0000000000000..956d5b9d34025
--- /dev/null
+++ b/pandas/util/clipboard/windows.py
@@ -0,0 +1,152 @@
+# flake8: noqa
+"""
+This module implements clipboard handling on Windows using ctypes.
+"""
+import time
+import contextlib
+import ctypes
+from ctypes import c_size_t, sizeof, c_wchar_p, get_errno, c_wchar
+from .exceptions import PyperclipWindowsException
+
+
+class CheckedCall(object):
+ def __init__(self, f):
+ super(CheckedCall, self).__setattr__("f", f)
+
+ def __call__(self, *args):
+ ret = self.f(*args)
+ if not ret and get_errno():
+ raise PyperclipWindowsException("Error calling " + self.f.__name__)
+ return ret
+
+ def __setattr__(self, key, value):
+ setattr(self.f, key, value)
+
+
+def init_windows_clipboard():
+ from ctypes.wintypes import (HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND,
+ HINSTANCE, HMENU, BOOL, UINT, HANDLE)
+
+ windll = ctypes.windll
+
+ safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
+ safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
+ INT, INT, HWND, HMENU, HINSTANCE, LPVOID]
+ safeCreateWindowExA.restype = HWND
+
+ safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
+ safeDestroyWindow.argtypes = [HWND]
+ safeDestroyWindow.restype = BOOL
+
+ OpenClipboard = windll.user32.OpenClipboard
+ OpenClipboard.argtypes = [HWND]
+ OpenClipboard.restype = BOOL
+
+ safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
+ safeCloseClipboard.argtypes = []
+ safeCloseClipboard.restype = BOOL
+
+ safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
+ safeEmptyClipboard.argtypes = []
+ safeEmptyClipboard.restype = BOOL
+
+ safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
+ safeGetClipboardData.argtypes = [UINT]
+ safeGetClipboardData.restype = HANDLE
+
+ safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
+ safeSetClipboardData.argtypes = [UINT, HANDLE]
+ safeSetClipboardData.restype = HANDLE
+
+ safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
+ safeGlobalAlloc.argtypes = [UINT, c_size_t]
+ safeGlobalAlloc.restype = HGLOBAL
+
+ safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
+ safeGlobalLock.argtypes = [HGLOBAL]
+ safeGlobalLock.restype = LPVOID
+
+ safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
+ safeGlobalUnlock.argtypes = [HGLOBAL]
+ safeGlobalUnlock.restype = BOOL
+
+ GMEM_MOVEABLE = 0x0002
+ CF_UNICODETEXT = 13
+
+ @contextlib.contextmanager
+ def window():
+ """
+ Context that provides a valid Windows hwnd.
+ """
+ # we really just need the hwnd, so setting "STATIC"
+ # as predefined lpClass is just fine.
+ hwnd = safeCreateWindowExA(0, b"STATIC", None, 0, 0, 0, 0, 0,
+ None, None, None, None)
+ try:
+ yield hwnd
+ finally:
+ safeDestroyWindow(hwnd)
+
+ @contextlib.contextmanager
+ def clipboard(hwnd):
+ """
+ Context manager that opens the clipboard and prevents
+ other applications from modifying the clipboard content.
+ """
+ # We may not get the clipboard handle immediately because
+ # some other application is accessing it (?)
+ # We try for at least 500ms to get the clipboard.
+ t = time.time() + 0.5
+ success = False
+ while time.time() < t:
+ success = OpenClipboard(hwnd)
+ if success:
+ break
+ time.sleep(0.01)
+ if not success:
+ raise PyperclipWindowsException("Error calling OpenClipboard")
+
+ try:
+ yield
+ finally:
+ safeCloseClipboard()
+
+ def copy_windows(text):
+ # This function is heavily based on
+ # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
+ with window() as hwnd:
+ # http://msdn.com/ms649048
+ # If an application calls OpenClipboard with hwnd set to NULL,
+ # EmptyClipboard sets the clipboard owner to NULL;
+ # this causes SetClipboardData to fail.
+ # => We need a valid hwnd to copy something.
+ with clipboard(hwnd):
+ safeEmptyClipboard()
+
+ if text:
+ # http://msdn.com/ms649051
+ # If the hMem parameter identifies a memory object,
+ # the object must have been allocated using the
+ # function with the GMEM_MOVEABLE flag.
+ count = len(text) + 1
+ handle = safeGlobalAlloc(GMEM_MOVEABLE,
+ count * sizeof(c_wchar))
+ locked_handle = safeGlobalLock(handle)
+
+ ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar))
+
+ safeGlobalUnlock(handle)
+ safeSetClipboardData(CF_UNICODETEXT, handle)
+
+ def paste_windows():
+ with clipboard(None):
+ handle = safeGetClipboardData(CF_UNICODETEXT)
+ if not handle:
+ # GetClipboardData may return NULL with errno == NO_ERROR
+ # if the clipboard is empty.
+ # (Also, it may return a handle to an empty buffer,
+ # but technically that's not empty)
+ return ""
+ return c_wchar_p(handle).value
+
+ return copy_windows, paste_windows
diff --git a/pandas/util/move.c b/pandas/util/move.c
index 68fcad793e16c..9a8af5bbfbdf6 100644
--- a/pandas/util/move.c
+++ b/pandas/util/move.c
@@ -7,6 +7,9 @@
#define PyString_CheckExact PyBytes_CheckExact
#define PyString_AS_STRING PyBytes_AS_STRING
#define PyString_GET_SIZE PyBytes_GET_SIZE
+
+/* in python 3, we cannot intern bytes objects so this is always false */
+#define PyString_CHECK_INTERNED(cs) 0
#endif /* !COMPILING_IN_PY2 */
#ifndef Py_TPFLAGS_HAVE_GETCHARBUFFER
@@ -85,53 +88,37 @@ PyBufferProcs stolenbuf_as_buffer = {
#endif /* COMPILING_IN_PY2 */
-static PyObject *
-stolenbuf_new(PyObject *self, PyObject *args, PyObject *kwargs)
-{
- stolenbufobject *ret;
- PyObject *bytes_rvalue;
-
- if (kwargs && PyDict_Size(kwargs)) {
- PyErr_SetString(PyExc_TypeError,
- "stolenbuf does not accept keyword arguments");
- return NULL;
- }
-
- if (PyTuple_GET_SIZE(args) != 1) {
- PyErr_SetString(PyExc_TypeError,
- "stolenbuf requires exactly 1 positional argument");
- return NULL;
-
- }
-
- /* pull out the single, positional argument */
- bytes_rvalue = PyTuple_GET_ITEM(args, 0);
-
- if (!PyString_CheckExact(bytes_rvalue)) {
- PyErr_SetString(PyExc_TypeError,
- "stolenbuf can only steal from bytes objects");
- return NULL;
- }
-
- if (Py_REFCNT(bytes_rvalue) != 1) {
- /* there is a reference other than the caller's stack */
- PyErr_SetObject(badmove, bytes_rvalue);
- return NULL;
- }
-
- if (!(ret = PyObject_New(stolenbufobject, &stolenbuf_type))) {
- return NULL;
- }
+PyDoc_STRVAR(stolenbuf_doc,
+ "A buffer that is wrapping a stolen bytes object's buffer.");
- /* store the original bytes object in a field that is not
- exposed to python */
- Py_INCREF(bytes_rvalue);
- ret->invalid_bytes = bytes_rvalue;
- return (PyObject*) ret;
-}
+PyTypeObject stolenbuf_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pandas.util._move.stolenbuf", /* tp_name */
+ sizeof(stolenbufobject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor) stolenbuf_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ &stolenbuf_as_buffer, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT |
+ Py_TPFLAGS_HAVE_NEWBUFFER |
+ Py_TPFLAGS_HAVE_GETCHARBUFFER, /* tp_flags */
+ stolenbuf_doc, /* tp_doc */
+};
PyDoc_STRVAR(
- stolenbuf_doc,
+ move_into_mutable_buffer_doc,
"Moves a bytes object that is about to be destroyed into a mutable buffer\n"
"without copying the data.\n"
"\n"
@@ -155,49 +142,55 @@ PyDoc_STRVAR(
"\n"
"Notes\n"
"-----\n"
- "If you want to use this function you are probably wrong.\n");
+ "If you want to use this function you are probably wrong.\n"
+ "\n"
+ "Warning: Do not call this function through *unpacking. This can\n"
+ "potentially trick the reference checks which may allow you to get a\n"
+ "mutable reference to a shared string!\n"
+ "\n");
+
+/* This is implemented as a standalone function instead of the ``tp_new`` of
+ ``stolenbuf`` because we need to create a function using the METH_O flag
+ to support Python 3.6. In python 3.6, PyCFunction calls from python code now
+ count the reference owned by the argument tuple. This would cause the object
+ to have 2 references if used with a direct call like: ``stolenbuf(a)``;
+ however, if called through *unpacking like ``stolenbuf(*(a,))`` it would
+ only have the one reference (the tuple). */
+static PyObject*
+move_into_mutable_buffer(PyObject *self, PyObject *bytes_rvalue)
+{
+ stolenbufobject *ret;
-PyTypeObject stolenbuf_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pandas.util._move.stolenbuf", /* tp_name */
- sizeof(stolenbufobject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor) stolenbuf_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- &stolenbuf_as_buffer, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT |
- Py_TPFLAGS_HAVE_NEWBUFFER |
- Py_TPFLAGS_HAVE_GETCHARBUFFER, /* tp_flags */
- stolenbuf_doc, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- (newfunc) stolenbuf_new, /* tp_new */
+ if (!PyString_CheckExact(bytes_rvalue)) {
+ PyErr_SetString(PyExc_TypeError,
+ "stolenbuf can only steal from bytes objects");
+ return NULL;
+ }
+
+ if (Py_REFCNT(bytes_rvalue) != 1 || PyString_CHECK_INTERNED(bytes_rvalue)) {
+ /* there is a reference other than the caller's stack or the string is
+ interned */
+ PyErr_SetObject(badmove, bytes_rvalue);
+ return NULL;
+ }
+
+ if (!(ret = PyObject_New(stolenbufobject, &stolenbuf_type))) {
+ return NULL;
+ }
+
+ /* store the original bytes object in a field that is not
+ exposed to python */
+ Py_INCREF(bytes_rvalue);
+ ret->invalid_bytes = bytes_rvalue;
+ return (PyObject*) ret;
+}
+
+PyMethodDef methods[] = {
+ {"move_into_mutable_buffer",
+ (PyCFunction) move_into_mutable_buffer,
+ METH_O,
+ move_into_mutable_buffer_doc},
+ {NULL},
};
#define MODULE_NAME "pandas.util._move"
@@ -208,6 +201,7 @@ PyModuleDef _move_module = {
MODULE_NAME,
NULL,
-1,
+ methods,
};
#endif /* !COMPILING_IN_PY2 */
@@ -219,7 +213,7 @@ PyDoc_STRVAR(
"Parameters\n"
"----------\n"
"data : any\n"
- " The data which was passed to ``_move_into_mutable_buffer``.\n"
+ " The data which was passed to ``move_into_mutable_buffer``.\n"
"\n"
"See Also\n"
"--------\n"
@@ -250,14 +244,14 @@ init_move(void)
#if !COMPILING_IN_PY2
if (!(m = PyModule_Create(&_move_module)))
#else
- if (!(m = Py_InitModule(MODULE_NAME, NULL)))
+ if (!(m = Py_InitModule(MODULE_NAME, methods)))
#endif /* !COMPILING_IN_PY2 */
{
return ERROR_RETURN;
}
if (PyModule_AddObject(m,
- "move_into_mutable_buffer",
+ "stolenbuf",
(PyObject*) &stolenbuf_type)) {
Py_DECREF(m);
return ERROR_RETURN;
diff --git a/setup.py b/setup.py
index 846e2b7fa2d88..2bef65c9719dc 100755
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,11 @@ def is_platform_mac():
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
- from Cython.Distutils import build_ext as _build_ext
+ try:
+ from Cython.Distutils.old_build_ext import old_build_ext as _build_ext
+ except ImportError:
+ # Pre 0.25
+ from Cython.Distutils import build_ext as _build_ext
cython = True
except ImportError:
cython = False
@@ -125,25 +129,25 @@ def is_platform_mac():
class build_ext(_build_ext):
def build_extensions(self):
- if not cython:
- raise ImportError('Building pandas requires cython')
-
- for pxifile in _pxifiles:
- # build pxifiles first, template extention must be .pxi.in
- assert pxifile.endswith('.pxi.in')
- outfile = pxifile[:-3]
-
- if (os.path.exists(outfile) and
- os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime):
- # if .pxi.in is not updated, no need to output .pxi
- continue
+ # if builing from c files, don't need to
+ # generate template output
+ if cython:
+ for pxifile in _pxifiles:
+ # build pxifiles first, template extention must be .pxi.in
+ assert pxifile.endswith('.pxi.in')
+ outfile = pxifile[:-3]
+
+ if (os.path.exists(outfile) and
+ os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime):
+ # if .pxi.in is not updated, no need to output .pxi
+ continue
- with open(pxifile, "r") as f:
- tmpl = f.read()
- pyxcontent = tempita.sub(tmpl)
+ with open(pxifile, "r") as f:
+ tmpl = f.read()
+ pyxcontent = tempita.sub(tmpl)
- with open(outfile, "w") as f:
- f.write(pyxcontent)
+ with open(outfile, "w") as f:
+ f.write(pyxcontent)
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
@@ -240,6 +244,7 @@ def build_extensions(self):
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering',
]
@@ -288,6 +293,11 @@ def initialize_options(self):
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
+ # clean the generated pxi files
+ for pxifile in _pxifiles:
+ pxifile = pxifile.replace(".pxi.in", ".pxi")
+ self._clean_me.append(pxifile)
+
for d in ('build', 'dist'):
if os.path.exists(d):
self._clean_trees.append(d)
@@ -326,6 +336,7 @@ class CheckSDist(sdist_class):
'pandas/src/period.pyx',
'pandas/src/sparse.pyx',
'pandas/src/testing.pyx',
+ 'pandas/src/hash.pyx',
'pandas/io/sas/saslib.pyx']
def initialize_options(self):
@@ -450,7 +461,8 @@ def pxd(name):
tseries_depends = ['pandas/src/datetime/np_datetime.h',
'pandas/src/datetime/np_datetime_strings.h',
- 'pandas/src/period_helper.h']
+ 'pandas/src/period_helper.h',
+ 'pandas/src/datetime.pxd']
# some linux distros require it
@@ -476,7 +488,8 @@ def pxd(name):
'pandas/src/period_helper.c']},
index={'pyxfile': 'index',
'sources': ['pandas/src/datetime/np_datetime.c',
- 'pandas/src/datetime/np_datetime_strings.c']},
+ 'pandas/src/datetime/np_datetime_strings.c'],
+ 'pxdfiles': ['src/util']},
algos={'pyxfile': 'algos',
'pxdfiles': ['src/util'],
'depends': _pxi_dep['algos']},
@@ -494,10 +507,12 @@ def pxd(name):
'sources': ['pandas/src/parser/tokenizer.c',
'pandas/src/parser/io.c']},
_sparse={'pyxfile': 'src/sparse',
- 'depends': ([srcpath('sparse', suffix='.pyx')]
- + _pxi_dep['_sparse'])},
+ 'depends': ([srcpath('sparse', suffix='.pyx')] +
+ _pxi_dep['_sparse'])},
_testing={'pyxfile': 'src/testing',
'depends': [srcpath('testing', suffix='.pyx')]},
+ _hash={'pyxfile': 'src/hash',
+ 'depends': [srcpath('hash', suffix='.pyx')]},
)
ext_data["io.sas.saslib"] = {'pyxfile': 'io/sas/saslib'}
@@ -637,7 +652,8 @@ def pxd(name):
'pandas.io.tests.parser',
'pandas.io.tests.sas',
'pandas.stats.tests',
- 'pandas.msgpack'
+ 'pandas.msgpack',
+ 'pandas.util.clipboard'
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
'tests/data/legacy_pickle/*/*.pickle',
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15648 | 2017-03-10T16:56:07Z | 2017-03-10T21:22:34Z | null | 2017-03-10T21:22:35Z |
DOC: doc warnings | diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 67072ff9fb224..0bfb2b635f53a 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -116,7 +116,6 @@ See the package overview for more detail about what's in the library.
whatsnew
install
contributing
- faq
overview
10min
tutorials
diff --git a/doc/source/install.rst b/doc/source/install.rst
index fe2a9fa4ba509..578caae605471 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -260,7 +260,7 @@ Optional Dependencies
<http://www.vergenet.net/~conrad/software/xsel/>`__, or `xclip
<https://github.com/astrand/xclip/>`__: necessary to use
:func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation.
-* For Google BigQuery I/O - see :ref:`here <io.bigquery_deps>`.
+* For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__
* `Backports.lzma <https://pypi.python.org/pypi/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library.
* One of the following combinations of libraries is needed to use the
diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt
index fed3ba3ce3a84..cf5369466308c 100644
--- a/doc/source/whatsnew/v0.10.0.txt
+++ b/doc/source/whatsnew/v0.10.0.txt
@@ -303,11 +303,10 @@ Updated PyTables Support
store.append('wp',wp)
# selecting via A QUERY
- store.select('wp',
- [ Term('major_axis>20000102'), Term('minor_axis', '=', ['A','B']) ])
+ store.select('wp', "major_axis>20000102 and minor_axis=['A','B']")
# removing data from tables
- store.remove('wp', Term('major_axis>20000103'))
+ store.remove('wp', "major_axis>20000103")
store.select('wp')
# deleting a store
diff --git a/doc/source/whatsnew/v0.10.1.txt b/doc/source/whatsnew/v0.10.1.txt
index edc628fe85027..d5880e44e46c6 100644
--- a/doc/source/whatsnew/v0.10.1.txt
+++ b/doc/source/whatsnew/v0.10.1.txt
@@ -58,7 +58,7 @@ perform queries on a table, by passing a list to ``data_columns``
# on-disk operations
store.append('df', df, data_columns = ['B','C','string','string2'])
- store.select('df',[ 'B > 0', 'string == foo' ])
+ store.select('df', "B>0 and string=='foo'")
# this is in-memory version of this type of selection
df[(df.B > 0) & (df.string == 'foo')]
@@ -110,7 +110,7 @@ columns, this is equivalent to passing a
store.select('mi')
# the levels are automatically included as data columns
- store.select('mi', Term('foo=bar'))
+ store.select('mi', "foo='bar'")
Multi-table creation via ``append_to_multiple`` and selection via
``select_as_multiple`` can create/select from multiple tables and return a
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9cb299593076d..a3bbaf73c01ca 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -329,7 +329,7 @@ has been changed to make this keyword unnecessary - the change is shown below.
Google BigQuery Enhancements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Added ability to automatically create a table/dataset using the :func:`pandas.io.gbq.to_gbq` function if the destination table/dataset does not exist. (:issue:`8325`, :issue:`11121`).
-- Added ability to replace an existing table and schema when calling the :func:`pandas.io.gbq.to_gbq` function via the ``if_exists`` argument. See the :ref:`docs <io.bigquery_writer>` for more details (:issue:`8325`).
+- Added ability to replace an existing table and schema when calling the :func:`pandas.io.gbq.to_gbq` function via the ``if_exists`` argument. See the `docs <https://pandas-gbq.readthedocs.io/en/latest/writing.html>`__ for more details (:issue:`8325`).
- ``InvalidColumnOrder`` and ``InvalidPageToken`` in the gbq module will raise ``ValueError`` instead of ``IOError``.
- The ``generate_bq_schema()`` function is now deprecated and will be removed in a future version (:issue:`11121`)
- The gbq module will now support Python 3 (:issue:`11094`).
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 893922b719b34..4b27cf706f9b2 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -518,7 +518,7 @@ Other enhancements
- Added ``DataFrame.style.format`` for more flexible formatting of cell values (:issue:`11692`)
- ``DataFrame.select_dtypes`` now allows the ``np.float16`` typecode (:issue:`11990`)
- ``pivot_table()`` now accepts most iterables for the ``values`` parameter (:issue:`12017`)
-- Added Google ``BigQuery`` service account authentication support, which enables authentication on remote servers. (:issue:`11881`, :issue:`12572`). For further details see :ref:`here <io.bigquery_authentication>`
+- Added Google ``BigQuery`` service account authentication support, which enables authentication on remote servers. (:issue:`11881`, :issue:`12572`). For further details see `here <https://pandas-gbq.readthedocs.io/en/latest/intro.html>`__
- ``HDFStore`` is now iterable: ``for k in store`` is equivalent to ``for k in store.keys()`` (:issue:`12221`).
- Add missing methods/fields to ``.dt`` for ``Period`` (:issue:`8848`)
- The entire codebase has been ``PEP``-ified (:issue:`12096`)
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 8e7e95c071ea4..9b003034aa94a 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -377,7 +377,7 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci
Google BigQuery Enhancements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- The :func:`read_gbq` method has gained the ``dialect`` argument to allow users to specify whether to use BigQuery's legacy SQL or BigQuery's standard SQL. See the :ref:`docs <io.bigquery_reader>` for more details (:issue:`13615`).
+- The :func:`read_gbq` method has gained the ``dialect`` argument to allow users to specify whether to use BigQuery's legacy SQL or BigQuery's standard SQL. See the `docs <https://pandas-gbq.readthedocs.io/en/latest/reading.html>`__ for more details (:issue:`13615`).
- The :func:`~DataFrame.to_gbq` method now allows the DataFrame column order to differ from the destination table schema (:issue:`11359`).
.. _whatsnew_0190.errstate:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 47aa4450b897f..7b24264cd09db 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -203,7 +203,7 @@ New Behavior:
df[df.chromosomes != '1'].groupby('chromosomes', sort=False).sum()
-.. _whatsnew_0200.enhancements.table_schema
+.. _whatsnew_0200.enhancements.table_schema:
Table Schema Output
^^^^^^^^^^^^^^^^^^^
@@ -337,7 +337,7 @@ Using ``.iloc``. Here we will get the location of the 'A' column, then use *posi
df.iloc[[0, 2], df.columns.get_loc('A')]
-.. _whatsnew.api_breaking.io_compat
+.. _whatsnew.api_breaking.io_compat:
Possible incompat for HDF5 formats for pandas < 0.13.0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -758,7 +758,7 @@ Bug Fixes
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
-- Bug in ``.asfreq()``, where frequency was not set for empty ``Series` (:issue:`14320`)
+- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 9cfb27a92bfef..b4dc9173f11ba 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -14,8 +14,9 @@ def _try_import():
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
- "you can install via:\n"
- "pip install pandas-gbq\n")
+ "you can install via pip or conda:\n"
+ "pip install pandas-gbq\n"
+ "conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
| https://api.github.com/repos/pandas-dev/pandas/pulls/15647 | 2017-03-10T11:47:29Z | 2017-03-10T14:25:06Z | null | 2017-03-10T23:36:57Z | |
DOC GH15643 Removed pytest-xdist from requirements_dev.txt file | diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index b0a8adc8df5cb..1e051802ec9f8 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -4,5 +4,4 @@ numpy
cython
pytest
pytest-cov
-pytest-xdist
flake8
| - [ x] closes #15643
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15646 | 2017-03-10T10:30:48Z | 2017-03-10T12:07:11Z | 2017-03-10T12:07:10Z | 2017-03-13T06:42:03Z |
remove useless semicolons | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index e8998bf6f6f5c..4a220193f6da0 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -131,7 +131,7 @@ For example, a bar plot can be created the following way:
.. ipython:: python
- plt.figure();
+ plt.figure()
@savefig bar_plot_ex.png
df.iloc[5].plot(kind='bar'); plt.axhline(0, color='k')
@@ -176,7 +176,7 @@ For labeled, non-time series data, you may wish to produce a bar plot:
.. ipython:: python
- plt.figure();
+ plt.figure()
@savefig bar_plot_ex.png
df.iloc[5].plot.bar(); plt.axhline(0, color='k')
@@ -196,7 +196,7 @@ bar plot:
df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
@savefig bar_plot_multi_ex.png
- df2.plot.bar();
+ df2.plot.bar()
To produce a stacked bar plot, pass ``stacked=True``:
@@ -209,7 +209,7 @@ To produce a stacked bar plot, pass ``stacked=True``:
.. ipython:: python
@savefig bar_plot_stacked_ex.png
- df2.plot.bar(stacked=True);
+ df2.plot.bar(stacked=True)
To get horizontal bar plots, use the ``barh`` method:
@@ -222,7 +222,7 @@ To get horizontal bar plots, use the ``barh`` method:
.. ipython:: python
@savefig barh_plot_stacked_ex.png
- df2.plot.barh(stacked=True);
+ df2.plot.barh(stacked=True)
.. _visualization.hist:
@@ -238,7 +238,7 @@ Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Serie
df4 = pd.DataFrame({'a': np.random.randn(1000) + 1, 'b': np.random.randn(1000),
'c': np.random.randn(1000) - 1}, columns=['a', 'b', 'c'])
- plt.figure();
+ plt.figure()
@savefig hist_new.png
df4.plot.hist(alpha=0.5)
@@ -253,7 +253,7 @@ Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`
.. ipython:: python
- plt.figure();
+ plt.figure()
@savefig hist_new_stacked.png
df4.plot.hist(stacked=True, bins=20)
@@ -267,7 +267,7 @@ You can pass other keywords supported by matplotlib ``hist``. For example, horiz
.. ipython:: python
- plt.figure();
+ plt.figure()
@savefig hist_new_kwargs.png
df4['a'].plot.hist(orientation='horizontal', cumulative=True)
@@ -285,7 +285,7 @@ The existing interface ``DataFrame.hist`` to plot histogram still can be used.
.. ipython:: python
- plt.figure();
+ plt.figure()
@savefig hist_plot_ex.png
df['A'].diff().hist()
@@ -403,7 +403,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
:okwarning:
df = pd.DataFrame(np.random.rand(10,5))
- plt.figure();
+ plt.figure()
@savefig box_plot_ex.png
bp = df.boxplot()
@@ -423,7 +423,7 @@ groupings. For instance,
df = pd.DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] )
df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B'])
- plt.figure();
+ plt.figure()
@savefig box_plot_ex2.png
bp = df.boxplot(by='X')
@@ -444,7 +444,7 @@ columns:
df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B'])
df['Y'] = pd.Series(['A','B','A','B','A','B','A','B','A','B'])
- plt.figure();
+ plt.figure()
@savefig box_plot_ex3.png
bp = df.boxplot(column=['Col1','Col2'], by=['X','Y'])
@@ -532,7 +532,7 @@ When input data contains `NaN`, it will be automatically filled by 0. If you wan
df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
@savefig area_plot_stacked.png
- df.plot.area();
+ df.plot.area()
To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified:
@@ -545,7 +545,7 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
.. ipython:: python
@savefig area_plot_unstacked.png
- df.plot.area(stacked=False);
+ df.plot.area(stacked=False)
.. _visualization.scatter:
@@ -570,16 +570,16 @@ These can be specified by ``x`` and ``y`` keywords each.
df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
@savefig scatter_plot.png
- df.plot.scatter(x='a', y='b');
+ df.plot.scatter(x='a', y='b')
To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
It is recommended to specify ``color`` and ``label`` keywords to distinguish each groups.
.. ipython:: python
- ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1');
+ ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1')
@savefig scatter_plot_repeated.png
- df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax);
+ df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax)
.. ipython:: python
:suppress:
@@ -592,7 +592,7 @@ each point:
.. ipython:: python
@savefig scatter_plot_colored.png
- df.plot.scatter(x='a', y='b', c='c', s=50);
+ df.plot.scatter(x='a', y='b', c='c', s=50)
.. ipython:: python
@@ -606,7 +606,7 @@ Below example shows a bubble chart using a dataframe column values as bubble siz
.. ipython:: python
@savefig scatter_plot_bubble.png
- df.plot.scatter(x='a', y='b', s=df['c']*200);
+ df.plot.scatter(x='a', y='b', s=df['c']*200)
.. ipython:: python
:suppress:
@@ -1073,7 +1073,7 @@ layout and formatting of the returned plot:
.. ipython:: python
@savefig series_plot_basic2.png
- plt.figure(); ts.plot(style='k--', label='Series');
+ plt.figure(); ts.plot(style='k--', label='Series')
.. ipython:: python
:suppress:
@@ -1266,7 +1266,7 @@ with the ``subplots`` keyword:
.. ipython:: python
@savefig frame_plot_subplots.png
- df.plot(subplots=True, figsize=(6, 6));
+ df.plot(subplots=True, figsize=(6, 6))
.. ipython:: python
:suppress:
@@ -1289,7 +1289,7 @@ or columns needed, given the other.
.. ipython:: python
@savefig frame_plot_subplots_layout.png
- df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
@@ -1300,7 +1300,7 @@ The above example is identical to using
.. ipython:: python
- df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
@@ -1320,14 +1320,14 @@ otherwise you will see a warning.
.. ipython:: python
- fig, axes = plt.subplots(4, 4, figsize=(6, 6));
- plt.subplots_adjust(wspace=0.5, hspace=0.5);
+ fig, axes = plt.subplots(4, 4, figsize=(6, 6))
+ plt.subplots_adjust(wspace=0.5, hspace=0.5)
target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]]
target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]]
- df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False);
+ df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False)
@savefig frame_plot_subplots_multi_ax.png
- (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False);
+ (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False)
.. ipython:: python
:suppress:
@@ -1354,12 +1354,12 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
.. ipython:: python
fig, axes = plt.subplots(nrows=2, ncols=2)
- df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A');
- df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B');
- df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C');
+ df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A')
+ df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B')
+ df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C')
@savefig series_plot_multi.png
- df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D');
+ df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D')
.. ipython:: python
:suppress:
| Some semicolons were used without reason.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15645 | 2017-03-10T09:47:25Z | 2017-03-10T10:18:56Z | null | 2017-03-10T10:49:08Z |
BUG: Incorrect value updating for groupby.cummin/max (#15635) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index cf3dddc3a2933..47aa4450b897f 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -716,7 +716,7 @@ Performance Improvements
- Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`)
- Improved performance of timeseries plotting with an irregular DatetimeIndex
(or with ``compat_x=True``) (:issue:`15073`).
-- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`, :issue:`15109`, :issue:`15561`)
+- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`, :issue:`15109`, :issue:`15561`, :issue:`15635`)
- Improved performance and reduced memory when indexing with a ``MultiIndex`` (:issue:`15245`)
- When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. (:issue:`14947`)
- Improved performance of ``.rank()`` for categorical data (:issue:`15498`)
diff --git a/pandas/_libs/algos_groupby_helper.pxi.in b/pandas/_libs/algos_groupby_helper.pxi.in
index 9552b4299fe6a..c86a4b4275a0d 100644
--- a/pandas/_libs/algos_groupby_helper.pxi.in
+++ b/pandas/_libs/algos_groupby_helper.pxi.in
@@ -603,7 +603,7 @@ def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, size
- {{dest_type2}} val, min_val = 0
+ {{dest_type2}} val
ndarray[{{dest_type2}}, ndim=2] accum
int64_t lab
@@ -629,8 +629,7 @@ def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
if val == val:
{{endif}}
if val < accum[lab, j]:
- min_val = val
- accum[lab, j] = min_val
+ accum[lab, j] = val
out[i, j] = accum[lab, j]
@@ -645,7 +644,7 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, size
- {{dest_type2}} val, max_val = 0
+ {{dest_type2}} val
ndarray[{{dest_type2}}, ndim=2] accum
int64_t lab
@@ -670,8 +669,7 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
if val == val:
{{endif}}
if val > accum[lab, j]:
- max_val = val
- accum[lab, j] = max_val
+ accum[lab, j] = val
out[i, j] = accum[lab, j]
{{endfor}}
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e846963732883..d7fa3beda0abf 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -4303,6 +4303,17 @@ def test_cummin_cummax(self):
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
+ # GH 15635
+ df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
+ result = df.groupby('a').b.cummax()
+ expected = pd.Series([2, 1, 2], name='b')
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
+ result = df.groupby('a').b.cummin()
+ expected = pd.Series([1, 2, 1], name='b')
+ tm.assert_series_equal(result, expected)
+
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
| - [x] closes #15635
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
Nice catch @adbull. The original implementation was incorrectly setting the incorrect cummin/max value. | https://api.github.com/repos/pandas-dev/pandas/pulls/15642 | 2017-03-10T05:47:17Z | 2017-03-10T11:40:10Z | null | 2017-12-20T02:00:54Z |
DOC: increase recursion limit on sphinx builds | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 6840f76866d2c..0b0de16411e9b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -16,6 +16,14 @@
import inspect
from pandas.compat import u, PY3
+# https://github.com/sphinx-doc/sphinx/pull/2325/files
+# Workaround for sphinx-build recursion limit overflow:
+# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
+# RuntimeError: maximum recursion depth exceeded while pickling an object
+#
+# Python's default allowed recursion depth is 1000.
+sys.setrecursionlimit(5000)
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
| xref https://github.com/pandas-dev/pandas/pull/15637#issuecomment-285558452
hopefully should eliminate the non-deterministic pickle errors in doc-building :< | https://api.github.com/repos/pandas-dev/pandas/pulls/15641 | 2017-03-10T03:05:27Z | 2017-03-10T08:24:37Z | 2017-03-10T08:24:37Z | 2017-03-10T08:26:11Z |
BUG: Dense ranking with percent now uses 100% basis | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 233816600ec0f..bb513605b1c94 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -907,6 +907,7 @@ Offsets
Numeric
^^^^^^^
+- Bug in :meth:`DataFrame.rank` and :meth:`Series.rank` when ``method='dense'`` and ``pct=True`` in which percentile ranks were not being used with the number of distinct observations (:issue:`15630`)
- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`)
- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`)
- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index 2f40bd4349a2e..9348d7525c307 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -213,7 +213,10 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True,
sum_ranks = dups = 0
{{endif}}
if pct:
- return ranks / count
+ if tiebreak == TIEBREAK_DENSE:
+ return ranks / total_tie_count
+ else:
+ return ranks / count
else:
return ranks
@@ -385,7 +388,10 @@ def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average',
ranks[i, argsorted[i, z]] = total_tie_count
sum_ranks = dups = 0
if pct:
- ranks[i, :] /= count
+ if tiebreak == TIEBREAK_DENSE:
+ ranks[i, :] /= total_tie_count
+ else:
+ ranks[i, :] /= count
if axis == 0:
return ranks.T
else:
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 02fe0edf95577..b8ba408b54715 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
import pytest
-from datetime import timedelta, datetime
-from distutils.version import LooseVersion
-from numpy import nan
import numpy as np
+import pandas.util.testing as tm
-from pandas import Series, DataFrame
+from distutils.version import LooseVersion
+from datetime import timedelta, datetime
+from numpy import nan
-from pandas.compat import product
from pandas.util.testing import assert_frame_equal
-import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
+from pandas import Series, DataFrame
+from pandas.compat import product
class TestRank(TestData):
@@ -266,3 +266,34 @@ def _check2d(df, expected, method='average', axis=0):
continue
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, results[method], method=method, axis=axis)
+
+
+@pytest.mark.parametrize(
+ "method,exp", [("dense",
+ [[1., 1., 1.],
+ [1., 0.5, 2. / 3],
+ [1., 0.5, 1. / 3]]),
+ ("min",
+ [[1. / 3, 1., 1.],
+ [1. / 3, 1. / 3, 2. / 3],
+ [1. / 3, 1. / 3, 1. / 3]]),
+ ("max",
+ [[1., 1., 1.],
+ [1., 2. / 3, 2. / 3],
+ [1., 2. / 3, 1. / 3]]),
+ ("average",
+ [[2. / 3, 1., 1.],
+ [2. / 3, 0.5, 2. / 3],
+ [2. / 3, 0.5, 1. / 3]]),
+ ("first",
+ [[1. / 3, 1., 1.],
+ [2. / 3, 1. / 3, 2. / 3],
+ [3. / 3, 2. / 3, 1. / 3]])])
+def test_rank_pct_true(method, exp):
+ # see gh-15630.
+
+ df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
+ result = df.rank(method=method, pct=True)
+
+ expected = DataFrame(exp)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index 6220ce8ff7669..d15325ca8ef0e 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -376,3 +376,96 @@ def test_rank_modify_inplace(self):
s.rank()
result = s
assert_series_equal(result, expected)
+
+
+# GH15630, pct should be on 100% basis when method='dense'
+
+@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
+@pytest.mark.parametrize('ser, exp', [
+ ([1], [1.]),
+ ([1, 2], [1. / 2, 2. / 2]),
+ ([2, 2], [1., 1.]),
+ ([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
+ ([1, 2, 2], [1. / 2, 2. / 2, 2. / 2]),
+ ([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
+ ([1, 1, 5, 5, 3], [1. / 3, 1. / 3, 3. / 3, 3. / 3, 2. / 3]),
+ ([1, 1, 3, 3, 5, 5], [1. / 3, 1. / 3, 2. / 3, 2. / 3, 3. / 3, 3. / 3]),
+ ([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
+def test_rank_dense_pct(dtype, ser, exp):
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='dense', pct=True)
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
+@pytest.mark.parametrize('ser, exp', [
+ ([1], [1.]),
+ ([1, 2], [1. / 2, 2. / 2]),
+ ([2, 2], [1. / 2, 1. / 2]),
+ ([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
+ ([1, 2, 2], [1. / 3, 2. / 3, 2. / 3]),
+ ([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
+ ([1, 1, 5, 5, 3], [1. / 5, 1. / 5, 4. / 5, 4. / 5, 3. / 5]),
+ ([1, 1, 3, 3, 5, 5], [1. / 6, 1. / 6, 3. / 6, 3. / 6, 5. / 6, 5. / 6]),
+ ([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
+def test_rank_min_pct(dtype, ser, exp):
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='min', pct=True)
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
+@pytest.mark.parametrize('ser, exp', [
+ ([1], [1.]),
+ ([1, 2], [1. / 2, 2. / 2]),
+ ([2, 2], [1., 1.]),
+ ([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
+ ([1, 2, 2], [1. / 3, 3. / 3, 3. / 3]),
+ ([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
+ ([1, 1, 5, 5, 3], [2. / 5, 2. / 5, 5. / 5, 5. / 5, 3. / 5]),
+ ([1, 1, 3, 3, 5, 5], [2. / 6, 2. / 6, 4. / 6, 4. / 6, 6. / 6, 6. / 6]),
+ ([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
+def test_rank_max_pct(dtype, ser, exp):
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='max', pct=True)
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
+@pytest.mark.parametrize('ser, exp', [
+ ([1], [1.]),
+ ([1, 2], [1. / 2, 2. / 2]),
+ ([2, 2], [1.5 / 2, 1.5 / 2]),
+ ([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
+ ([1, 2, 2], [1. / 3, 2.5 / 3, 2.5 / 3]),
+ ([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
+ ([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3. / 5]),
+ ([1, 1, 3, 3, 5, 5],
+ [1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),
+ ([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
+def test_rank_average_pct(dtype, ser, exp):
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='average', pct=True)
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('dtype', ['f8', 'i8'])
+@pytest.mark.parametrize('ser, exp', [
+ ([1], [1.]),
+ ([1, 2], [1. / 2, 2. / 2]),
+ ([2, 2], [1. / 2, 2. / 2.]),
+ ([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
+ ([1, 2, 2], [1. / 3, 2. / 3, 3. / 3]),
+ ([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
+ ([1, 1, 5, 5, 3], [1. / 5, 2. / 5, 4. / 5, 5. / 5, 3. / 5]),
+ ([1, 1, 3, 3, 5, 5], [1. / 6, 2. / 6, 3. / 6, 4. / 6, 5. / 6, 6. / 6]),
+ ([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
+def test_rank_first_pct(dtype, ser, exp):
+ s = Series(ser).astype(dtype)
+ result = s.rank(method='first', pct=True)
+ expected = Series(exp).astype(result.dtype)
+ assert_series_equal(result, expected)
| - `DataFrame.rank()` and `Series.rank()` when `method='dense'` and
`pct=True` now scales to 100%.
See #15630
- [ ] closes #15630
- [ ] tests added / passed
- [X] passes ``git diff upstream/master | flake8 --diff``
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15639 | 2017-03-10T01:09:51Z | 2018-03-09T00:54:54Z | 2018-03-09T00:54:54Z | 2018-03-09T00:55:02Z |
DEPR: remove more .ix warnings from tests | diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 73167393cf35d..c7637a00910c6 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -1,5 +1,262 @@
""" common utilities """
+import itertools
+from warnings import catch_warnings
+import numpy as np
+
+from pandas.compat import lrange
+from pandas.types.common import is_scalar
+from pandas import Series, DataFrame, Panel, date_range, UInt64Index
+from pandas.util import testing as tm
+from pandas.formats.printing import pprint_thing
+
+_verbose = False
+
def _mklbl(prefix, n):
return ["%s%s" % (prefix, i) for i in range(n)]
+
+
+def _axify(obj, key, axis):
+ # create a tuple accessor
+ axes = [slice(None)] * obj.ndim
+ axes[axis] = key
+ return tuple(axes)
+
+
+class Base(object):
+ """ indexing comprehensive base class """
+
+ _objs = set(['series', 'frame', 'panel'])
+ _typs = set(['ints', 'uints', 'labels', 'mixed',
+ 'ts', 'floats', 'empty', 'ts_rev'])
+
+ def setUp(self):
+
+ self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
+ self.frame_ints = DataFrame(np.random.randn(4, 4),
+ index=lrange(0, 8, 2),
+ columns=lrange(0, 12, 3))
+ self.panel_ints = Panel(np.random.rand(4, 4, 4),
+ items=lrange(0, 8, 2),
+ major_axis=lrange(0, 12, 3),
+ minor_axis=lrange(0, 16, 4))
+
+ self.series_uints = Series(np.random.rand(4),
+ index=UInt64Index(lrange(0, 8, 2)))
+ self.frame_uints = DataFrame(np.random.randn(4, 4),
+ index=UInt64Index(lrange(0, 8, 2)),
+ columns=UInt64Index(lrange(0, 12, 3)))
+ self.panel_uints = Panel(np.random.rand(4, 4, 4),
+ items=UInt64Index(lrange(0, 8, 2)),
+ major_axis=UInt64Index(lrange(0, 12, 3)),
+ minor_axis=UInt64Index(lrange(0, 16, 4)))
+
+ self.series_labels = Series(np.random.randn(4), index=list('abcd'))
+ self.frame_labels = DataFrame(np.random.randn(4, 4),
+ index=list('abcd'), columns=list('ABCD'))
+ self.panel_labels = Panel(np.random.randn(4, 4, 4),
+ items=list('abcd'),
+ major_axis=list('ABCD'),
+ minor_axis=list('ZYXW'))
+
+ self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
+ self.frame_mixed = DataFrame(np.random.randn(4, 4),
+ index=[2, 4, 'null', 8])
+ self.panel_mixed = Panel(np.random.randn(4, 4, 4),
+ items=[2, 4, 'null', 8])
+
+ self.series_ts = Series(np.random.randn(4),
+ index=date_range('20130101', periods=4))
+ self.frame_ts = DataFrame(np.random.randn(4, 4),
+ index=date_range('20130101', periods=4))
+ self.panel_ts = Panel(np.random.randn(4, 4, 4),
+ items=date_range('20130101', periods=4))
+
+ dates_rev = (date_range('20130101', periods=4)
+ .sort_values(ascending=False))
+ self.series_ts_rev = Series(np.random.randn(4),
+ index=dates_rev)
+ self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
+ index=dates_rev)
+ self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
+ items=dates_rev)
+
+ self.frame_empty = DataFrame({})
+ self.series_empty = Series({})
+ self.panel_empty = Panel({})
+
+ # form agglomerates
+ for o in self._objs:
+
+ d = dict()
+ for t in self._typs:
+ d[t] = getattr(self, '%s_%s' % (o, t), None)
+
+ setattr(self, o, d)
+
+ def generate_indices(self, f, values=False):
+ """ generate the indicies
+ if values is True , use the axis values
+ is False, use the range
+ """
+
+ axes = f.axes
+ if values:
+ axes = [lrange(len(a)) for a in axes]
+
+ return itertools.product(*axes)
+
+ def get_result(self, obj, method, key, axis):
+ """ return the result for this obj with this key and this axis """
+
+ if isinstance(key, dict):
+ key = key[axis]
+
+ # use an artifical conversion to map the key as integers to the labels
+ # so ix can work for comparisions
+ if method == 'indexer':
+ method = 'ix'
+ key = obj._get_axis(axis)[key]
+
+ # in case we actually want 0 index slicing
+ try:
+ with catch_warnings(record=True):
+ xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
+ except:
+ xp = getattr(obj, method).__getitem__(key)
+
+ return xp
+
+ def get_value(self, f, i, values=False):
+ """ return the value for the location i """
+
+ # check agains values
+ if values:
+ return f.values[i]
+
+ # this is equiv of f[col][row].....
+ # v = f
+ # for a in reversed(i):
+ # v = v.__getitem__(a)
+ # return v
+ with catch_warnings(record=True):
+ return f.ix[i]
+
+ def check_values(self, f, func, values=False):
+
+ if f is None:
+ return
+ axes = f.axes
+ indicies = itertools.product(*axes)
+
+ for i in indicies:
+ result = getattr(f, func)[i]
+
+ # check agains values
+ if values:
+ expected = f.values[i]
+ else:
+ expected = f
+ for a in reversed(i):
+ expected = expected.__getitem__(a)
+
+ tm.assert_almost_equal(result, expected)
+
+ def check_result(self, name, method1, key1, method2, key2, typs=None,
+ objs=None, axes=None, fails=None):
+ def _eq(t, o, a, obj, k1, k2):
+ """ compare equal for these 2 keys """
+
+ if a is not None and a > obj.ndim - 1:
+ return
+
+ def _print(result, error=None):
+ if error is not None:
+ error = str(error)
+ v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
+ "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
+ (name, result, t, o, method1, method2, a, error or ''))
+ if _verbose:
+ pprint_thing(v)
+
+ try:
+ rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
+
+ try:
+ xp = self.get_result(obj, method2, k2, a)
+ except:
+ result = 'no comp'
+ _print(result)
+ return
+
+ detail = None
+
+ try:
+ if is_scalar(rs) and is_scalar(xp):
+ self.assertEqual(rs, xp)
+ elif xp.ndim == 1:
+ tm.assert_series_equal(rs, xp)
+ elif xp.ndim == 2:
+ tm.assert_frame_equal(rs, xp)
+ elif xp.ndim == 3:
+ tm.assert_panel_equal(rs, xp)
+ result = 'ok'
+ except AssertionError as e:
+ detail = str(e)
+ result = 'fail'
+
+ # reverse the checks
+ if fails is True:
+ if result == 'fail':
+ result = 'ok (fail)'
+
+ _print(result)
+ if not result.startswith('ok'):
+ raise AssertionError(detail)
+
+ except AssertionError:
+ raise
+ except Exception as detail:
+
+ # if we are in fails, the ok, otherwise raise it
+ if fails is not None:
+ if isinstance(detail, fails):
+ result = 'ok (%s)' % type(detail).__name__
+ _print(result)
+ return
+
+ result = type(detail).__name__
+ raise AssertionError(_print(result, error=detail))
+
+ if typs is None:
+ typs = self._typs
+
+ if objs is None:
+ objs = self._objs
+
+ if axes is not None:
+ if not isinstance(axes, (tuple, list)):
+ axes = [axes]
+ else:
+ axes = list(axes)
+ else:
+ axes = [0, 1, 2]
+
+ # check
+ for o in objs:
+ if o not in self._objs:
+ continue
+
+ d = getattr(self, o)
+ for a in axes:
+ for t in typs:
+ if t not in self._typs:
+ continue
+
+ obj = d[t]
+ if obj is not None:
+ obj = obj.copy()
+
+ k2 = key2
+ _eq(t, o, a, obj, key1, k2)
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 0e921aaf826f9..72e704537ba3f 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -1,3 +1,5 @@
+from warnings import catch_warnings
+
import numpy as np
import pandas as pd
from pandas.core import common as com
@@ -41,13 +43,13 @@ def test_setitem_cache_updating(self):
# ref the cache
if do_ref:
- df.ix[0, "c"]
+ df.loc[0, "c"]
# set it
- df.ix[7, 'c'] = 1
+ df.loc[7, 'c'] = 1
- self.assertEqual(df.ix[0, 'c'], 0.0)
- self.assertEqual(df.ix[7, 'c'], 1.0)
+ self.assertEqual(df.loc[0, 'c'], 0.0)
+ self.assertEqual(df.loc[7, 'c'], 1.0)
# GH 7084
# not updating cache on series setting with slices
@@ -226,21 +228,21 @@ def random_text(nobs=100):
# explicity copy
indexer = df.letters.apply(lambda x: len(x) > 10)
- df = df.ix[indexer].copy()
+ df = df.loc[indexer].copy()
self.assertIsNone(df.is_copy)
df['letters'] = df['letters'].apply(str.lower)
# implicity take
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
- df = df.ix[indexer]
+ df = df.loc[indexer]
self.assertIsNotNone(df.is_copy)
df['letters'] = df['letters'].apply(str.lower)
# implicity take 2
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
- df = df.ix[indexer]
+ df = df.loc[indexer]
self.assertIsNotNone(df.is_copy)
df.loc[:, 'letters'] = df['letters'].apply(str.lower)
@@ -251,7 +253,8 @@ def random_text(nobs=100):
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
- df.ix[indexer, 'letters'] = df.ix[indexer, 'letters'].apply(str.lower)
+ df.loc[indexer, 'letters'] = (
+ df.loc[indexer, 'letters'].apply(str.lower))
# an identical take, so no copy
df = DataFrame({'a': [1]}).dropna()
@@ -312,12 +315,12 @@ def f():
D=list('abcde')))
def f():
- df.ix[2]['D'] = 'foo'
+ df.loc[2]['D'] = 'foo'
self.assertRaises(com.SettingWithCopyError, f)
def f():
- df.ix[2]['C'] = 'foo'
+ df.loc[2]['C'] = 'foo'
self.assertRaises(com.SettingWithCopyError, f)
@@ -356,3 +359,76 @@ def test_detect_chained_assignment_warnings(self):
with tm.assert_produces_warning(
expected_warning=com.SettingWithCopyWarning):
df.loc[0]['A'] = 111
+
+ def test_chained_getitem_with_lists(self):
+
+ # GH6394
+ # Regression in chained getitem indexing with embedded list-like from
+ # 0.12
+ def check(result, expected):
+ tm.assert_numpy_array_equal(result, expected)
+ tm.assertIsInstance(result, np.ndarray)
+
+ df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
+ expected = df['A'].iloc[2]
+ result = df.loc[2, 'A']
+ check(result, expected)
+ result2 = df.iloc[2]['A']
+ check(result2, expected)
+ result3 = df['A'].loc[2]
+ check(result3, expected)
+ result4 = df['A'].iloc[2]
+ check(result4, expected)
+
+ def test_cache_updating(self):
+ # GH 4939, make sure to update the cache on setitem
+
+ df = tm.makeDataFrame()
+ df['A'] # cache series
+ with catch_warnings(record=True):
+ df.ix["Hello Friend"] = df.ix[0]
+ self.assertIn("Hello Friend", df['A'].index)
+ self.assertIn("Hello Friend", df['B'].index)
+
+ with catch_warnings(record=True):
+ panel = tm.makePanel()
+ panel.ix[0] # get first item into cache
+ panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
+ self.assertIn("A+1", panel.ix[0].columns)
+ self.assertIn("A+1", panel.ix[1].columns)
+
+ # 5216
+ # make sure that we don't try to set a dead cache
+ a = np.random.rand(10, 3)
+ df = DataFrame(a, columns=['x', 'y', 'z'])
+ tuples = [(i, j) for i in range(5) for j in range(2)]
+ index = MultiIndex.from_tuples(tuples)
+ df.index = index
+
+ # setting via chained assignment
+ # but actually works, since everything is a view
+ df.loc[0]['z'].iloc[0] = 1.
+ result = df.loc[(0, 0), 'z']
+ self.assertEqual(result, 1)
+
+ # correct setting
+ df.loc[(0, 0), 'z'] = 2
+ result = df.loc[(0, 0), 'z']
+ self.assertEqual(result, 2)
+
+ # 10264
+ df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
+ 'a', 'b', 'c', 'd', 'e'], index=range(5))
+ df['f'] = 0
+ df.f.values[3] = 1
+
+ # TODO(wesm): unused?
+ # y = df.iloc[np.arange(2, len(df))]
+
+ df.f.values[3] = 2
+ expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
+ 'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
+ expected.at[3, 'f'] = 2
+ tm.assert_frame_equal(df, expected)
+ expected = Series([0, 0, 0, 2, 0], name='f')
+ tm.assert_series_equal(df.f, expected)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
new file mode 100644
index 0000000000000..517194835ca73
--- /dev/null
+++ b/pandas/tests/indexing/test_iloc.py
@@ -0,0 +1,590 @@
+""" test positional based indexing with iloc """
+
+from warnings import catch_warnings
+import numpy as np
+
+import pandas as pd
+from pandas.compat import lrange, lmap
+from pandas import Series, DataFrame, date_range, concat, isnull
+from pandas.util import testing as tm
+from pandas.tests.indexing.common import Base
+
+
+class TestiLoc(Base, tm.TestCase):
+
+ def test_iloc_exceeds_bounds(self):
+
+ # GH6296
+ # iloc should allow indexers that exceed the bounds
+ df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
+ expected = df
+
+ # lists of positions should raise IndexError!
+ with tm.assertRaisesRegexp(IndexError,
+ 'positional indexers are out-of-bounds'):
+ df.iloc[:, [0, 1, 2, 3, 4, 5]]
+ self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
+ self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
+ self.assertRaises(IndexError, lambda: df.iloc[[100]])
+
+ s = df['A']
+ self.assertRaises(IndexError, lambda: s.iloc[[100]])
+ self.assertRaises(IndexError, lambda: s.iloc[[-100]])
+
+ # still raise on a single indexer
+ msg = 'single positional indexer is out-of-bounds'
+ with tm.assertRaisesRegexp(IndexError, msg):
+ df.iloc[30]
+ self.assertRaises(IndexError, lambda: df.iloc[-30])
+
+ # GH10779
+ # single positive/negative indexer exceeding Series bounds should raise
+ # an IndexError
+ with tm.assertRaisesRegexp(IndexError, msg):
+ s.iloc[30]
+ self.assertRaises(IndexError, lambda: s.iloc[-30])
+
+ # slices are ok
+ result = df.iloc[:, 4:10] # 0 < start < len < stop
+ expected = df.iloc[:, 4:]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, -4:-10] # stop < 0 < start < len
+ expected = df.iloc[:, :0]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
+ expected = df.iloc[:, :4:-1]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
+ expected = df.iloc[:, 4::-1]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, -10:4] # start < 0 < stop < len
+ expected = df.iloc[:, :4]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, 10:4] # 0 < stop < len < start
+ expected = df.iloc[:, :0]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
+ expected = df.iloc[:, :0]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, 10:11] # 0 < len < start < stop
+ expected = df.iloc[:, :0]
+ tm.assert_frame_equal(result, expected)
+
+ # slice bounds exceeding is ok
+ result = s.iloc[18:30]
+ expected = s.iloc[18:]
+ tm.assert_series_equal(result, expected)
+
+ result = s.iloc[30:]
+ expected = s.iloc[:0]
+ tm.assert_series_equal(result, expected)
+
+ result = s.iloc[30::-1]
+ expected = s.iloc[::-1]
+ tm.assert_series_equal(result, expected)
+
+ # doc example
+ def check(result, expected):
+ str(result)
+ result.dtypes
+ tm.assert_frame_equal(result, expected)
+
+ dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
+ check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
+ check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
+ check(dfl.iloc[4:6], dfl.iloc[[4]])
+
+ self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
+ self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
+
+ def test_iloc_getitem_int(self):
+
+ # integer
+ self.check_result('integer', 'iloc', 2, 'ix',
+ {0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
+ self.check_result('integer', 'iloc', 2, 'indexer', 2,
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ def test_iloc_getitem_neg_int(self):
+
+ # neg integer
+ self.check_result('neg int', 'iloc', -1, 'ix',
+ {0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
+ self.check_result('neg int', 'iloc', -1, 'indexer', -1,
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ def test_iloc_getitem_list_int(self):
+
+ # list of ints
+ self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
+ {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
+ typs=['ints', 'uints'])
+ self.check_result('list int', 'iloc', [2], 'ix',
+ {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
+ self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ # array of ints (GH5006), make sure that a single indexer is returning
+ # the correct type
+ self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
+ {0: [0, 2, 4],
+ 1: [0, 3, 6],
+ 2: [0, 4, 8]}, typs=['ints', 'uints'])
+ self.check_result('array int', 'iloc', np.array([2]), 'ix',
+ {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
+ self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
+ [0, 1, 2],
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ def test_iloc_getitem_neg_int_can_reach_first_index(self):
+ # GH10547 and GH10779
+ # negative integers should be able to reach index 0
+ df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
+ s = df['A']
+
+ expected = df.iloc[0]
+ result = df.iloc[-3]
+ tm.assert_series_equal(result, expected)
+
+ expected = df.iloc[[0]]
+ result = df.iloc[[-3]]
+ tm.assert_frame_equal(result, expected)
+
+ expected = s.iloc[0]
+ result = s.iloc[-3]
+ self.assertEqual(result, expected)
+
+ expected = s.iloc[[0]]
+ result = s.iloc[[-3]]
+ tm.assert_series_equal(result, expected)
+
+ # check the length 1 Series case highlighted in GH10547
+ expected = pd.Series(['a'], index=['A'])
+ result = expected.iloc[[-1]]
+ tm.assert_series_equal(result, expected)
+
+ def test_iloc_getitem_dups(self):
+
+ # no dups in panel (bug?)
+ self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
+ {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
+ objs=['series', 'frame'], typs=['ints', 'uints'])
+
+ # GH 6766
+ df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
+ df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
+ df = concat([df1, df2], axis=1)
+
+ # cross-sectional indexing
+ result = df.iloc[0, 0]
+ self.assertTrue(isnull(result))
+
+ result = df.iloc[0, :]
+ expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
+ name=0)
+ tm.assert_series_equal(result, expected)
+
+ def test_iloc_getitem_array(self):
+
+ # array like
+ s = Series(index=lrange(1, 4))
+ self.check_result('array like', 'iloc', s.index, 'ix',
+ {0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
+ typs=['ints', 'uints'])
+
+ def test_iloc_getitem_bool(self):
+
+ # boolean indexers
+ b = [True, False, True, False, ]
+ self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
+ self.check_result('bool', 'iloc', b, 'ix', b,
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ def test_iloc_getitem_slice(self):
+
+ # slices
+ self.check_result('slice', 'iloc', slice(1, 3), 'ix',
+ {0: [2, 4], 1: [3, 6], 2: [4, 8]},
+ typs=['ints', 'uints'])
+ self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
+ slice(1, 3),
+ typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
+ fails=IndexError)
+
+ def test_iloc_getitem_slice_dups(self):
+
+ df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
+ df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
+ columns=['A', 'C'])
+
+ # axis=1
+ df = concat([df1, df2], axis=1)
+ tm.assert_frame_equal(df.iloc[:, :4], df1)
+ tm.assert_frame_equal(df.iloc[:, 4:], df2)
+
+ df = concat([df2, df1], axis=1)
+ tm.assert_frame_equal(df.iloc[:, :2], df2)
+ tm.assert_frame_equal(df.iloc[:, 2:], df1)
+
+ exp = concat([df2, df1.iloc[:, [0]]], axis=1)
+ tm.assert_frame_equal(df.iloc[:, 0:3], exp)
+
+ # axis=0
+ df = concat([df, df], axis=0)
+ tm.assert_frame_equal(df.iloc[0:10, :2], df2)
+ tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
+ tm.assert_frame_equal(df.iloc[10:, :2], df2)
+ tm.assert_frame_equal(df.iloc[10:, 2:], df1)
+
+ def test_iloc_setitem(self):
+ df = self.frame_ints
+
+ df.iloc[1, 1] = 1
+ result = df.iloc[1, 1]
+ self.assertEqual(result, 1)
+
+ df.iloc[:, 2:3] = 0
+ expected = df.iloc[:, 2:3]
+ result = df.iloc[:, 2:3]
+ tm.assert_frame_equal(result, expected)
+
+ # GH5771
+ s = Series(0, index=[4, 5, 6])
+ s.iloc[1:2] += 1
+ expected = Series([0, 1, 0], index=[4, 5, 6])
+ tm.assert_series_equal(s, expected)
+
+ def test_iloc_setitem_list(self):
+
+ # setitem with an iloc list
+ df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
+ columns=["A", "B", "C"])
+ df.iloc[[0, 1], [1, 2]]
+ df.iloc[[0, 1], [1, 2]] += 100
+
+ expected = DataFrame(
+ np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
+ index=["A", "B", "C"], columns=["A", "B", "C"])
+ tm.assert_frame_equal(df, expected)
+
+ def test_iloc_setitem_dups(self):
+
+ # GH 6766
+ # iloc with a mask aligning from another iloc
+ df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
+ df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
+ df = concat([df1, df2], axis=1)
+
+ expected = df.fillna(3)
+ expected['A'] = expected['A'].astype('float64')
+ inds = np.isnan(df.iloc[:, 0])
+ mask = inds[inds].index
+ df.iloc[mask, 0] = df.iloc[mask, 2]
+ tm.assert_frame_equal(df, expected)
+
+ # del a dup column across blocks
+ expected = DataFrame({0: [1, 2], 1: [3, 4]})
+ expected.columns = ['B', 'B']
+ del df['A']
+ tm.assert_frame_equal(df, expected)
+
+ # assign back to self
+ df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
+ tm.assert_frame_equal(df, expected)
+
+ # reversed x 2
+ df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
+ drop=True)
+ df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
+ drop=True)
+ tm.assert_frame_equal(df, expected)
+
+ def test_iloc_getitem_frame(self):
+ df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
+ columns=lrange(0, 8, 2))
+
+ result = df.iloc[2]
+ with catch_warnings(record=True):
+ exp = df.ix[4]
+ tm.assert_series_equal(result, exp)
+
+ result = df.iloc[2, 2]
+ with catch_warnings(record=True):
+ exp = df.ix[4, 4]
+ self.assertEqual(result, exp)
+
+ # slice
+ result = df.iloc[4:8]
+ with catch_warnings(record=True):
+ expected = df.ix[8:14]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:, 2:3]
+ with catch_warnings(record=True):
+ expected = df.ix[:, 4:5]
+ tm.assert_frame_equal(result, expected)
+
+ # list of integers
+ result = df.iloc[[0, 1, 3]]
+ with catch_warnings(record=True):
+ expected = df.ix[[0, 2, 6]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[[0, 1, 3], [0, 1]]
+ with catch_warnings(record=True):
+ expected = df.ix[[0, 2, 6], [0, 2]]
+ tm.assert_frame_equal(result, expected)
+
+ # neg indices
+ result = df.iloc[[-1, 1, 3], [-1, 1]]
+ with catch_warnings(record=True):
+ expected = df.ix[[18, 2, 6], [6, 2]]
+ tm.assert_frame_equal(result, expected)
+
+ # dups indices
+ result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
+ with catch_warnings(record=True):
+ expected = df.ix[[18, 18, 2, 6], [6, 2]]
+ tm.assert_frame_equal(result, expected)
+
+ # with index-like
+ s = Series(index=lrange(1, 5))
+ result = df.iloc[s.index]
+ with catch_warnings(record=True):
+ expected = df.ix[[2, 4, 6, 8]]
+ tm.assert_frame_equal(result, expected)
+
+ def test_iloc_getitem_labelled_frame(self):
+ # try with labelled frame
+ df = DataFrame(np.random.randn(10, 4),
+ index=list('abcdefghij'), columns=list('ABCD'))
+
+ result = df.iloc[1, 1]
+ exp = df.loc['b', 'B']
+ self.assertEqual(result, exp)
+
+ result = df.iloc[:, 2:3]
+ expected = df.loc[:, ['C']]
+ tm.assert_frame_equal(result, expected)
+
+ # negative indexing
+ result = df.iloc[-1, -1]
+ exp = df.loc['j', 'D']
+ self.assertEqual(result, exp)
+
+ # out-of-bounds exception
+ self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
+
+ # trying to use a label
+ self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
+
+ def test_iloc_getitem_doc_issue(self):
+
+ # multi axis slicing issue with single block
+ # surfaced in GH 6059
+
+ arr = np.random.randn(6, 4)
+ index = date_range('20130101', periods=6)
+ columns = list('ABCD')
+ df = DataFrame(arr, index=index, columns=columns)
+
+ # defines ref_locs
+ df.describe()
+
+ result = df.iloc[3:5, 0:2]
+ str(result)
+ result.dtypes
+
+ expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
+ columns=columns[0:2])
+ tm.assert_frame_equal(result, expected)
+
+ # for dups
+ df.columns = list('aaaa')
+ result = df.iloc[3:5, 0:2]
+ str(result)
+ result.dtypes
+
+ expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
+ columns=list('aa'))
+ tm.assert_frame_equal(result, expected)
+
+ # related
+ arr = np.random.randn(6, 4)
+ index = list(range(0, 12, 2))
+ columns = list(range(0, 8, 2))
+ df = DataFrame(arr, index=index, columns=columns)
+
+ df._data.blocks[0].mgr_locs
+ result = df.iloc[1:5, 2:4]
+ str(result)
+ result.dtypes
+ expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
+ columns=columns[2:4])
+ tm.assert_frame_equal(result, expected)
+
+ def test_iloc_setitem_series(self):
+ df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
+ columns=list('ABCD'))
+
+ df.iloc[1, 1] = 1
+ result = df.iloc[1, 1]
+ self.assertEqual(result, 1)
+
+ df.iloc[:, 2:3] = 0
+ expected = df.iloc[:, 2:3]
+ result = df.iloc[:, 2:3]
+ tm.assert_frame_equal(result, expected)
+
+ s = Series(np.random.randn(10), index=lrange(0, 20, 2))
+
+ s.iloc[1] = 1
+ result = s.iloc[1]
+ self.assertEqual(result, 1)
+
+ s.iloc[:4] = 0
+ expected = s.iloc[:4]
+ result = s.iloc[:4]
+ tm.assert_series_equal(result, expected)
+
+ s = Series([-1] * 6)
+ s.iloc[0::2] = [0, 2, 4]
+ s.iloc[1::2] = [1, 3, 5]
+ result = s
+ expected = Series([0, 1, 2, 3, 4, 5])
+ tm.assert_series_equal(result, expected)
+
+ def test_iloc_setitem_list_of_lists(self):
+
+ # GH 7551
+ # list-of-list is set incorrectly in mixed vs. single dtyped frames
+ df = DataFrame(dict(A=np.arange(5, dtype='int64'),
+ B=np.arange(5, 10, dtype='int64')))
+ df.iloc[2:4] = [[10, 11], [12, 13]]
+ expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame(
+ dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
+ df.iloc[2:4] = [['x', 11], ['y', 13]]
+ expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
+ B=[5, 6, 11, 13, 9]))
+ tm.assert_frame_equal(df, expected)
+
+ def test_iloc_mask(self):
+
+ # GH 3631, iloc with a mask (of a series) should raise
+ df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
+ mask = (df.a % 2 == 0)
+ self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
+ mask.index = lrange(len(mask))
+ self.assertRaises(NotImplementedError, df.iloc.__getitem__,
+ tuple([mask]))
+
+ # ndarray ok
+ result = df.iloc[np.array([True] * len(mask), dtype=bool)]
+ tm.assert_frame_equal(result, df)
+
+ # the possibilities
+ locs = np.arange(4)
+ nums = 2 ** locs
+ reps = lmap(bin, nums)
+ df = DataFrame({'locs': locs, 'nums': nums}, reps)
+
+ expected = {
+ (None, ''): '0b1100',
+ (None, '.loc'): '0b1100',
+ (None, '.iloc'): '0b1100',
+ ('index', ''): '0b11',
+ ('index', '.loc'): '0b11',
+ ('index', '.iloc'): ('iLocation based boolean indexing '
+ 'cannot use an indexable as a mask'),
+ ('locs', ''): 'Unalignable boolean Series provided as indexer '
+ '(index of the boolean Series and of the indexed '
+ 'object do not match',
+ ('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
+ '(index of the boolean Series and of the '
+ 'indexed object do not match',
+ ('locs', '.iloc'): ('iLocation based boolean indexing on an '
+ 'integer type is not available'),
+ }
+
+ # UserWarnings from reindex of a boolean mask
+ with catch_warnings(record=True):
+ result = dict()
+ for idx in [None, 'index', 'locs']:
+ mask = (df.nums > 2).values
+ if idx:
+ mask = Series(mask, list(reversed(getattr(df, idx))))
+ for method in ['', '.loc', '.iloc']:
+ try:
+ if method:
+ accessor = getattr(df, method[1:])
+ else:
+ accessor = df
+ ans = str(bin(accessor[mask]['nums'].sum()))
+ except Exception as e:
+ ans = str(e)
+
+ key = tuple([idx, method])
+ r = expected.get(key)
+ if r != ans:
+ raise AssertionError(
+ "[%s] does not match [%s], received [%s]"
+ % (key, ans, r))
+
+ def test_iloc_non_unique_indexing(self):
+
+ # GH 4017, non-unique indexing (on the axis)
+ df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
+ idx = np.array(lrange(30)) * 99
+ expected = df.iloc[idx]
+
+ df3 = pd.concat([df, 2 * df, 3 * df])
+ result = df3.iloc[idx]
+
+ tm.assert_frame_equal(result, expected)
+
+ df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
+ df2 = pd.concat([df2, 2 * df2, 3 * df2])
+
+ sidx = df2.index.to_series()
+ expected = df2.iloc[idx[idx <= sidx.max()]]
+
+ new_list = []
+ for r, s in expected.iterrows():
+ new_list.append(s)
+ new_list.append(s * 2)
+ new_list.append(s * 3)
+
+ expected = DataFrame(new_list)
+ expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
+ ])
+ result = df2.loc[idx]
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_iloc_empty_list_indexer_is_ok(self):
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5, 2)
+ # vertical empty
+ tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
+ check_index_type=True, check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
+ check_index_type=True, check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
+ check_index_type=True,
+ check_column_type=True)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 4502e0171dfbe..0d6ca383a1be1 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1,1648 +1,64 @@
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
-import itertools
-import warnings
+
+""" test fancy indexing & misc """
+
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
- is_float_dtype,
- is_scalar)
-from pandas.compat import range, lrange, lzip, StringIO, lmap
-from pandas._libs.tslib import NaT
-from numpy import nan
-from numpy.random import randn
+ is_float_dtype)
+from pandas.compat import range, lrange, lzip, StringIO
import numpy as np
import pandas as pd
-from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
-from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
- MultiIndex, Timestamp, Timedelta, UInt64Index)
-from pandas.formats.printing import pprint_thing
-from pandas import concat
-from pandas.core.common import PerformanceWarning
-from pandas.tests.indexing.common import _mklbl
+from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
-from pandas import date_range
+from pandas.tests.indexing.common import Base, _mklbl
-_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
-def _generate_indices(f, values=False):
- """ generate the indicies
- if values is True , use the axis values
- is False, use the range
- """
-
- axes = f.axes
- if values:
- axes = [lrange(len(a)) for a in axes]
-
- return itertools.product(*axes)
-
-
-def _get_value(f, i, values=False):
- """ return the value for the location i """
-
- # check agains values
- if values:
- return f.values[i]
-
- # this is equiv of f[col][row].....
- # v = f
- # for a in reversed(i):
- # v = v.__getitem__(a)
- # return v
- with catch_warnings(record=True):
- return f.ix[i]
-
-
-def _get_result(obj, method, key, axis):
- """ return the result for this obj with this key and this axis """
-
- if isinstance(key, dict):
- key = key[axis]
-
- # use an artifical conversion to map the key as integers to the labels
- # so ix can work for comparisions
- if method == 'indexer':
- method = 'ix'
- key = obj._get_axis(axis)[key]
-
- # in case we actually want 0 index slicing
- try:
- xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
- except:
- xp = getattr(obj, method).__getitem__(key)
-
- return xp
-
-
-def _axify(obj, key, axis):
- # create a tuple accessor
- axes = [slice(None)] * obj.ndim
- axes[axis] = key
- return tuple(axes)
-
-
-class TestIndexing(tm.TestCase):
-
- _objs = set(['series', 'frame', 'panel'])
- _typs = set(['ints', 'uints', 'labels', 'mixed',
- 'ts', 'floats', 'empty', 'ts_rev'])
-
- def setUp(self):
-
- self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
- self.frame_ints = DataFrame(np.random.randn(4, 4),
- index=lrange(0, 8, 2),
- columns=lrange(0, 12, 3))
- self.panel_ints = Panel(np.random.rand(4, 4, 4),
- items=lrange(0, 8, 2),
- major_axis=lrange(0, 12, 3),
- minor_axis=lrange(0, 16, 4))
-
- self.series_uints = Series(np.random.rand(4),
- index=UInt64Index(lrange(0, 8, 2)))
- self.frame_uints = DataFrame(np.random.randn(4, 4),
- index=UInt64Index(lrange(0, 8, 2)),
- columns=UInt64Index(lrange(0, 12, 3)))
- self.panel_uints = Panel(np.random.rand(4, 4, 4),
- items=UInt64Index(lrange(0, 8, 2)),
- major_axis=UInt64Index(lrange(0, 12, 3)),
- minor_axis=UInt64Index(lrange(0, 16, 4)))
-
- self.series_labels = Series(np.random.randn(4), index=list('abcd'))
- self.frame_labels = DataFrame(np.random.randn(4, 4),
- index=list('abcd'), columns=list('ABCD'))
- self.panel_labels = Panel(np.random.randn(4, 4, 4),
- items=list('abcd'),
- major_axis=list('ABCD'),
- minor_axis=list('ZYXW'))
-
- self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
- self.frame_mixed = DataFrame(np.random.randn(4, 4),
- index=[2, 4, 'null', 8])
- self.panel_mixed = Panel(np.random.randn(4, 4, 4),
- items=[2, 4, 'null', 8])
-
- self.series_ts = Series(np.random.randn(4),
- index=date_range('20130101', periods=4))
- self.frame_ts = DataFrame(np.random.randn(4, 4),
- index=date_range('20130101', periods=4))
- self.panel_ts = Panel(np.random.randn(4, 4, 4),
- items=date_range('20130101', periods=4))
-
- dates_rev = (date_range('20130101', periods=4)
- .sort_values(ascending=False))
- self.series_ts_rev = Series(np.random.randn(4),
- index=dates_rev)
- self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
- index=dates_rev)
- self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
- items=dates_rev)
-
- self.frame_empty = DataFrame({})
- self.series_empty = Series({})
- self.panel_empty = Panel({})
-
- # form agglomerates
- for o in self._objs:
-
- d = dict()
- for t in self._typs:
- d[t] = getattr(self, '%s_%s' % (o, t), None)
-
- setattr(self, o, d)
-
- def check_values(self, f, func, values=False):
-
- if f is None:
- return
- axes = f.axes
- indicies = itertools.product(*axes)
-
- for i in indicies:
- result = getattr(f, func)[i]
-
- # check agains values
- if values:
- expected = f.values[i]
- else:
- expected = f
- for a in reversed(i):
- expected = expected.__getitem__(a)
-
- tm.assert_almost_equal(result, expected)
-
- def check_result(self, name, method1, key1, method2, key2, typs=None,
- objs=None, axes=None, fails=None):
- def _eq(t, o, a, obj, k1, k2):
- """ compare equal for these 2 keys """
-
- if a is not None and a > obj.ndim - 1:
- return
-
- def _print(result, error=None):
- if error is not None:
- error = str(error)
- v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
- "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
- (name, result, t, o, method1, method2, a, error or ''))
- if _verbose:
- pprint_thing(v)
-
- try:
- rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
-
- try:
- xp = _get_result(obj, method2, k2, a)
- except:
- result = 'no comp'
- _print(result)
- return
-
- detail = None
-
- try:
- if is_scalar(rs) and is_scalar(xp):
- self.assertEqual(rs, xp)
- elif xp.ndim == 1:
- tm.assert_series_equal(rs, xp)
- elif xp.ndim == 2:
- tm.assert_frame_equal(rs, xp)
- elif xp.ndim == 3:
- tm.assert_panel_equal(rs, xp)
- result = 'ok'
- except AssertionError as e:
- detail = str(e)
- result = 'fail'
-
- # reverse the checks
- if fails is True:
- if result == 'fail':
- result = 'ok (fail)'
-
- _print(result)
- if not result.startswith('ok'):
- raise AssertionError(detail)
-
- except AssertionError:
- raise
- except Exception as detail:
-
- # if we are in fails, the ok, otherwise raise it
- if fails is not None:
- if isinstance(detail, fails):
- result = 'ok (%s)' % type(detail).__name__
- _print(result)
- return
-
- result = type(detail).__name__
- raise AssertionError(_print(result, error=detail))
-
- if typs is None:
- typs = self._typs
-
- if objs is None:
- objs = self._objs
-
- if axes is not None:
- if not isinstance(axes, (tuple, list)):
- axes = [axes]
- else:
- axes = list(axes)
- else:
- axes = [0, 1, 2]
-
- # check
- for o in objs:
- if o not in self._objs:
- continue
-
- d = getattr(self, o)
- for a in axes:
- for t in typs:
- if t not in self._typs:
- continue
-
- obj = d[t]
- if obj is not None:
- obj = obj.copy()
-
- k2 = key2
- _eq(t, o, a, obj, key1, k2)
-
- def test_ix_deprecation(self):
- # GH 15114
-
- df = DataFrame({'A': [1, 2, 3]})
- with tm.assert_produces_warning(DeprecationWarning,
- check_stacklevel=False):
- df.ix[1, 'A']
-
- def test_indexer_caching(self):
- # GH5727
- # make sure that indexers are in the _internal_names_set
- n = 1000001
- arrays = [lrange(n), lrange(n)]
- index = MultiIndex.from_tuples(lzip(*arrays))
- s = Series(np.zeros(n), index=index)
- str(s)
-
- # setitem
- expected = Series(np.ones(n), index=index)
- s = Series(np.zeros(n), index=index)
- s[s == 0] = 1
- tm.assert_series_equal(s, expected)
-
- def test_at_and_iat_get(self):
- def _check(f, func, values=False):
-
- if f is not None:
- indicies = _generate_indices(f, values)
- for i in indicies:
- result = getattr(f, func)[i]
- expected = _get_value(f, i, values)
- tm.assert_almost_equal(result, expected)
-
- for o in self._objs:
-
- d = getattr(self, o)
-
- # iat
- for f in [d['ints'], d['uints']]:
- _check(f, 'iat', values=True)
-
- for f in [d['labels'], d['ts'], d['floats']]:
- if f is not None:
- self.assertRaises(ValueError, self.check_values, f, 'iat')
-
- # at
- for f in [d['ints'], d['uints'], d['labels'],
- d['ts'], d['floats']]:
- _check(f, 'at')
-
- def test_at_and_iat_set(self):
- def _check(f, func, values=False):
-
- if f is not None:
- indicies = _generate_indices(f, values)
- for i in indicies:
- getattr(f, func)[i] = 1
- expected = _get_value(f, i, values)
- tm.assert_almost_equal(expected, 1)
-
- for t in self._objs:
-
- d = getattr(self, t)
-
- # iat
- for f in [d['ints'], d['uints']]:
- _check(f, 'iat', values=True)
-
- for f in [d['labels'], d['ts'], d['floats']]:
- if f is not None:
- self.assertRaises(ValueError, _check, f, 'iat')
-
- # at
- for f in [d['ints'], d['uints'], d['labels'],
- d['ts'], d['floats']]:
- _check(f, 'at')
-
- def test_at_iat_coercion(self):
-
- # as timestamp is not a tuple!
- dates = date_range('1/1/2000', periods=8)
- df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
- s = df['A']
-
- result = s.at[dates[5]]
- xp = s.values[5]
- self.assertEqual(result, xp)
-
- # GH 7729
- # make sure we are boxing the returns
- s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
- expected = Timestamp('2014-02-02')
-
- for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
- result = r()
- self.assertEqual(result, expected)
-
- s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
- expected = Timedelta('2 days')
-
- for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
- result = r()
- self.assertEqual(result, expected)
-
- def test_iat_invalid_args(self):
- pass
-
- def test_imethods_with_dups(self):
-
- # GH6493
- # iat/iloc with dups
-
- s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
- result = s.iloc[2]
- self.assertEqual(result, 2)
- result = s.iat[2]
- self.assertEqual(result, 2)
-
- self.assertRaises(IndexError, lambda: s.iat[10])
- self.assertRaises(IndexError, lambda: s.iat[-10])
-
- result = s.iloc[[2, 3]]
- expected = Series([2, 3], [2, 2], dtype='int64')
- tm.assert_series_equal(result, expected)
-
- df = s.to_frame()
- result = df.iloc[2]
- expected = Series(2, index=[0], name=2)
- tm.assert_series_equal(result, expected)
-
- result = df.iat[2, 0]
- expected = 2
- self.assertEqual(result, 2)
-
- def test_repeated_getitem_dups(self):
- # GH 5678
- # repeated gettitems on a dup index returing a ndarray
- df = DataFrame(
- np.random.random_sample((20, 5)),
- index=['ABCDE' [x % 5] for x in range(20)])
- expected = df.loc['A', 0]
- result = df.loc[:, 0].loc['A']
- tm.assert_series_equal(result, expected)
-
- def test_iloc_exceeds_bounds(self):
-
- # GH6296
- # iloc should allow indexers that exceed the bounds
- df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
- expected = df
-
- # lists of positions should raise IndexErrror!
- with tm.assertRaisesRegexp(IndexError,
- 'positional indexers are out-of-bounds'):
- df.iloc[:, [0, 1, 2, 3, 4, 5]]
- self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
- self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
- self.assertRaises(IndexError, lambda: df.iloc[[100]])
-
- s = df['A']
- self.assertRaises(IndexError, lambda: s.iloc[[100]])
- self.assertRaises(IndexError, lambda: s.iloc[[-100]])
-
- # still raise on a single indexer
- msg = 'single positional indexer is out-of-bounds'
- with tm.assertRaisesRegexp(IndexError, msg):
- df.iloc[30]
- self.assertRaises(IndexError, lambda: df.iloc[-30])
-
- # GH10779
- # single positive/negative indexer exceeding Series bounds should raise
- # an IndexError
- with tm.assertRaisesRegexp(IndexError, msg):
- s.iloc[30]
- self.assertRaises(IndexError, lambda: s.iloc[-30])
-
- # slices are ok
- result = df.iloc[:, 4:10] # 0 < start < len < stop
- expected = df.iloc[:, 4:]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, -4:-10] # stop < 0 < start < len
- expected = df.iloc[:, :0]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
- expected = df.iloc[:, :4:-1]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
- expected = df.iloc[:, 4::-1]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, -10:4] # start < 0 < stop < len
- expected = df.iloc[:, :4]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, 10:4] # 0 < stop < len < start
- expected = df.iloc[:, :0]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
- expected = df.iloc[:, :0]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, 10:11] # 0 < len < start < stop
- expected = df.iloc[:, :0]
- tm.assert_frame_equal(result, expected)
-
- # slice bounds exceeding is ok
- result = s.iloc[18:30]
- expected = s.iloc[18:]
- tm.assert_series_equal(result, expected)
-
- result = s.iloc[30:]
- expected = s.iloc[:0]
- tm.assert_series_equal(result, expected)
-
- result = s.iloc[30::-1]
- expected = s.iloc[::-1]
- tm.assert_series_equal(result, expected)
-
- # doc example
- def check(result, expected):
- str(result)
- result.dtypes
- tm.assert_frame_equal(result, expected)
-
- dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
- check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
- check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
- check(dfl.iloc[4:6], dfl.iloc[[4]])
-
- self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
- self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
-
- def test_iloc_getitem_int(self):
-
- # integer
- self.check_result('integer', 'iloc', 2, 'ix',
- {0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
- self.check_result('integer', 'iloc', 2, 'indexer', 2,
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- def test_iloc_getitem_neg_int(self):
-
- # neg integer
- self.check_result('neg int', 'iloc', -1, 'ix',
- {0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
- self.check_result('neg int', 'iloc', -1, 'indexer', -1,
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- def test_iloc_getitem_list_int(self):
-
- # list of ints
- self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
- {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
- typs=['ints', 'uints'])
- self.check_result('list int', 'iloc', [2], 'ix',
- {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
- self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- # array of ints (GH5006), make sure that a single indexer is returning
- # the correct type
- self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
- {0: [0, 2, 4],
- 1: [0, 3, 6],
- 2: [0, 4, 8]}, typs=['ints', 'uints'])
- self.check_result('array int', 'iloc', np.array([2]), 'ix',
- {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
- self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
- [0, 1, 2],
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- def test_iloc_getitem_neg_int_can_reach_first_index(self):
- # GH10547 and GH10779
- # negative integers should be able to reach index 0
- df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
- s = df['A']
-
- expected = df.iloc[0]
- result = df.iloc[-3]
- tm.assert_series_equal(result, expected)
-
- expected = df.iloc[[0]]
- result = df.iloc[[-3]]
- tm.assert_frame_equal(result, expected)
-
- expected = s.iloc[0]
- result = s.iloc[-3]
- self.assertEqual(result, expected)
-
- expected = s.iloc[[0]]
- result = s.iloc[[-3]]
- tm.assert_series_equal(result, expected)
-
- # check the length 1 Series case highlighted in GH10547
- expected = pd.Series(['a'], index=['A'])
- result = expected.iloc[[-1]]
- tm.assert_series_equal(result, expected)
-
- def test_iloc_getitem_dups(self):
-
- # no dups in panel (bug?)
- self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
- {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
- objs=['series', 'frame'], typs=['ints', 'uints'])
-
- # GH 6766
- df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
- df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
- df = concat([df1, df2], axis=1)
-
- # cross-sectional indexing
- result = df.iloc[0, 0]
- self.assertTrue(isnull(result))
-
- result = df.iloc[0, :]
- expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
- name=0)
- tm.assert_series_equal(result, expected)
-
- def test_iloc_getitem_array(self):
-
- # array like
- s = Series(index=lrange(1, 4))
- self.check_result('array like', 'iloc', s.index, 'ix',
- {0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
- typs=['ints', 'uints'])
-
- def test_iloc_getitem_bool(self):
-
- # boolean indexers
- b = [True, False, True, False, ]
- self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
- self.check_result('bool', 'iloc', b, 'ix', b,
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- def test_iloc_getitem_slice(self):
-
- # slices
- self.check_result('slice', 'iloc', slice(1, 3), 'ix',
- {0: [2, 4], 1: [3, 6], 2: [4, 8]},
- typs=['ints', 'uints'])
- self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
- slice(1, 3),
- typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
- fails=IndexError)
-
- def test_iloc_getitem_slice_dups(self):
-
- df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
- df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
- columns=['A', 'C'])
-
- # axis=1
- df = concat([df1, df2], axis=1)
- tm.assert_frame_equal(df.iloc[:, :4], df1)
- tm.assert_frame_equal(df.iloc[:, 4:], df2)
-
- df = concat([df2, df1], axis=1)
- tm.assert_frame_equal(df.iloc[:, :2], df2)
- tm.assert_frame_equal(df.iloc[:, 2:], df1)
-
- exp = concat([df2, df1.iloc[:, [0]]], axis=1)
- tm.assert_frame_equal(df.iloc[:, 0:3], exp)
-
- # axis=0
- df = concat([df, df], axis=0)
- tm.assert_frame_equal(df.iloc[0:10, :2], df2)
- tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
- tm.assert_frame_equal(df.iloc[10:, :2], df2)
- tm.assert_frame_equal(df.iloc[10:, 2:], df1)
-
- def test_iloc_setitem(self):
- df = self.frame_ints
-
- df.iloc[1, 1] = 1
- result = df.iloc[1, 1]
- self.assertEqual(result, 1)
-
- df.iloc[:, 2:3] = 0
- expected = df.iloc[:, 2:3]
- result = df.iloc[:, 2:3]
- tm.assert_frame_equal(result, expected)
-
- # GH5771
- s = Series(0, index=[4, 5, 6])
- s.iloc[1:2] += 1
- expected = Series([0, 1, 0], index=[4, 5, 6])
- tm.assert_series_equal(s, expected)
-
- def test_loc_setitem_slice(self):
- # GH10503
-
- # assigning the same type should not change the type
- df1 = DataFrame({'a': [0, 1, 1],
- 'b': Series([100, 200, 300], dtype='uint32')})
- ix = df1['a'] == 1
- newb1 = df1.loc[ix, 'b'] + 1
- df1.loc[ix, 'b'] = newb1
- expected = DataFrame({'a': [0, 1, 1],
- 'b': Series([100, 201, 301], dtype='uint32')})
- tm.assert_frame_equal(df1, expected)
-
- # assigning a new type should get the inferred type
- df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
- dtype='uint64')
- ix = df1['a'] == 1
- newb2 = df2.loc[ix, 'b']
- df1.loc[ix, 'b'] = newb2
- expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
- dtype='uint64')
- tm.assert_frame_equal(df2, expected)
-
- def test_ix_loc_setitem_consistency(self):
-
- # GH 5771
- # loc with slice and series
- s = Series(0, index=[4, 5, 6])
- s.loc[4:5] += 1
- expected = Series([1, 1, 0], index=[4, 5, 6])
- tm.assert_series_equal(s, expected)
-
- # GH 5928
- # chained indexing assignment
- df = DataFrame({'a': [0, 1, 2]})
- expected = df.copy()
- with catch_warnings(record=True):
- expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
-
- with catch_warnings(record=True):
- df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
- with catch_warnings(record=True):
- df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
- 'float64') + 0.5
- expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
- tm.assert_frame_equal(df, expected)
-
- # GH 8607
- # ix setitem consistency
- df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
- 'delta': [1174, 904, 161],
- 'elapsed': [7673, 9277, 1470]})
- expected = DataFrame({'timestamp': pd.to_datetime(
- [1413840976, 1413842580, 1413760580], unit='s'),
- 'delta': [1174, 904, 161],
- 'elapsed': [7673, 9277, 1470]})
-
- df2 = df.copy()
- df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
- tm.assert_frame_equal(df2, expected)
-
- df2 = df.copy()
- df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
- tm.assert_frame_equal(df2, expected)
-
- df2 = df.copy()
- with catch_warnings(record=True):
- df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
- tm.assert_frame_equal(df2, expected)
-
- def test_ix_loc_consistency(self):
-
- # GH 8613
- # some edge cases where ix/loc should return the same
- # this is not an exhaustive case
-
- def compare(result, expected):
- if is_scalar(expected):
- self.assertEqual(result, expected)
- else:
- self.assertTrue(expected.equals(result))
-
- # failure cases for .loc, but these work for .ix
- df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
- for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
- tuple([slice(0, 2), df.columns[0:2]])]:
-
- for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
- tm.makeDateIndex, tm.makePeriodIndex,
- tm.makeTimedeltaIndex]:
- df.index = index(len(df.index))
- with catch_warnings(record=True):
- df.ix[key]
-
- self.assertRaises(TypeError, lambda: df.loc[key])
-
- df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
- index=pd.date_range('2012-01-01', periods=5))
-
- for key in ['2012-01-03',
- '2012-01-31',
- slice('2012-01-03', '2012-01-03'),
- slice('2012-01-03', '2012-01-04'),
- slice('2012-01-03', '2012-01-06', 2),
- slice('2012-01-03', '2012-01-31'),
- tuple([[True, True, True, False, True]]), ]:
-
- # getitem
-
- # if the expected raises, then compare the exceptions
- try:
- with catch_warnings(record=True):
- expected = df.ix[key]
- except KeyError:
- self.assertRaises(KeyError, lambda: df.loc[key])
- continue
-
- result = df.loc[key]
- compare(result, expected)
-
- # setitem
- df1 = df.copy()
- df2 = df.copy()
-
- with catch_warnings(record=True):
- df1.ix[key] = 10
- df2.loc[key] = 10
- compare(df2, df1)
-
- # edge cases
- s = Series([1, 2, 3, 4], index=list('abde'))
-
- result1 = s['a':'c']
- with catch_warnings(record=True):
- result2 = s.ix['a':'c']
- result3 = s.loc['a':'c']
- tm.assert_series_equal(result1, result2)
- tm.assert_series_equal(result1, result3)
-
- # now work rather than raising KeyError
- s = Series(range(5), [-2, -1, 1, 2, 3])
-
- with catch_warnings(record=True):
- result1 = s.ix[-10:3]
- result2 = s.loc[-10:3]
- tm.assert_series_equal(result1, result2)
-
- with catch_warnings(record=True):
- result1 = s.ix[0:3]
- result2 = s.loc[0:3]
- tm.assert_series_equal(result1, result2)
-
- def test_loc_setitem_dups(self):
-
- # GH 6541
- df_orig = DataFrame(
- {'me': list('rttti'),
- 'foo': list('aaade'),
- 'bar': np.arange(5, dtype='float64') * 1.34 + 2,
- 'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
-
- indexer = tuple(['r', ['bar', 'bar2']])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
-
- indexer = tuple(['r', 'bar'])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
-
- indexer = tuple(['t', ['bar', 'bar2']])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
-
- def test_iloc_setitem_dups(self):
-
- # GH 6766
- # iloc with a mask aligning from another iloc
- df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
- df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
- df = concat([df1, df2], axis=1)
-
- expected = df.fillna(3)
- expected['A'] = expected['A'].astype('float64')
- inds = np.isnan(df.iloc[:, 0])
- mask = inds[inds].index
- df.iloc[mask, 0] = df.iloc[mask, 2]
- tm.assert_frame_equal(df, expected)
-
- # del a dup column across blocks
- expected = DataFrame({0: [1, 2], 1: [3, 4]})
- expected.columns = ['B', 'B']
- del df['A']
- tm.assert_frame_equal(df, expected)
-
- # assign back to self
- df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
- tm.assert_frame_equal(df, expected)
-
- # reversed x 2
- df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
- drop=True)
- df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
- drop=True)
- tm.assert_frame_equal(df, expected)
-
- def test_chained_getitem_with_lists(self):
-
- # GH6394
- # Regression in chained getitem indexing with embedded list-like from
- # 0.12
- def check(result, expected):
- tm.assert_numpy_array_equal(result, expected)
- tm.assertIsInstance(result, np.ndarray)
-
- df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
- expected = df['A'].iloc[2]
- result = df.loc[2, 'A']
- check(result, expected)
- result2 = df.iloc[2]['A']
- check(result2, expected)
- result3 = df['A'].loc[2]
- check(result3, expected)
- result4 = df['A'].iloc[2]
- check(result4, expected)
-
- def test_loc_getitem_int(self):
-
- # int label
- self.check_result('int label', 'loc', 2, 'ix', 2,
- typs=['ints', 'uints'], axes=0)
- self.check_result('int label', 'loc', 3, 'ix', 3,
- typs=['ints', 'uints'], axes=1)
- self.check_result('int label', 'loc', 4, 'ix', 4,
- typs=['ints', 'uints'], axes=2)
- self.check_result('int label', 'loc', 2, 'ix', 2,
- typs=['label'], fails=KeyError)
-
- def test_loc_getitem_label(self):
-
- # label
- self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
- axes=0)
- self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
- axes=0)
- self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
- self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
- typs=['ts'], axes=0)
- self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
- fails=KeyError)
-
- def test_loc_getitem_label_out_of_range(self):
-
- # out of range label
- self.check_result('label range', 'loc', 'f', 'ix', 'f',
- typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
- fails=KeyError)
- self.check_result('label range', 'loc', 'f', 'ix', 'f',
- typs=['floats'], fails=TypeError)
- self.check_result('label range', 'loc', 20, 'ix', 20,
- typs=['ints', 'uints', 'mixed'], fails=KeyError)
- self.check_result('label range', 'loc', 20, 'ix', 20,
- typs=['labels'], fails=TypeError)
- self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
- axes=0, fails=TypeError)
- self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
- axes=0, fails=TypeError)
-
- def test_loc_getitem_label_list(self):
-
- # list of labels
- self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
- typs=['ints', 'uints'], axes=0)
- self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
- typs=['ints', 'uints'], axes=1)
- self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
- typs=['ints', 'uints'], axes=2)
- self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
- ['a', 'b', 'd'], typs=['labels'], axes=0)
- self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
- ['A', 'B', 'C'], typs=['labels'], axes=1)
- self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
- ['Z', 'Y', 'W'], typs=['labels'], axes=2)
- self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
- [2, 8, 'null'], typs=['mixed'], axes=0)
- self.check_result('list lbl', 'loc',
- [Timestamp('20130102'), Timestamp('20130103')], 'ix',
- [Timestamp('20130102'), Timestamp('20130103')],
- typs=['ts'], axes=0)
-
- self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
- typs=['empty'], fails=KeyError)
- self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
- typs=['ints', 'uints'], axes=0, fails=KeyError)
- self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
- typs=['ints', 'uints'], axes=1, fails=KeyError)
- self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
- typs=['ints', 'uints'], axes=2, fails=KeyError)
-
- def test_loc_getitem_label_list_fails(self):
- # fails
- self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
- typs=['ints', 'uints'], axes=1, fails=KeyError)
- self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
- typs=['ints', 'uints'], axes=2, fails=KeyError)
-
- def test_loc_getitem_label_array_like(self):
- # array like
- self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
- 'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
- self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
- 'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
- self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
- 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
-
- def test_loc_getitem_bool(self):
- # boolean indexers
- b = [True, False, True, False]
- self.check_result('bool', 'loc', b, 'ix', b,
- typs=['ints', 'uints', 'labels',
- 'mixed', 'ts', 'floats'])
- self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
- fails=KeyError)
-
- def test_loc_getitem_int_slice(self):
-
- # ok
- self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
- typs=['ints', 'uints'], axes=0)
- self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
- typs=['ints', 'uints'], axes=1)
- self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
- typs=['ints', 'uints'], axes=2)
-
- # GH 3053
- # loc should treat integer slices like label slices
- from itertools import product
-
- index = MultiIndex.from_tuples([t for t in product(
- [6, 7, 8], ['a', 'b'])])
- df = DataFrame(np.random.randn(6, 6), index, index)
- result = df.loc[6:8, :]
- with catch_warnings(record=True):
- expected = df.ix[6:8, :]
- tm.assert_frame_equal(result, expected)
-
- index = MultiIndex.from_tuples([t
- for t in product(
- [10, 20, 30], ['a', 'b'])])
- df = DataFrame(np.random.randn(6, 6), index, index)
- result = df.loc[20:30, :]
- with catch_warnings(record=True):
- expected = df.ix[20:30, :]
- tm.assert_frame_equal(result, expected)
-
- # doc examples
- result = df.loc[10, :]
- with catch_warnings(record=True):
- expected = df.ix[10, :]
- tm.assert_frame_equal(result, expected)
-
- result = df.loc[:, 10]
- # expected = df.ix[:,10] (this fails)
- expected = df[10]
- tm.assert_frame_equal(result, expected)
-
- def test_loc_to_fail(self):
-
- # GH3449
- df = DataFrame(np.random.random((3, 3)),
- index=['a', 'b', 'c'],
- columns=['e', 'f', 'g'])
-
- # raise a KeyError?
- self.assertRaises(KeyError, df.loc.__getitem__,
- tuple([[1, 2], [1, 2]]))
-
- # GH 7496
- # loc should not fallback
-
- s = Series()
- s.loc[1] = 1
- s.loc['a'] = 2
-
- self.assertRaises(KeyError, lambda: s.loc[-1])
- self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
-
- self.assertRaises(KeyError, lambda: s.loc[['4']])
-
- s.loc[-1] = 3
- result = s.loc[[-1, -2]]
- expected = Series([3, np.nan], index=[-1, -2])
- tm.assert_series_equal(result, expected)
-
- s['a'] = 2
- self.assertRaises(KeyError, lambda: s.loc[[-2]])
-
- del s['a']
-
- def f():
- s.loc[[-2]] = 0
-
- self.assertRaises(KeyError, f)
-
- # inconsistency between .loc[values] and .loc[values,:]
- # GH 7999
- df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
-
- def f():
- df.loc[[3], :]
-
- self.assertRaises(KeyError, f)
-
- def f():
- df.loc[[3]]
-
- self.assertRaises(KeyError, f)
-
- def test_at_to_fail(self):
- # at should not fallback
- # GH 7814
- s = Series([1, 2, 3], index=list('abc'))
- result = s.at['a']
- self.assertEqual(result, 1)
- self.assertRaises(ValueError, lambda: s.at[0])
-
- df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
- result = df.at['a', 'A']
- self.assertEqual(result, 1)
- self.assertRaises(ValueError, lambda: df.at['a', 0])
-
- s = Series([1, 2, 3], index=[3, 2, 1])
- result = s.at[1]
- self.assertEqual(result, 3)
- self.assertRaises(ValueError, lambda: s.at['a'])
-
- df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
- result = df.at[1, 0]
- self.assertEqual(result, 3)
- self.assertRaises(ValueError, lambda: df.at['a', 0])
-
- # GH 13822, incorrect error string with non-unique columns when missing
- # column is accessed
- df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
- df.columns = ['x', 'x', 'z']
-
- # Check that we get the correct value in the KeyError
- self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
- lambda: df[['x', 'y', 'z']])
-
- def test_loc_getitem_label_slice(self):
-
- # label slices (with ints)
- self.check_result('lab slice', 'loc', slice(1, 3),
- 'ix', slice(1, 3),
- typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
- fails=TypeError)
-
- # real label slices
- self.check_result('lab slice', 'loc', slice('a', 'c'),
- 'ix', slice('a', 'c'), typs=['labels'], axes=0)
- self.check_result('lab slice', 'loc', slice('A', 'C'),
- 'ix', slice('A', 'C'), typs=['labels'], axes=1)
- self.check_result('lab slice', 'loc', slice('W', 'Z'),
- 'ix', slice('W', 'Z'), typs=['labels'], axes=2)
-
- self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
- 'ix', slice('20130102', '20130104'),
- typs=['ts'], axes=0)
- self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
- 'ix', slice('20130102', '20130104'),
- typs=['ts'], axes=1, fails=TypeError)
- self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
- 'ix', slice('20130102', '20130104'),
- typs=['ts'], axes=2, fails=TypeError)
-
- # GH 14316
- self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
- 'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
-
- self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
- typs=['mixed'], axes=0, fails=TypeError)
- self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
- typs=['mixed'], axes=1, fails=KeyError)
- self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
- typs=['mixed'], axes=2, fails=KeyError)
-
- self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
- 2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
-
- def test_loc_general(self):
-
- df = DataFrame(
- np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
- index=['A', 'B', 'C', 'D'])
-
- # want this to work
- result = df.loc[:, "A":"B"].iloc[0:2, :]
- self.assertTrue((result.columns == ['A', 'B']).all())
- self.assertTrue((result.index == ['A', 'B']).all())
-
- # mixed type
- result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
- expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
- tm.assert_series_equal(result, expected)
- self.assertEqual(result.dtype, object)
-
- def test_loc_setitem_consistency(self):
- # GH 6149
- # coerce similary for setitem and loc when rows have a null-slice
- expected = DataFrame({'date': Series(0, index=range(5),
- dtype=np.int64),
- 'val': Series(range(5), dtype=np.int64)})
-
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(
- range(5), dtype=np.int64)})
- df.loc[:, 'date'] = 0
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(range(5), dtype=np.int64)})
- df.loc[:, 'date'] = np.array(0, dtype=np.int64)
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(range(5), dtype=np.int64)})
- df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
- tm.assert_frame_equal(df, expected)
-
- expected = DataFrame({'date': Series('foo', index=range(5)),
- 'val': Series(range(5), dtype=np.int64)})
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(range(5), dtype=np.int64)})
- df.loc[:, 'date'] = 'foo'
- tm.assert_frame_equal(df, expected)
-
- expected = DataFrame({'date': Series(1.0, index=range(5)),
- 'val': Series(range(5), dtype=np.int64)})
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(range(5), dtype=np.int64)})
- df.loc[:, 'date'] = 1.0
- tm.assert_frame_equal(df, expected)
-
- def test_loc_setitem_consistency_empty(self):
- # empty (essentially noops)
- expected = DataFrame(columns=['x', 'y'])
- expected['x'] = expected['x'].astype(np.int64)
- df = DataFrame(columns=['x', 'y'])
- df.loc[:, 'x'] = 1
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame(columns=['x', 'y'])
- df['x'] = 1
- tm.assert_frame_equal(df, expected)
-
- def test_loc_setitem_consistency_slice_column_len(self):
- # .loc[:,column] setting with slice == len of the column
- # GH10408
- data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
-Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
-Region,Site,RespondentID,,,,,
-Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
-Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
-Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
-Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
-
- df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
- df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
- 'Respondent', 'StartDate')])
- df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
- 'Respondent', 'EndDate')])
- df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
- 'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
-
- df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
- 'Respondent', 'Duration')].astype('timedelta64[s]')
- expected = Series([1380, 720, 840, 2160.], index=df.index,
- name=('Respondent', 'Duration'))
- tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
-
- def test_loc_setitem_frame(self):
- df = self.frame_labels
-
- result = df.iloc[0, 0]
-
- df.loc['a', 'A'] = 1
- result = df.loc['a', 'A']
- self.assertEqual(result, 1)
-
- result = df.iloc[0, 0]
- self.assertEqual(result, 1)
-
- df.loc[:, 'B':'D'] = 0
- expected = df.loc[:, 'B':'D']
- with catch_warnings(record=True):
- result = df.ix[:, 1:]
- tm.assert_frame_equal(result, expected)
-
- # GH 6254
- # setting issue
- df = DataFrame(index=[3, 5, 4], columns=['A'])
- df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
- expected = DataFrame(dict(A=Series(
- [1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
- tm.assert_frame_equal(df, expected)
-
- # GH 6252
- # setting with an empty frame
- keys1 = ['@' + str(i) for i in range(5)]
- val1 = np.arange(5, dtype='int64')
-
- keys2 = ['@' + str(i) for i in range(4)]
- val2 = np.arange(4, dtype='int64')
-
- index = list(set(keys1).union(keys2))
- df = DataFrame(index=index)
- df['A'] = nan
- df.loc[keys1, 'A'] = val1
-
- df['B'] = nan
- df.loc[keys2, 'B'] = val2
-
- expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
- val2, index=keys2))).reindex(index=index)
- tm.assert_frame_equal(df, expected)
-
- # GH 8669
- # invalid coercion of nan -> int
- df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
- df.loc[df.B > df.A, 'B'] = df.A
- expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
- tm.assert_frame_equal(df, expected)
-
- # GH 6546
- # setting with mixed labels
- df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
-
- result = df.loc[0, [1, 2]]
- expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
- tm.assert_series_equal(result, expected)
-
- expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
- df.loc[0, [1, 2]] = [5, 6]
- tm.assert_frame_equal(df, expected)
-
- def test_loc_setitem_frame_multiples(self):
- # multiple setting
- df = DataFrame({'A': ['foo', 'bar', 'baz'],
- 'B': Series(
- range(3), dtype=np.int64)})
- rhs = df.loc[1:2]
- rhs.index = df.index[0:2]
- df.loc[0:1] = rhs
- expected = DataFrame({'A': ['bar', 'baz', 'baz'],
- 'B': Series(
- [1, 2, 2], dtype=np.int64)})
- tm.assert_frame_equal(df, expected)
-
- # multiple setting with frame on rhs (with M8)
- df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
- 'val': Series(
- range(5), dtype=np.int64)})
- expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
- '20000102'), Timestamp('20000101'), Timestamp('20000102'),
- Timestamp('20000103')],
- 'val': Series(
- [0, 1, 0, 1, 2], dtype=np.int64)})
- rhs = df.loc[0:2]
- rhs.index = df.index[2:5]
- df.loc[2:4] = rhs
- tm.assert_frame_equal(df, expected)
-
- def test_iloc_getitem_frame(self):
- df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
- columns=lrange(0, 8, 2))
-
- result = df.iloc[2]
- with catch_warnings(record=True):
- exp = df.ix[4]
- tm.assert_series_equal(result, exp)
-
- result = df.iloc[2, 2]
- with catch_warnings(record=True):
- exp = df.ix[4, 4]
- self.assertEqual(result, exp)
-
- # slice
- result = df.iloc[4:8]
- with catch_warnings(record=True):
- expected = df.ix[8:14]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:, 2:3]
- with catch_warnings(record=True):
- expected = df.ix[:, 4:5]
- tm.assert_frame_equal(result, expected)
-
- # list of integers
- result = df.iloc[[0, 1, 3]]
- with catch_warnings(record=True):
- expected = df.ix[[0, 2, 6]]
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[[0, 1, 3], [0, 1]]
- with catch_warnings(record=True):
- expected = df.ix[[0, 2, 6], [0, 2]]
- tm.assert_frame_equal(result, expected)
-
- # neg indicies
- result = df.iloc[[-1, 1, 3], [-1, 1]]
- with catch_warnings(record=True):
- expected = df.ix[[18, 2, 6], [6, 2]]
- tm.assert_frame_equal(result, expected)
-
- # dups indicies
- result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
- with catch_warnings(record=True):
- expected = df.ix[[18, 18, 2, 6], [6, 2]]
- tm.assert_frame_equal(result, expected)
-
- # with index-like
- s = Series(index=lrange(1, 5))
- result = df.iloc[s.index]
- with catch_warnings(record=True):
- expected = df.ix[[2, 4, 6, 8]]
- tm.assert_frame_equal(result, expected)
-
- def test_iloc_getitem_labelled_frame(self):
- # try with labelled frame
- df = DataFrame(np.random.randn(10, 4),
- index=list('abcdefghij'), columns=list('ABCD'))
-
- result = df.iloc[1, 1]
- exp = df.loc['b', 'B']
- self.assertEqual(result, exp)
-
- result = df.iloc[:, 2:3]
- expected = df.loc[:, ['C']]
- tm.assert_frame_equal(result, expected)
-
- # negative indexing
- result = df.iloc[-1, -1]
- exp = df.loc['j', 'D']
- self.assertEqual(result, exp)
-
- # out-of-bounds exception
- self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
-
- # trying to use a label
- self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
-
- def test_iloc_getitem_doc_issue(self):
-
- # multi axis slicing issue with single block
- # surfaced in GH 6059
-
- arr = np.random.randn(6, 4)
- index = date_range('20130101', periods=6)
- columns = list('ABCD')
- df = DataFrame(arr, index=index, columns=columns)
-
- # defines ref_locs
- df.describe()
-
- result = df.iloc[3:5, 0:2]
- str(result)
- result.dtypes
-
- expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
- columns=columns[0:2])
- tm.assert_frame_equal(result, expected)
-
- # for dups
- df.columns = list('aaaa')
- result = df.iloc[3:5, 0:2]
- str(result)
- result.dtypes
-
- expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
- columns=list('aa'))
- tm.assert_frame_equal(result, expected)
-
- # related
- arr = np.random.randn(6, 4)
- index = list(range(0, 12, 2))
- columns = list(range(0, 8, 2))
- df = DataFrame(arr, index=index, columns=columns)
-
- df._data.blocks[0].mgr_locs
- result = df.iloc[1:5, 2:4]
- str(result)
- result.dtypes
- expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
- columns=columns[2:4])
- tm.assert_frame_equal(result, expected)
+class TestFancy(Base, tm.TestCase):
+ """ pure get/set item & fancy indexing """
def test_setitem_ndarray_1d(self):
# GH5508
- # len of indexer vs length of the 1d ndarray
- df = DataFrame(index=Index(lrange(1, 11)))
- df['foo'] = np.zeros(10, dtype=np.float64)
- df['bar'] = np.zeros(10, dtype=np.complex)
-
- # invalid
- def f():
- with catch_warnings(record=True):
- df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
-
- self.assertRaises(ValueError, f)
-
- def f():
- df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
- 2.2, 1.0])
-
- self.assertRaises(ValueError, f)
-
- # valid
- df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
- 2.2, 1.0])
-
- result = df.loc[df.index[2:6], 'bar']
- expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
- name='bar')
- tm.assert_series_equal(result, expected)
-
- # dtype getting changed?
- df = DataFrame(index=Index(lrange(1, 11)))
- df['foo'] = np.zeros(10, dtype=np.float64)
- df['bar'] = np.zeros(10, dtype=np.complex)
-
- def f():
- df[2:5] = np.arange(1, 4) * 1j
-
- self.assertRaises(ValueError, f)
-
- def test_iloc_setitem_series(self):
- df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
- columns=list('ABCD'))
-
- df.iloc[1, 1] = 1
- result = df.iloc[1, 1]
- self.assertEqual(result, 1)
-
- df.iloc[:, 2:3] = 0
- expected = df.iloc[:, 2:3]
- result = df.iloc[:, 2:3]
- tm.assert_frame_equal(result, expected)
-
- s = Series(np.random.randn(10), index=lrange(0, 20, 2))
-
- s.iloc[1] = 1
- result = s.iloc[1]
- self.assertEqual(result, 1)
-
- s.iloc[:4] = 0
- expected = s.iloc[:4]
- result = s.iloc[:4]
- tm.assert_series_equal(result, expected)
-
- s = Series([-1] * 6)
- s.iloc[0::2] = [0, 2, 4]
- s.iloc[1::2] = [1, 3, 5]
- result = s
- expected = Series([0, 1, 2, 3, 4, 5])
- tm.assert_series_equal(result, expected)
-
- def test_iloc_setitem_list_of_lists(self):
-
- # GH 7551
- # list-of-list is set incorrectly in mixed vs. single dtyped frames
- df = DataFrame(dict(A=np.arange(5, dtype='int64'),
- B=np.arange(5, 10, dtype='int64')))
- df.iloc[2:4] = [[10, 11], [12, 13]]
- expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame(
- dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
- df.iloc[2:4] = [['x', 11], ['y', 13]]
- expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
- B=[5, 6, 11, 13, 9]))
- tm.assert_frame_equal(df, expected)
-
- def test_ix_general(self):
-
- # ix general issues
-
- # GH 2817
- data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
- 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
- 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
- df = DataFrame(data).set_index(keys=['col', 'year'])
- key = 4.0, 2012
-
- # emits a PerformanceWarning, ok
- with self.assert_produces_warning(PerformanceWarning):
- tm.assert_frame_equal(df.loc[key], df.iloc[2:])
-
- # this is ok
- df.sort_index(inplace=True)
- res = df.loc[key]
-
- # col has float dtype, result should be Float64Index
- index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
- names=['col', 'year'])
- expected = DataFrame({'amount': [222, 333, 444]}, index=index)
- tm.assert_frame_equal(res, expected)
-
- def test_ix_weird_slicing(self):
- # http://stackoverflow.com/q/17056560/1240268
- df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
- 'two': [1, 2, 3, 4, 5]})
- df.loc[df['one'] > 1, 'two'] = -df['two']
-
- expected = DataFrame({'one': {0: 1.0,
- 1: 2.0,
- 2: 3.0,
- 3: nan,
- 4: nan},
- 'two': {0: 1,
- 1: -2,
- 2: -3,
- 3: 4,
- 4: 5}})
- tm.assert_frame_equal(df, expected)
-
- def test_loc_coerceion(self):
-
- # 12411
- df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
- pd.NaT]})
- expected = df.dtypes
-
- result = df.iloc[[0]]
- tm.assert_series_equal(result.dtypes, expected)
+ # len of indexer vs length of the 1d ndarray
+ df = DataFrame(index=Index(lrange(1, 11)))
+ df['foo'] = np.zeros(10, dtype=np.float64)
+ df['bar'] = np.zeros(10, dtype=np.complex)
- result = df.iloc[[1]]
- tm.assert_series_equal(result.dtypes, expected)
+ # invalid
+ def f():
+ df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
+ 2.2, 1.0])
- # 12045
- import datetime
- df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
- datetime.datetime(1012, 1, 2)]})
- expected = df.dtypes
+ self.assertRaises(ValueError, f)
- result = df.iloc[[0]]
- tm.assert_series_equal(result.dtypes, expected)
+ # valid
+ df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
+ 2.2, 1.0])
- result = df.iloc[[1]]
- tm.assert_series_equal(result.dtypes, expected)
+ result = df.loc[df.index[2:6], 'bar']
+ expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
+ name='bar')
+ tm.assert_series_equal(result, expected)
- # 11594
- df = DataFrame({'text': ['some words'] + [None] * 9})
- expected = df.dtypes
+ # dtype getting changed?
+ df = DataFrame(index=Index(lrange(1, 11)))
+ df['foo'] = np.zeros(10, dtype=np.float64)
+ df['bar'] = np.zeros(10, dtype=np.complex)
- result = df.iloc[0:2]
- tm.assert_series_equal(result.dtypes, expected)
+ def f():
+ df[2:5] = np.arange(1, 4) * 1j
- result = df.iloc[3:]
- tm.assert_series_equal(result.dtypes, expected)
+ self.assertRaises(ValueError, f)
def test_setitem_dtype_upcast(self):
@@ -1683,19 +99,6 @@ def test_setitem_dtype_upcast(self):
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
- def test_setitem_iloc(self):
-
- # setitem with an iloc list
- df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
- columns=["A", "B", "C"])
- df.iloc[[0, 1], [1, 2]]
- df.iloc[[0, 1], [1, 2]] += 100
-
- expected = DataFrame(
- np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
- index=["A", "B", "C"], columns=["A", "B", "C"])
- tm.assert_frame_equal(df, expected)
-
def test_dups_fancy_indexing(self):
# GH 3455
@@ -1757,23 +160,24 @@ def test_dups_fancy_indexing(self):
# inconsistent returns for unique/duplicate indices when values are
# missing
- df = DataFrame(randn(4, 3), index=list('ABCD'))
- expected = df.ix[['E']]
+ df = DataFrame(np.random.randn(4, 3), index=list('ABCD'))
+ expected = df.reindex(['E'])
- dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
- result = dfnu.ix[['E']]
+ dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD'))
+ with catch_warnings(record=True):
+ result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
- result = df.ix[[0, 8, 0]]
+ result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
- result = df.ix[[0, 8, 0]]
+ result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
@@ -1781,7 +185,7 @@ def test_dups_fancy_indexing(self):
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
- result = df.ix[['A', 'A', 'E']]
+ result = df.loc[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
@@ -1790,9 +194,9 @@ def test_dups_fancy_indexing(self):
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
- [df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
- index=df.index)], axis=1)
- result = df.ix[:, ['A', 'B', 'C']]
+ [df.loc[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
+ index=df.index)], axis=1)
+ result = df.loc[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
@@ -1822,8 +226,8 @@ def test_indexing_mixed_frame_bug(self):
# this does not work, ie column test is not changed
idx = df['test'] == '_'
- temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
- df.ix[idx, 'test'] = temp
+ temp = df.loc[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
+ df.loc[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
@@ -1859,17 +263,17 @@ def test_set_index_nan(self):
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
- 20: nan,
- 21: nan,
- 22: nan,
- 23: nan,
+ 20: np.nan,
+ 21: np.nan,
+ 22: np.nan,
+ 23: np.nan,
24: 1.0,
- 25: nan,
- 26: nan,
- 27: nan,
- 28: nan,
- 29: nan,
- 30: nan},
+ 25: np.nan,
+ 26: np.nan,
+ 27: np.nan,
+ 28: np.nan,
+ 29: np.nan,
+ 30: np.nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
@@ -1925,14 +329,14 @@ def test_multi_assign(self):
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
- df.ix[1, 0] = np.nan
+ df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
- dft.ix[3, 3] = np.nan
+ dft.iloc[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
@@ -1940,17 +344,17 @@ def test_multi_assign(self):
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
- df2.ix[mask, cols] = dft.ix[mask, cols]
+ df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
- df2.ix[mask, cols] = dft.ix[mask, cols]
+ df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
- df2.ix[mask, cols] = dft.ix[mask, cols].values
+ df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
- df2.ix[mask, cols] = dft.ix[mask, cols].values
+ df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
@@ -1965,79 +369,18 @@ def test_multi_assign(self):
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
- def test_ix_assign_column_mixed(self):
- # GH #1142
- df = DataFrame(tm.getSeriesData())
- df['foo'] = 'bar'
-
- orig = df.ix[:, 'B'].copy()
- df.ix[:, 'B'] = df.ix[:, 'B'] + 1
- tm.assert_series_equal(df.B, orig + 1)
-
- # GH 3668, mixed frame with series value
- df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
- expected = df.copy()
-
- for i in range(5):
- indexer = i * 2
- v = 1000 + i * 200
- expected.ix[indexer, 'y'] = v
- self.assertEqual(expected.ix[indexer, 'y'], v)
-
- df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
- tm.assert_frame_equal(df, expected)
-
- # GH 4508, making sure consistency of assignments
- df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
- df.ix[[0, 2, ], 'b'] = [100, -100]
- expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
- tm.assert_frame_equal(df, expected)
-
- df = pd.DataFrame({'a': lrange(4)})
- df['b'] = np.nan
- df.ix[[1, 3], 'b'] = [100, -100]
- expected = DataFrame({'a': [0, 1, 2, 3],
- 'b': [np.nan, 100, np.nan, -100]})
- tm.assert_frame_equal(df, expected)
-
- # ok, but chained assignments are dangerous
- # if we turn off chained assignement it will work
- with option_context('chained_assignment', None):
- df = pd.DataFrame({'a': lrange(4)})
- df['b'] = np.nan
- df['b'].ix[[1, 3]] = [100, -100]
- tm.assert_frame_equal(df, expected)
-
- def test_ix_get_set_consistency(self):
-
- # GH 4544
- # ix/loc get/set not consistent when
- # a mixed int/string index
- df = DataFrame(np.arange(16).reshape((4, 4)),
- columns=['a', 'b', 8, 'c'],
- index=['e', 7, 'f', 'g'])
-
- self.assertEqual(df.ix['e', 8], 2)
- self.assertEqual(df.loc['e', 8], 2)
-
- df.ix['e', 8] = 42
- self.assertEqual(df.ix['e', 8], 42)
- self.assertEqual(df.loc['e', 8], 42)
-
- df.loc['e', 8] = 45
- self.assertEqual(df.ix['e', 8], 45)
- self.assertEqual(df.loc['e', 8], 45)
-
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
- df.ix[1, 0] = [1, 2, 3]
- df.ix[1, 0] = [1, 2]
+ with catch_warnings(record=True):
+ df.ix[1, 0] = [1, 2, 3]
+ df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
- result.ix[1, 0] = [1, 2]
+ with catch_warnings(record=True):
+ result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
@@ -2059,187 +402,25 @@ def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
- df.ix[1, 0] = TO(1)
- df.ix[1, 0] = TO(2)
+ with catch_warnings(record=True):
+ df.ix[1, 0] = TO(1)
+ df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
- result.ix[1, 0] = TO(2)
+ with catch_warnings(record=True):
+ result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
- df.ix[1, 0] = TO(1)
- df.ix[1, 0] = np.nan
+ with catch_warnings(record=True):
+ df.ix[1, 0] = TO(1)
+ df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
- def test_iloc_mask(self):
-
- # GH 3631, iloc with a mask (of a series) should raise
- df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
- mask = (df.a % 2 == 0)
- self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
- mask.index = lrange(len(mask))
- self.assertRaises(NotImplementedError, df.iloc.__getitem__,
- tuple([mask]))
-
- # ndarray ok
- result = df.iloc[np.array([True] * len(mask), dtype=bool)]
- tm.assert_frame_equal(result, df)
-
- # the possibilities
- locs = np.arange(4)
- nums = 2 ** locs
- reps = lmap(bin, nums)
- df = DataFrame({'locs': locs, 'nums': nums}, reps)
-
- expected = {
- (None, ''): '0b1100',
- (None, '.loc'): '0b1100',
- (None, '.iloc'): '0b1100',
- ('index', ''): '0b11',
- ('index', '.loc'): '0b11',
- ('index', '.iloc'): ('iLocation based boolean indexing '
- 'cannot use an indexable as a mask'),
- ('locs', ''): 'Unalignable boolean Series provided as indexer '
- '(index of the boolean Series and of the indexed '
- 'object do not match',
- ('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
- '(index of the boolean Series and of the '
- 'indexed object do not match',
- ('locs', '.iloc'): ('iLocation based boolean indexing on an '
- 'integer type is not available'),
- }
-
- # UserWarnings from reindex of a boolean mask
- with warnings.catch_warnings(record=True):
- result = dict()
- for idx in [None, 'index', 'locs']:
- mask = (df.nums > 2).values
- if idx:
- mask = Series(mask, list(reversed(getattr(df, idx))))
- for method in ['', '.loc', '.iloc']:
- try:
- if method:
- accessor = getattr(df, method[1:])
- else:
- accessor = df
- ans = str(bin(accessor[mask]['nums'].sum()))
- except Exception as e:
- ans = str(e)
-
- key = tuple([idx, method])
- r = expected.get(key)
- if r != ans:
- raise AssertionError(
- "[%s] does not match [%s], received [%s]"
- % (key, ans, r))
-
- def test_ix_slicing_strings(self):
- # GH3836
- data = {'Classification':
- ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
- 'Random': [1, 2, 3, 4, 5],
- 'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
- df = DataFrame(data)
- x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
- ])]
- df.ix[x.index, 'X'] = df['Classification']
-
- expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
- 1: 'bbb',
- 2: 'SA EQUITY',
- 3: 'SA SSF',
- 4: 'aaa'},
- 'Random': {0: 1,
- 1: 2,
- 2: 3,
- 3: 4,
- 4: 5},
- 'X': {0: 'correct',
- 1: 'bbb',
- 2: 'correct',
- 3: 'correct',
- 4: 'aaa'}}) # bug was 4: 'bbb'
-
- tm.assert_frame_equal(df, expected)
-
- def test_non_unique_loc(self):
- # GH3659
- # non-unique indexer with loc slice
- # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
-
- # these are going to raise becuase the we are non monotonic
- df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
- 'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
- self.assertRaises(KeyError, df.loc.__getitem__,
- tuple([slice(1, None)]))
- self.assertRaises(KeyError, df.loc.__getitem__,
- tuple([slice(0, None)]))
- self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
-
- # monotonic are ok
- df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
- 'B': [3, 4, 5, 6, 7, 8]},
- index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
- result = df.loc[1:]
- expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
- index=[1, 1, 2, 3])
- tm.assert_frame_equal(result, expected)
-
- result = df.loc[0:]
- tm.assert_frame_equal(result, df)
-
- result = df.loc[1:2]
- expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
- index=[1, 1, 2])
- tm.assert_frame_equal(result, expected)
-
- def test_loc_name(self):
- # GH 3880
- df = DataFrame([[1, 1], [1, 1]])
- df.index.name = 'index_name'
- result = df.iloc[[0, 1]].index.name
- self.assertEqual(result, 'index_name')
-
- result = df.ix[[0, 1]].index.name
- self.assertEqual(result, 'index_name')
-
- result = df.loc[[0, 1]].index.name
- self.assertEqual(result, 'index_name')
-
- def test_iloc_non_unique_indexing(self):
-
- # GH 4017, non-unique indexing (on the axis)
- df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
- idx = np.array(lrange(30)) * 99
- expected = df.iloc[idx]
-
- df3 = pd.concat([df, 2 * df, 3 * df])
- result = df3.iloc[idx]
-
- tm.assert_frame_equal(result, expected)
-
- df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
- df2 = pd.concat([df2, 2 * df2, 3 * df2])
-
- sidx = df2.index.to_series()
- expected = df2.iloc[idx[idx <= sidx.max()]]
-
- new_list = []
- for r, s in expected.iterrows():
- new_list.append(s)
- new_list.append(s * 2)
- new_list.append(s * 3)
-
- expected = DataFrame(new_list)
- expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
- ])
- result = df2.loc[idx]
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
@@ -2300,43 +481,6 @@ def test_mi_access(self):
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
- def test_non_unique_loc_memory_error(self):
-
- # GH 4280
- # non_unique index with a large selection triggers a memory error
-
- columns = list('ABCDEFG')
-
- def gen_test(l, l2):
- return pd.concat([DataFrame(randn(l, len(columns)),
- index=lrange(l), columns=columns),
- DataFrame(np.ones((l2, len(columns))),
- index=[0] * l2, columns=columns)])
-
- def gen_expected(df, mask):
- l = len(mask)
- return pd.concat([df.take([0], convert=False),
- DataFrame(np.ones((l, len(columns))),
- index=[0] * l,
- columns=columns),
- df.take(mask[1:], convert=False)])
-
- df = gen_test(900, 100)
- self.assertFalse(df.index.is_unique)
-
- mask = np.arange(100)
- result = df.loc[mask]
- expected = gen_expected(df, mask)
- tm.assert_frame_equal(result, expected)
-
- df = gen_test(900000, 100000)
- self.assertFalse(df.index.is_unique)
-
- mask = np.arange(100000)
- result = df.loc[mask]
- expected = gen_expected(df, mask)
- tm.assert_frame_equal(result, expected)
-
def test_astype_assignment(self):
# GH4312 (iloc)
@@ -2395,745 +539,79 @@ def test_astype_assignment_with_dups(self):
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
- def test_dups_loc(self):
-
- # GH4726
- # dup indexing with iloc/loc
- df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
- columns=['a', 'a', 'a', 'a', 'a'], index=[1])
- expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
- index=['a', 'a', 'a', 'a', 'a'], name=1)
-
- result = df.iloc[0]
- tm.assert_series_equal(result, expected)
-
- result = df.loc[1]
- tm.assert_series_equal(result, expected)
-
- def test_partial_setting(self):
-
- # GH2578, allow ix and friends to partially set
-
- # series
- s_orig = Series([1, 2, 3])
-
- s = s_orig.copy()
- s[5] = 5
- expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
- tm.assert_series_equal(s, expected)
-
- s = s_orig.copy()
- s.loc[5] = 5
- expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
- tm.assert_series_equal(s, expected)
-
- s = s_orig.copy()
- s[5] = 5.
- expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
- tm.assert_series_equal(s, expected)
-
- s = s_orig.copy()
- s.loc[5] = 5.
- expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
- tm.assert_series_equal(s, expected)
-
- # iloc/iat raise
- s = s_orig.copy()
-
- def f():
- s.iloc[3] = 5.
-
- self.assertRaises(IndexError, f)
-
- def f():
- s.iat[3] = 5.
-
- self.assertRaises(IndexError, f)
-
- # ## frame ##
-
- df_orig = DataFrame(
- np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
-
- # iloc/iat raise
- df = df_orig.copy()
-
- def f():
- df.iloc[4, 2] = 5.
-
- self.assertRaises(IndexError, f)
-
- def f():
- df.iat[4, 2] = 5.
-
- self.assertRaises(IndexError, f)
-
- # row setting where it exists
- expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
- df = df_orig.copy()
- df.iloc[1] = df.iloc[2]
- tm.assert_frame_equal(df, expected)
-
- expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
- df = df_orig.copy()
- df.loc[1] = df.loc[2]
- tm.assert_frame_equal(df, expected)
-
- # like 2578, partial setting with dtype preservation
- expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
- df = df_orig.copy()
- df.loc[3] = df.loc[2]
- tm.assert_frame_equal(df, expected)
-
- # single dtype frame, overwrite
- expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
- df = df_orig.copy()
- df.ix[:, 'B'] = df.ix[:, 'A']
- tm.assert_frame_equal(df, expected)
-
- # mixed dtype frame, overwrite
- expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
- df = df_orig.copy()
- df['B'] = df['B'].astype(np.float64)
- df.ix[:, 'B'] = df.ix[:, 'A']
- tm.assert_frame_equal(df, expected)
-
- # single dtype frame, partial setting
- expected = df_orig.copy()
- expected['C'] = df['A']
- df = df_orig.copy()
- df.ix[:, 'C'] = df.ix[:, 'A']
- tm.assert_frame_equal(df, expected)
-
- # mixed frame, partial setting
- expected = df_orig.copy()
- expected['C'] = df['A']
- df = df_orig.copy()
- df.ix[:, 'C'] = df.ix[:, 'A']
- tm.assert_frame_equal(df, expected)
-
- # ## panel ##
- p_orig = Panel(np.arange(16).reshape(2, 4, 2),
- items=['Item1', 'Item2'],
- major_axis=pd.date_range('2001/1/12', periods=4),
- minor_axis=['A', 'B'], dtype='float64')
-
- # panel setting via item
- p_orig = Panel(np.arange(16).reshape(2, 4, 2),
- items=['Item1', 'Item2'],
- major_axis=pd.date_range('2001/1/12', periods=4),
- minor_axis=['A', 'B'], dtype='float64')
- expected = p_orig.copy()
- expected['Item3'] = expected['Item1']
- p = p_orig.copy()
- p.loc['Item3'] = p['Item1']
- tm.assert_panel_equal(p, expected)
-
- # panel with aligned series
- expected = p_orig.copy()
- expected = expected.transpose(2, 1, 0)
- expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
- 'Item2': [32, 32, 32, 32]},
- index=p_orig.major_axis)
- expected = expected.transpose(2, 1, 0)
- p = p_orig.copy()
- p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
- tm.assert_panel_equal(p, expected)
-
- # GH 8473
- dates = date_range('1/1/2000', periods=8)
- df_orig = DataFrame(np.random.randn(8, 4), index=dates,
- columns=['A', 'B', 'C', 'D'])
-
- expected = pd.concat([df_orig, DataFrame(
- {'A': 7}, index=[dates[-1] + 1])])
- df = df_orig.copy()
- df.loc[dates[-1] + 1, 'A'] = 7
- tm.assert_frame_equal(df, expected)
- df = df_orig.copy()
- df.at[dates[-1] + 1, 'A'] = 7
- tm.assert_frame_equal(df, expected)
-
- exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
- expected = pd.concat([df_orig, exp_other], axis=1)
-
- df = df_orig.copy()
- df.loc[dates[-1] + 1, 0] = 7
- tm.assert_frame_equal(df, expected)
- df = df_orig.copy()
- df.at[dates[-1] + 1, 0] = 7
- tm.assert_frame_equal(df, expected)
-
- def test_partial_setting_mixed_dtype(self):
-
- # in a mixed dtype environment, try to preserve dtypes
- # by appending
- df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
-
- s = df.loc[1].copy()
- s.name = 2
- expected = df.append(s)
-
- df.loc[2] = df.loc[1]
- tm.assert_frame_equal(df, expected)
-
- # columns will align
- df = DataFrame(columns=['A', 'B'])
- df.loc[0] = Series(1, index=range(4))
- tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
-
- # columns will align
- df = DataFrame(columns=['A', 'B'])
- df.loc[0] = Series(1, index=['B'])
-
- exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
- index=[0], dtype='float64')
- tm.assert_frame_equal(df, exp)
-
- # list-like must conform
- df = DataFrame(columns=['A', 'B'])
-
- def f():
- df.loc[0] = [1, 2, 3]
-
- self.assertRaises(ValueError, f)
-
- # these are coerced to float unavoidably (as its a list-like to begin)
- df = DataFrame(columns=['A', 'B'])
- df.loc[3] = [6, 7]
-
- exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
- dtype='float64')
- tm.assert_frame_equal(df, exp)
-
- def test_series_partial_set(self):
- # partial set with new index
- # Regression from GH4825
- ser = Series([0.1, 0.2], index=[1, 2])
-
- # loc
- expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
- result = ser.loc[[3, 2, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
- result = ser.loc[[3, 2, 3, 'x']]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
- result = ser.loc[[2, 2, 1]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
- result = ser.loc[[2, 2, 'x', 1]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- # raises as nothing in in the index
- self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
-
- expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
- result = ser.loc[[2, 2, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
- result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[5, 3, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[5, 4, 4]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[4, 5, 6, 7]).loc[[7, 2, 2]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[4, 5, 5]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- # iloc
- expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
- result = ser.iloc[[1, 1, 0, 0]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- def test_series_partial_set_with_name(self):
- # GH 11497
-
- idx = Index([1, 2], dtype='int64', name='idx')
- ser = Series([0.1, 0.2], index=idx, name='s')
-
- # loc
- exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
- expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
- result = ser.loc[[3, 2, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
- expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
- name='s')
- result = ser.loc[[3, 2, 3, 'x']]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
- expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
- result = ser.loc[[2, 2, 1]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
- expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
- result = ser.loc[[2, 2, 'x', 1]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- # raises as nothing in in the index
- self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
-
- exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
- expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
- result = ser.loc[[2, 2, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
- expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
- idx = Index([1, 2, 3], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
- expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
- idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[5, 3, 3]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
- expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
- idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[5, 4, 4]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
- expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
- idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[7, 2, 2]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
- expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
- idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[4, 5, 5]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- # iloc
- exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
- expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
- result = ser.iloc[[1, 1, 0, 0]]
- tm.assert_series_equal(result, expected, check_index_type=True)
-
- def test_partial_set_invalid(self):
-
- # GH 4940
- # allow only setting of 'valid' values
-
- orig = tm.makeTimeDataFrame()
- df = orig.copy()
-
- # don't allow not string inserts
- def f():
- df.loc[100.0, :] = df.ix[0]
-
- self.assertRaises(TypeError, f)
-
- def f():
- df.loc[100, :] = df.ix[0]
-
- self.assertRaises(TypeError, f)
-
- def f():
- df.ix[100.0, :] = df.ix[0]
-
- self.assertRaises(TypeError, f)
-
- def f():
- df.ix[100, :] = df.ix[0]
-
- self.assertRaises(ValueError, f)
-
- # allow object conversion here
- df = orig.copy()
- df.loc['a', :] = df.ix[0]
- exp = orig.append(pd.Series(df.ix[0], name='a'))
- tm.assert_frame_equal(df, exp)
- tm.assert_index_equal(df.index,
- pd.Index(orig.index.tolist() + ['a']))
- self.assertEqual(df.index.dtype, 'object')
-
- def test_partial_set_empty_series(self):
-
- # GH5226
-
- # partially set with an empty object series
- s = Series()
- s.loc[1] = 1
- tm.assert_series_equal(s, Series([1], index=[1]))
- s.loc[3] = 3
- tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
-
- s = Series()
- s.loc[1] = 1.
- tm.assert_series_equal(s, Series([1.], index=[1]))
- s.loc[3] = 3.
- tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
-
- s = Series()
- s.loc['foo'] = 1
- tm.assert_series_equal(s, Series([1], index=['foo']))
- s.loc['bar'] = 3
- tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
- s.loc[3] = 4
- tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
-
- def test_partial_set_empty_frame(self):
-
- # partially set with an empty object
- # frame
- df = DataFrame()
-
- def f():
- df.loc[1] = 1
-
- self.assertRaises(ValueError, f)
-
- def f():
- df.loc[1] = Series([1], index=['foo'])
-
- self.assertRaises(ValueError, f)
-
- def f():
- df.loc[:, 1] = 1
-
- self.assertRaises(ValueError, f)
-
- # these work as they don't really change
- # anything but the index
- # GH5632
- expected = DataFrame(columns=['foo'], index=pd.Index(
- [], dtype='int64'))
-
- def f():
- df = DataFrame()
- df['foo'] = Series([], dtype='object')
- return df
-
- tm.assert_frame_equal(f(), expected)
-
- def f():
- df = DataFrame()
- df['foo'] = Series(df.index)
- return df
-
- tm.assert_frame_equal(f(), expected)
-
- def f():
- df = DataFrame()
- df['foo'] = df.index
- return df
-
- tm.assert_frame_equal(f(), expected)
-
- expected = DataFrame(columns=['foo'],
- index=pd.Index([], dtype='int64'))
- expected['foo'] = expected['foo'].astype('float64')
-
- def f():
- df = DataFrame()
- df['foo'] = []
- return df
-
- tm.assert_frame_equal(f(), expected)
-
- def f():
- df = DataFrame()
- df['foo'] = Series(range(len(df)))
- return df
-
- tm.assert_frame_equal(f(), expected)
-
- def f():
- df = DataFrame()
- tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
- df['foo'] = range(len(df))
- return df
-
- expected = DataFrame(columns=['foo'],
- index=pd.Index([], dtype='int64'))
- expected['foo'] = expected['foo'].astype('float64')
- tm.assert_frame_equal(f(), expected)
-
- df = DataFrame()
- tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
- df2 = DataFrame()
- df2[1] = Series([1], index=['foo'])
- df.loc[:, 1] = Series([1], index=['foo'])
- tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
- tm.assert_frame_equal(df, df2)
-
- # no index to start
- expected = DataFrame({0: Series(1, index=range(4))},
- columns=['A', 'B', 0])
-
- df = DataFrame(columns=['A', 'B'])
- df[0] = Series(1, index=range(4))
- df.dtypes
- str(df)
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame(columns=['A', 'B'])
- df.loc[:, 0] = Series(1, index=range(4))
- df.dtypes
- str(df)
- tm.assert_frame_equal(df, expected)
-
- def test_partial_set_empty_frame_row(self):
- # GH5720, GH5744
- # don't create rows when empty
- expected = DataFrame(columns=['A', 'B', 'New'],
- index=pd.Index([], dtype='int64'))
- expected['A'] = expected['A'].astype('int64')
- expected['B'] = expected['B'].astype('float64')
- expected['New'] = expected['New'].astype('float64')
-
- df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
- y = df[df.A > 5]
- y['New'] = np.nan
- tm.assert_frame_equal(y, expected)
- # tm.assert_frame_equal(y,expected)
-
- expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
- expected['d'] = expected['d'].astype('int64')
- df = DataFrame(columns=['a', 'b', 'c c'])
- df['d'] = 3
- tm.assert_frame_equal(df, expected)
- tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
-
- # reindex columns is ok
- df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
- y = df[df.A > 5]
- result = y.reindex(columns=['A', 'B', 'C'])
- expected = DataFrame(columns=['A', 'B', 'C'],
- index=pd.Index([], dtype='int64'))
- expected['A'] = expected['A'].astype('int64')
- expected['B'] = expected['B'].astype('float64')
- expected['C'] = expected['C'].astype('float64')
- tm.assert_frame_equal(result, expected)
-
- def test_partial_set_empty_frame_set_series(self):
- # GH 5756
- # setting with empty Series
- df = DataFrame(Series())
- tm.assert_frame_equal(df, DataFrame({0: Series()}))
-
- df = DataFrame(Series(name='foo'))
- tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
-
- def test_partial_set_empty_frame_empty_copy_assignment(self):
- # GH 5932
- # copy on empty with assignment fails
- df = DataFrame(index=[0])
- df = df.copy()
- df['a'] = 0
- expected = DataFrame(0, index=[0], columns=['a'])
- tm.assert_frame_equal(df, expected)
-
- def test_partial_set_empty_frame_empty_consistencies(self):
- # GH 6171
- # consistency on empty frames
- df = DataFrame(columns=['x', 'y'])
- df['x'] = [1, 2]
- expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
- tm.assert_frame_equal(df, expected, check_dtype=False)
+ def test_index_type_coercion(self):
- df = DataFrame(columns=['x', 'y'])
- df['x'] = ['1', '2']
- expected = DataFrame(
- dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
- tm.assert_frame_equal(df, expected)
+ with catch_warnings(record=True):
- df = DataFrame(columns=['x', 'y'])
- df.loc[0, 'x'] = 1
- expected = DataFrame(dict(x=[1], y=[np.nan]))
- tm.assert_frame_equal(df, expected, check_dtype=False)
-
- def test_cache_updating(self):
- # GH 4939, make sure to update the cache on setitem
-
- df = tm.makeDataFrame()
- df['A'] # cache series
- df.ix["Hello Friend"] = df.ix[0]
- self.assertIn("Hello Friend", df['A'].index)
- self.assertIn("Hello Friend", df['B'].index)
-
- panel = tm.makePanel()
- panel.ix[0] # get first item into cache
- panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
- self.assertIn("A+1", panel.ix[0].columns)
- self.assertIn("A+1", panel.ix[1].columns)
-
- # 5216
- # make sure that we don't try to set a dead cache
- a = np.random.rand(10, 3)
- df = DataFrame(a, columns=['x', 'y', 'z'])
- tuples = [(i, j) for i in range(5) for j in range(2)]
- index = MultiIndex.from_tuples(tuples)
- df.index = index
-
- # setting via chained assignment
- # but actually works, since everything is a view
- df.loc[0]['z'].iloc[0] = 1.
- result = df.loc[(0, 0), 'z']
- self.assertEqual(result, 1)
-
- # correct setting
- df.loc[(0, 0), 'z'] = 2
- result = df.loc[(0, 0), 'z']
- self.assertEqual(result, 2)
-
- # 10264
- df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
- 'a', 'b', 'c', 'd', 'e'], index=range(5))
- df['f'] = 0
- df.f.values[3] = 1
+ # GH 11836
+ # if we have an index type and set it with something that looks
+ # to numpy like the same, but is actually, not
+ # (e.g. setting with a float or string '0')
+ # then we need to coerce to object
- # TODO(wesm): unused?
- # y = df.iloc[np.arange(2, len(df))]
+ # integer indexes
+ for s in [Series(range(5)),
+ Series(range(5), index=range(1, 6))]:
- df.f.values[3] = 2
- expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
- 'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
- expected.at[3, 'f'] = 2
- tm.assert_frame_equal(df, expected)
- expected = Series([0, 0, 0, 2, 0], name='f')
- tm.assert_series_equal(df.f, expected)
-
- def test_set_ix_out_of_bounds_axis_0(self):
- df = pd.DataFrame(
- randn(2, 5), index=["row%s" % i for i in range(2)],
- columns=["col%s" % i for i in range(5)])
- self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
-
- def test_set_ix_out_of_bounds_axis_1(self):
- df = pd.DataFrame(
- randn(5, 2), index=["row%s" % i for i in range(5)],
- columns=["col%s" % i for i in range(2)])
- self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
-
- def test_iloc_empty_list_indexer_is_ok(self):
- from pandas.util.testing import makeCustomDataframe as mkdf
- df = mkdf(5, 2)
- # vertical empty
- tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
- check_index_type=True, check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
- check_index_type=True, check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
- check_index_type=True,
- check_column_type=True)
-
- def test_loc_empty_list_indexer_is_ok(self):
- from pandas.util.testing import makeCustomDataframe as mkdf
- df = mkdf(5, 2)
- # vertical empty
- tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
- check_index_type=True, check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
- check_index_type=True, check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
- check_index_type=True,
- check_column_type=True)
-
- def test_ix_empty_list_indexer_is_ok(self):
- from pandas.util.testing import makeCustomDataframe as mkdf
- df = mkdf(5, 2)
- # vertical empty
- tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
- check_index_type=True,
- check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
- check_index_type=True,
- check_column_type=True)
- # horizontal empty
- tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
- check_index_type=True,
- check_column_type=True)
+ self.assertTrue(s.index.is_integer())
- def test_index_type_coercion(self):
+ for indexer in [lambda x: x.ix,
+ lambda x: x.loc,
+ lambda x: x]:
+ s2 = s.copy()
+ indexer(s2)[0.1] = 0
+ self.assertTrue(s2.index.is_floating())
+ self.assertTrue(indexer(s2)[0.1] == 0)
- # GH 11836
- # if we have an index type and set it with something that looks
- # to numpy like the same, but is actually, not
- # (e.g. setting with a float or string '0')
- # then we need to coerce to object
+ s2 = s.copy()
+ indexer(s2)[0.0] = 0
+ exp = s.index
+ if 0 not in s:
+ exp = Index(s.index.tolist() + [0])
+ tm.assert_index_equal(s2.index, exp)
- # integer indexes
- for s in [Series(range(5)),
- Series(range(5), index=range(1, 6))]:
+ s2 = s.copy()
+ indexer(s2)['0'] = 0
+ self.assertTrue(s2.index.is_object())
- self.assertTrue(s.index.is_integer())
+ for s in [Series(range(5), index=np.arange(5.))]:
- for indexer in [lambda x: x.ix,
- lambda x: x.loc,
- lambda x: x]:
- s2 = s.copy()
- indexer(s2)[0.1] = 0
- self.assertTrue(s2.index.is_floating())
- self.assertTrue(indexer(s2)[0.1] == 0)
+ self.assertTrue(s.index.is_floating())
- s2 = s.copy()
- indexer(s2)[0.0] = 0
- exp = s.index
- if 0 not in s:
- exp = Index(s.index.tolist() + [0])
- tm.assert_index_equal(s2.index, exp)
+ for idxr in [lambda x: x.ix,
+ lambda x: x.loc,
+ lambda x: x]:
- s2 = s.copy()
- indexer(s2)['0'] = 0
- self.assertTrue(s2.index.is_object())
+ s2 = s.copy()
+ idxr(s2)[0.1] = 0
+ self.assertTrue(s2.index.is_floating())
+ self.assertTrue(idxr(s2)[0.1] == 0)
- for s in [Series(range(5), index=np.arange(5.))]:
+ s2 = s.copy()
+ idxr(s2)[0.0] = 0
+ tm.assert_index_equal(s2.index, s.index)
- self.assertTrue(s.index.is_floating())
+ s2 = s.copy()
+ idxr(s2)['0'] = 0
+ self.assertTrue(s2.index.is_object())
- for idxr in [lambda x: x.ix,
- lambda x: x.loc,
- lambda x: x]:
- s2 = s.copy()
- idxr(s2)[0.1] = 0
- self.assertTrue(s2.index.is_floating())
- self.assertTrue(idxr(s2)[0.1] == 0)
+class TestMisc(Base, tm.TestCase):
- s2 = s.copy()
- idxr(s2)[0.0] = 0
- tm.assert_index_equal(s2.index, s.index)
+ def test_indexer_caching(self):
+ # GH5727
+ # make sure that indexers are in the _internal_names_set
+ n = 1000001
+ arrays = [lrange(n), lrange(n)]
+ index = MultiIndex.from_tuples(lzip(*arrays))
+ s = Series(np.zeros(n), index=index)
+ str(s)
- s2 = s.copy()
- idxr(s2)['0'] = 0
- self.assertTrue(s2.index.is_object())
+ # setitem
+ expected = Series(np.ones(n), index=index)
+ s = Series(np.zeros(n), index=index)
+ s[s == 0] = 1
+ tm.assert_series_equal(s, expected)
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
@@ -3143,13 +621,6 @@ def test_float_index_to_mixed(self):
'a': [10] * 10}),
df)
- def test_duplicate_ix_returns_series(self):
- df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
- columns=list('abc'))
- r = df.ix[0.2, 'a']
- e = df.loc[0.2, 'a']
- tm.assert_series_equal(r, e)
-
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
@@ -3185,15 +656,18 @@ def run_tests(df, rhs, right):
tm.assert_frame_equal(left, right)
left = df.copy()
- left.ix[s, l] = rhs
+ with catch_warnings(record=True):
+ left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
- left.ix[i, j] = rhs
+ with catch_warnings(record=True):
+ left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
- left.ix[r, c] = rhs
+ with catch_warnings(record=True):
+ left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
@@ -3226,7 +700,7 @@ def assert_slices_equivalent(l_slc, i_slc):
if not idx.is_integer:
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
- tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
+ tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
@@ -3243,8 +717,9 @@ def test_slice_with_zero_step_raises(self):
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
- self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
- lambda: s.ix[::0])
+ with catch_warnings(record=True):
+ self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
+ lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
@@ -3259,11 +734,13 @@ def test_indexing_assignment_dict_already_exists(self):
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
- df2 = df.ix[[], :]
+ with catch_warnings(record=True):
+ df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
- tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
+ with catch_warnings(record=True):
+ tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
new file mode 100644
index 0000000000000..e68e8015a2f39
--- /dev/null
+++ b/pandas/tests/indexing/test_ix.py
@@ -0,0 +1,333 @@
+""" test indexing with ix """
+
+from warnings import catch_warnings
+
+import numpy as np
+import pandas as pd
+
+from pandas.types.common import is_scalar
+from pandas.compat import lrange
+from pandas import Series, DataFrame, option_context, MultiIndex
+from pandas.util import testing as tm
+from pandas.core.common import PerformanceWarning
+
+
+class TestIX(tm.TestCase):
+
+ def test_ix_deprecation(self):
+ # GH 15114
+
+ df = DataFrame({'A': [1, 2, 3]})
+ with tm.assert_produces_warning(DeprecationWarning,
+ check_stacklevel=False):
+ df.ix[1, 'A']
+
+ def test_ix_loc_setitem_consistency(self):
+
+ # GH 5771
+ # loc with slice and series
+ s = Series(0, index=[4, 5, 6])
+ s.loc[4:5] += 1
+ expected = Series([1, 1, 0], index=[4, 5, 6])
+ tm.assert_series_equal(s, expected)
+
+ # GH 5928
+ # chained indexing assignment
+ df = DataFrame({'a': [0, 1, 2]})
+ expected = df.copy()
+ with catch_warnings(record=True):
+ expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
+
+ with catch_warnings(record=True):
+ df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
+ with catch_warnings(record=True):
+ df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
+ 'float64') + 0.5
+ expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
+ tm.assert_frame_equal(df, expected)
+
+ # GH 8607
+ # ix setitem consistency
+ df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
+ 'delta': [1174, 904, 161],
+ 'elapsed': [7673, 9277, 1470]})
+ expected = DataFrame({'timestamp': pd.to_datetime(
+ [1413840976, 1413842580, 1413760580], unit='s'),
+ 'delta': [1174, 904, 161],
+ 'elapsed': [7673, 9277, 1470]})
+
+ df2 = df.copy()
+ df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
+ tm.assert_frame_equal(df2, expected)
+
+ df2 = df.copy()
+ df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
+ tm.assert_frame_equal(df2, expected)
+
+ df2 = df.copy()
+ with catch_warnings(record=True):
+ df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
+ tm.assert_frame_equal(df2, expected)
+
+ def test_ix_loc_consistency(self):
+
+ # GH 8613
+ # some edge cases where ix/loc should return the same
+ # this is not an exhaustive case
+
+ def compare(result, expected):
+ if is_scalar(expected):
+ self.assertEqual(result, expected)
+ else:
+ self.assertTrue(expected.equals(result))
+
+ # failure cases for .loc, but these work for .ix
+ df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
+ for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
+ tuple([slice(0, 2), df.columns[0:2]])]:
+
+ for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
+ tm.makeDateIndex, tm.makePeriodIndex,
+ tm.makeTimedeltaIndex]:
+ df.index = index(len(df.index))
+ with catch_warnings(record=True):
+ df.ix[key]
+
+ self.assertRaises(TypeError, lambda: df.loc[key])
+
+ df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
+ index=pd.date_range('2012-01-01', periods=5))
+
+ for key in ['2012-01-03',
+ '2012-01-31',
+ slice('2012-01-03', '2012-01-03'),
+ slice('2012-01-03', '2012-01-04'),
+ slice('2012-01-03', '2012-01-06', 2),
+ slice('2012-01-03', '2012-01-31'),
+ tuple([[True, True, True, False, True]]), ]:
+
+ # getitem
+
+ # if the expected raises, then compare the exceptions
+ try:
+ with catch_warnings(record=True):
+ expected = df.ix[key]
+ except KeyError:
+ self.assertRaises(KeyError, lambda: df.loc[key])
+ continue
+
+ result = df.loc[key]
+ compare(result, expected)
+
+ # setitem
+ df1 = df.copy()
+ df2 = df.copy()
+
+ with catch_warnings(record=True):
+ df1.ix[key] = 10
+ df2.loc[key] = 10
+ compare(df2, df1)
+
+ # edge cases
+ s = Series([1, 2, 3, 4], index=list('abde'))
+
+ result1 = s['a':'c']
+ with catch_warnings(record=True):
+ result2 = s.ix['a':'c']
+ result3 = s.loc['a':'c']
+ tm.assert_series_equal(result1, result2)
+ tm.assert_series_equal(result1, result3)
+
+ # now work rather than raising KeyError
+ s = Series(range(5), [-2, -1, 1, 2, 3])
+
+ with catch_warnings(record=True):
+ result1 = s.ix[-10:3]
+ result2 = s.loc[-10:3]
+ tm.assert_series_equal(result1, result2)
+
+ with catch_warnings(record=True):
+ result1 = s.ix[0:3]
+ result2 = s.loc[0:3]
+ tm.assert_series_equal(result1, result2)
+
+ def test_ix_weird_slicing(self):
+ # http://stackoverflow.com/q/17056560/1240268
+ df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
+ 'two': [1, 2, 3, 4, 5]})
+ df.loc[df['one'] > 1, 'two'] = -df['two']
+
+ expected = DataFrame({'one': {0: 1.0,
+ 1: 2.0,
+ 2: 3.0,
+ 3: np.nan,
+ 4: np.nan},
+ 'two': {0: 1,
+ 1: -2,
+ 2: -3,
+ 3: 4,
+ 4: 5}})
+ tm.assert_frame_equal(df, expected)
+
+ def test_ix_general(self):
+
+ # ix general issues
+
+ # GH 2817
+ data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
+ 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
+ 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
+ df = DataFrame(data).set_index(keys=['col', 'year'])
+ key = 4.0, 2012
+
+ # emits a PerformanceWarning, ok
+ with self.assert_produces_warning(PerformanceWarning):
+ tm.assert_frame_equal(df.loc[key], df.iloc[2:])
+
+ # this is ok
+ df.sort_index(inplace=True)
+ res = df.loc[key]
+
+ # col has float dtype, result should be Float64Index
+ index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
+ names=['col', 'year'])
+ expected = DataFrame({'amount': [222, 333, 444]}, index=index)
+ tm.assert_frame_equal(res, expected)
+
+ def test_ix_assign_column_mixed(self):
+ # GH #1142
+ df = DataFrame(tm.getSeriesData())
+ df['foo'] = 'bar'
+
+ orig = df.loc[:, 'B'].copy()
+ df.loc[:, 'B'] = df.loc[:, 'B'] + 1
+ tm.assert_series_equal(df.B, orig + 1)
+
+ # GH 3668, mixed frame with series value
+ df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
+ expected = df.copy()
+
+ for i in range(5):
+ indexer = i * 2
+ v = 1000 + i * 200
+ expected.loc[indexer, 'y'] = v
+ self.assertEqual(expected.loc[indexer, 'y'], v)
+
+ df.loc[df.x % 2 == 0, 'y'] = df.loc[df.x % 2 == 0, 'y'] * 100
+ tm.assert_frame_equal(df, expected)
+
+ # GH 4508, making sure consistency of assignments
+ df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
+ df.loc[[0, 2, ], 'b'] = [100, -100]
+ expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
+ tm.assert_frame_equal(df, expected)
+
+ df = pd.DataFrame({'a': lrange(4)})
+ df['b'] = np.nan
+ df.loc[[1, 3], 'b'] = [100, -100]
+ expected = DataFrame({'a': [0, 1, 2, 3],
+ 'b': [np.nan, 100, np.nan, -100]})
+ tm.assert_frame_equal(df, expected)
+
+ # ok, but chained assignments are dangerous
+ # if we turn off chained assignement it will work
+ with option_context('chained_assignment', None):
+ df = pd.DataFrame({'a': lrange(4)})
+ df['b'] = np.nan
+ df['b'].loc[[1, 3]] = [100, -100]
+ tm.assert_frame_equal(df, expected)
+
+ def test_ix_get_set_consistency(self):
+
+ # GH 4544
+ # ix/loc get/set not consistent when
+ # a mixed int/string index
+ df = DataFrame(np.arange(16).reshape((4, 4)),
+ columns=['a', 'b', 8, 'c'],
+ index=['e', 7, 'f', 'g'])
+
+ with catch_warnings(record=True):
+ self.assertEqual(df.ix['e', 8], 2)
+ self.assertEqual(df.loc['e', 8], 2)
+
+ with catch_warnings(record=True):
+ df.ix['e', 8] = 42
+ self.assertEqual(df.ix['e', 8], 42)
+ self.assertEqual(df.loc['e', 8], 42)
+
+ df.loc['e', 8] = 45
+ with catch_warnings(record=True):
+ self.assertEqual(df.ix['e', 8], 45)
+ self.assertEqual(df.loc['e', 8], 45)
+
+ def test_ix_slicing_strings(self):
+ # GH3836
+ data = {'Classification':
+ ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
+ 'Random': [1, 2, 3, 4, 5],
+ 'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
+ df = DataFrame(data)
+ x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
+ ])]
+ with catch_warnings(record=True):
+ df.ix[x.index, 'X'] = df['Classification']
+
+ expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
+ 1: 'bbb',
+ 2: 'SA EQUITY',
+ 3: 'SA SSF',
+ 4: 'aaa'},
+ 'Random': {0: 1,
+ 1: 2,
+ 2: 3,
+ 3: 4,
+ 4: 5},
+ 'X': {0: 'correct',
+ 1: 'bbb',
+ 2: 'correct',
+ 3: 'correct',
+ 4: 'aaa'}}) # bug was 4: 'bbb'
+
+ tm.assert_frame_equal(df, expected)
+
+ def test_ix_setitem_out_of_bounds_axis_0(self):
+ df = pd.DataFrame(
+ np.random.randn(2, 5), index=["row%s" % i for i in range(2)],
+ columns=["col%s" % i for i in range(5)])
+ with catch_warnings(record=True):
+ self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
+
+ def test_ix_setitem_out_of_bounds_axis_1(self):
+ df = pd.DataFrame(
+ np.random.randn(5, 2), index=["row%s" % i for i in range(5)],
+ columns=["col%s" % i for i in range(2)])
+ with catch_warnings(record=True):
+ self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
+
+ def test_ix_empty_list_indexer_is_ok(self):
+ with catch_warnings(record=True):
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5, 2)
+ # vertical empty
+ tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
+ check_index_type=True,
+ check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
+ check_index_type=True,
+ check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
+ check_index_type=True,
+ check_column_type=True)
+
+ def test_ix_duplicate_returns_series(self):
+ df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
+ columns=list('abc'))
+ with catch_warnings(record=True):
+ r = df.ix[0.2, 'a']
+ e = df.loc[0.2, 'a']
+ tm.assert_series_equal(r, e)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
new file mode 100644
index 0000000000000..af9d3ffdf6671
--- /dev/null
+++ b/pandas/tests/indexing/test_loc.py
@@ -0,0 +1,630 @@
+""" test label based indexing with loc """
+
+import itertools
+from warnings import catch_warnings
+import numpy as np
+
+import pandas as pd
+from pandas.compat import lrange, StringIO
+from pandas import (Series, DataFrame, Timestamp,
+ date_range, MultiIndex)
+from pandas.util import testing as tm
+from pandas.tests.indexing.common import Base
+
+
+class TestLoc(Base, tm.TestCase):
+
+ def test_loc_getitem_dups(self):
+ # GH 5678
+ # repeated gettitems on a dup index returing a ndarray
+ df = DataFrame(
+ np.random.random_sample((20, 5)),
+ index=['ABCDE' [x % 5] for x in range(20)])
+ expected = df.loc['A', 0]
+ result = df.loc[:, 0].loc['A']
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_dups2(self):
+
+ # GH4726
+ # dup indexing with iloc/loc
+ df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
+ columns=['a', 'a', 'a', 'a', 'a'], index=[1])
+ expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
+ index=['a', 'a', 'a', 'a', 'a'], name=1)
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc[1]
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_dups(self):
+
+ # GH 6541
+ df_orig = DataFrame(
+ {'me': list('rttti'),
+ 'foo': list('aaade'),
+ 'bar': np.arange(5, dtype='float64') * 1.34 + 2,
+ 'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
+
+ indexer = tuple(['r', ['bar', 'bar2']])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ indexer = tuple(['r', 'bar'])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ indexer = tuple(['t', ['bar', 'bar2']])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ def test_loc_setitem_slice(self):
+ # GH10503
+
+ # assigning the same type should not change the type
+ df1 = DataFrame({'a': [0, 1, 1],
+ 'b': Series([100, 200, 300], dtype='uint32')})
+ ix = df1['a'] == 1
+ newb1 = df1.loc[ix, 'b'] + 1
+ df1.loc[ix, 'b'] = newb1
+ expected = DataFrame({'a': [0, 1, 1],
+ 'b': Series([100, 201, 301], dtype='uint32')})
+ tm.assert_frame_equal(df1, expected)
+
+ # assigning a new type should get the inferred type
+ df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
+ dtype='uint64')
+ ix = df1['a'] == 1
+ newb2 = df2.loc[ix, 'b']
+ df1.loc[ix, 'b'] = newb2
+ expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
+ dtype='uint64')
+ tm.assert_frame_equal(df2, expected)
+
+ def test_loc_getitem_int(self):
+
+ # int label
+ self.check_result('int label', 'loc', 2, 'ix', 2,
+ typs=['ints', 'uints'], axes=0)
+ self.check_result('int label', 'loc', 3, 'ix', 3,
+ typs=['ints', 'uints'], axes=1)
+ self.check_result('int label', 'loc', 4, 'ix', 4,
+ typs=['ints', 'uints'], axes=2)
+ self.check_result('int label', 'loc', 2, 'ix', 2,
+ typs=['label'], fails=KeyError)
+
+ def test_loc_getitem_label(self):
+
+ # label
+ self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
+ axes=0)
+ self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
+ axes=0)
+ self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
+ self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
+ typs=['ts'], axes=0)
+ self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
+ fails=KeyError)
+
+ def test_loc_getitem_label_out_of_range(self):
+
+ # out of range label
+ self.check_result('label range', 'loc', 'f', 'ix', 'f',
+ typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
+ fails=KeyError)
+ self.check_result('label range', 'loc', 'f', 'ix', 'f',
+ typs=['floats'], fails=TypeError)
+ self.check_result('label range', 'loc', 20, 'ix', 20,
+ typs=['ints', 'uints', 'mixed'], fails=KeyError)
+ self.check_result('label range', 'loc', 20, 'ix', 20,
+ typs=['labels'], fails=TypeError)
+ self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
+ axes=0, fails=TypeError)
+ self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
+ axes=0, fails=TypeError)
+
+ def test_loc_getitem_label_list(self):
+
+ # list of labels
+ self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
+ typs=['ints', 'uints'], axes=0)
+ self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
+ typs=['ints', 'uints'], axes=1)
+ self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
+ typs=['ints', 'uints'], axes=2)
+ self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
+ ['a', 'b', 'd'], typs=['labels'], axes=0)
+ self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
+ ['A', 'B', 'C'], typs=['labels'], axes=1)
+ self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
+ ['Z', 'Y', 'W'], typs=['labels'], axes=2)
+ self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
+ [2, 8, 'null'], typs=['mixed'], axes=0)
+ self.check_result('list lbl', 'loc',
+ [Timestamp('20130102'), Timestamp('20130103')], 'ix',
+ [Timestamp('20130102'), Timestamp('20130103')],
+ typs=['ts'], axes=0)
+
+ self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
+ typs=['empty'], fails=KeyError)
+ self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
+ typs=['ints', 'uints'], axes=0, fails=KeyError)
+ self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
+ typs=['ints', 'uints'], axes=1, fails=KeyError)
+ self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
+ typs=['ints', 'uints'], axes=2, fails=KeyError)
+
+ def test_loc_getitem_label_list_fails(self):
+ # fails
+ self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
+ typs=['ints', 'uints'], axes=1, fails=KeyError)
+ self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
+ typs=['ints', 'uints'], axes=2, fails=KeyError)
+
+ def test_loc_getitem_label_array_like(self):
+ # array like
+ self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
+ 'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
+ self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
+ 'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
+ self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
+ 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
+
+ def test_loc_getitem_bool(self):
+ # boolean indexers
+ b = [True, False, True, False]
+ self.check_result('bool', 'loc', b, 'ix', b,
+ typs=['ints', 'uints', 'labels',
+ 'mixed', 'ts', 'floats'])
+ self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
+ fails=KeyError)
+
+ def test_loc_getitem_int_slice(self):
+
+ # ok
+ self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
+ typs=['ints', 'uints'], axes=0)
+ self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
+ typs=['ints', 'uints'], axes=1)
+ self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
+ typs=['ints', 'uints'], axes=2)
+
+ # GH 3053
+ # loc should treat integer slices like label slices
+
+ index = MultiIndex.from_tuples([t for t in itertools.product(
+ [6, 7, 8], ['a', 'b'])])
+ df = DataFrame(np.random.randn(6, 6), index, index)
+ result = df.loc[6:8, :]
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+ index = MultiIndex.from_tuples([t
+ for t in itertools.product(
+ [10, 20, 30], ['a', 'b'])])
+ df = DataFrame(np.random.randn(6, 6), index, index)
+ result = df.loc[20:30, :]
+ expected = df.iloc[2:]
+ tm.assert_frame_equal(result, expected)
+
+ # doc examples
+ result = df.loc[10, :]
+ expected = df.iloc[0:2]
+ expected.index = ['a', 'b']
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc[:, 10]
+ # expected = df.ix[:,10] (this fails)
+ expected = df[10]
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_to_fail(self):
+
+ # GH3449
+ df = DataFrame(np.random.random((3, 3)),
+ index=['a', 'b', 'c'],
+ columns=['e', 'f', 'g'])
+
+ # raise a KeyError?
+ self.assertRaises(KeyError, df.loc.__getitem__,
+ tuple([[1, 2], [1, 2]]))
+
+ # GH 7496
+ # loc should not fallback
+
+ s = Series()
+ s.loc[1] = 1
+ s.loc['a'] = 2
+
+ self.assertRaises(KeyError, lambda: s.loc[-1])
+ self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
+
+ self.assertRaises(KeyError, lambda: s.loc[['4']])
+
+ s.loc[-1] = 3
+ result = s.loc[[-1, -2]]
+ expected = Series([3, np.nan], index=[-1, -2])
+ tm.assert_series_equal(result, expected)
+
+ s['a'] = 2
+ self.assertRaises(KeyError, lambda: s.loc[[-2]])
+
+ del s['a']
+
+ def f():
+ s.loc[[-2]] = 0
+
+ self.assertRaises(KeyError, f)
+
+ # inconsistency between .loc[values] and .loc[values,:]
+ # GH 7999
+ df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
+
+ def f():
+ df.loc[[3], :]
+
+ self.assertRaises(KeyError, f)
+
+ def f():
+ df.loc[[3]]
+
+ self.assertRaises(KeyError, f)
+
+ def test_loc_getitem_label_slice(self):
+
+ # label slices (with ints)
+ self.check_result('lab slice', 'loc', slice(1, 3),
+ 'ix', slice(1, 3),
+ typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
+ fails=TypeError)
+
+ # real label slices
+ self.check_result('lab slice', 'loc', slice('a', 'c'),
+ 'ix', slice('a', 'c'), typs=['labels'], axes=0)
+ self.check_result('lab slice', 'loc', slice('A', 'C'),
+ 'ix', slice('A', 'C'), typs=['labels'], axes=1)
+ self.check_result('lab slice', 'loc', slice('W', 'Z'),
+ 'ix', slice('W', 'Z'), typs=['labels'], axes=2)
+
+ self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
+ 'ix', slice('20130102', '20130104'),
+ typs=['ts'], axes=0)
+ self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
+ 'ix', slice('20130102', '20130104'),
+ typs=['ts'], axes=1, fails=TypeError)
+ self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
+ 'ix', slice('20130102', '20130104'),
+ typs=['ts'], axes=2, fails=TypeError)
+
+ # GH 14316
+ self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
+ 'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
+
+ self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
+ typs=['mixed'], axes=0, fails=TypeError)
+ self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
+ typs=['mixed'], axes=1, fails=KeyError)
+ self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
+ typs=['mixed'], axes=2, fails=KeyError)
+
+ self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
+ 2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
+
+ def test_loc_general(self):
+
+ df = DataFrame(
+ np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
+ index=['A', 'B', 'C', 'D'])
+
+ # want this to work
+ result = df.loc[:, "A":"B"].iloc[0:2, :]
+ self.assertTrue((result.columns == ['A', 'B']).all())
+ self.assertTrue((result.index == ['A', 'B']).all())
+
+ # mixed type
+ result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
+ expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
+ tm.assert_series_equal(result, expected)
+ self.assertEqual(result.dtype, object)
+
+ def test_loc_setitem_consistency(self):
+ # GH 6149
+ # coerce similary for setitem and loc when rows have a null-slice
+ expected = DataFrame({'date': Series(0, index=range(5),
+ dtype=np.int64),
+ 'val': Series(range(5), dtype=np.int64)})
+
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(
+ range(5), dtype=np.int64)})
+ df.loc[:, 'date'] = 0
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(range(5), dtype=np.int64)})
+ df.loc[:, 'date'] = np.array(0, dtype=np.int64)
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(range(5), dtype=np.int64)})
+ df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
+ tm.assert_frame_equal(df, expected)
+
+ expected = DataFrame({'date': Series('foo', index=range(5)),
+ 'val': Series(range(5), dtype=np.int64)})
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(range(5), dtype=np.int64)})
+ df.loc[:, 'date'] = 'foo'
+ tm.assert_frame_equal(df, expected)
+
+ expected = DataFrame({'date': Series(1.0, index=range(5)),
+ 'val': Series(range(5), dtype=np.int64)})
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(range(5), dtype=np.int64)})
+ df.loc[:, 'date'] = 1.0
+ tm.assert_frame_equal(df, expected)
+
+ def test_loc_setitem_consistency_empty(self):
+ # empty (essentially noops)
+ expected = DataFrame(columns=['x', 'y'])
+ expected['x'] = expected['x'].astype(np.int64)
+ df = DataFrame(columns=['x', 'y'])
+ df.loc[:, 'x'] = 1
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame(columns=['x', 'y'])
+ df['x'] = 1
+ tm.assert_frame_equal(df, expected)
+
+ def test_loc_setitem_consistency_slice_column_len(self):
+ # .loc[:,column] setting with slice == len of the column
+ # GH10408
+ data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
+Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
+Region,Site,RespondentID,,,,,
+Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
+Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
+Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
+Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
+
+ df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
+ df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
+ 'Respondent', 'StartDate')])
+ df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
+ 'Respondent', 'EndDate')])
+ df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
+ 'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
+
+ df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
+ 'Respondent', 'Duration')].astype('timedelta64[s]')
+ expected = Series([1380, 720, 840, 2160.], index=df.index,
+ name=('Respondent', 'Duration'))
+ tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
+
+ def test_loc_setitem_frame(self):
+ df = self.frame_labels
+
+ result = df.iloc[0, 0]
+
+ df.loc['a', 'A'] = 1
+ result = df.loc['a', 'A']
+ self.assertEqual(result, 1)
+
+ result = df.iloc[0, 0]
+ self.assertEqual(result, 1)
+
+ df.loc[:, 'B':'D'] = 0
+ expected = df.loc[:, 'B':'D']
+ result = df.iloc[:, 1:]
+ tm.assert_frame_equal(result, expected)
+
+ # GH 6254
+ # setting issue
+ df = DataFrame(index=[3, 5, 4], columns=['A'])
+ df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
+ expected = DataFrame(dict(A=Series(
+ [1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
+ tm.assert_frame_equal(df, expected)
+
+ # GH 6252
+ # setting with an empty frame
+ keys1 = ['@' + str(i) for i in range(5)]
+ val1 = np.arange(5, dtype='int64')
+
+ keys2 = ['@' + str(i) for i in range(4)]
+ val2 = np.arange(4, dtype='int64')
+
+ index = list(set(keys1).union(keys2))
+ df = DataFrame(index=index)
+ df['A'] = np.nan
+ df.loc[keys1, 'A'] = val1
+
+ df['B'] = np.nan
+ df.loc[keys2, 'B'] = val2
+
+ expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
+ val2, index=keys2))).reindex(index=index)
+ tm.assert_frame_equal(df, expected)
+
+ # GH 8669
+ # invalid coercion of nan -> int
+ df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
+ df.loc[df.B > df.A, 'B'] = df.A
+ expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
+ tm.assert_frame_equal(df, expected)
+
+ # GH 6546
+ # setting with mixed labels
+ df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
+
+ result = df.loc[0, [1, 2]]
+ expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
+ tm.assert_series_equal(result, expected)
+
+ expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
+ df.loc[0, [1, 2]] = [5, 6]
+ tm.assert_frame_equal(df, expected)
+
+ def test_loc_setitem_frame_multiples(self):
+ # multiple setting
+ df = DataFrame({'A': ['foo', 'bar', 'baz'],
+ 'B': Series(
+ range(3), dtype=np.int64)})
+ rhs = df.loc[1:2]
+ rhs.index = df.index[0:2]
+ df.loc[0:1] = rhs
+ expected = DataFrame({'A': ['bar', 'baz', 'baz'],
+ 'B': Series(
+ [1, 2, 2], dtype=np.int64)})
+ tm.assert_frame_equal(df, expected)
+
+ # multiple setting with frame on rhs (with M8)
+ df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
+ 'val': Series(
+ range(5), dtype=np.int64)})
+ expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
+ '20000102'), Timestamp('20000101'), Timestamp('20000102'),
+ Timestamp('20000103')],
+ 'val': Series(
+ [0, 1, 0, 1, 2], dtype=np.int64)})
+ rhs = df.loc[0:2]
+ rhs.index = df.index[2:5]
+ df.loc[2:4] = rhs
+ tm.assert_frame_equal(df, expected)
+
+ def test_loc_coerceion(self):
+
+ # 12411
+ df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
+ pd.NaT]})
+ expected = df.dtypes
+
+ result = df.iloc[[0]]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ result = df.iloc[[1]]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ # 12045
+ import datetime
+ df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
+ datetime.datetime(1012, 1, 2)]})
+ expected = df.dtypes
+
+ result = df.iloc[[0]]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ result = df.iloc[[1]]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ # 11594
+ df = DataFrame({'text': ['some words'] + [None] * 9})
+ expected = df.dtypes
+
+ result = df.iloc[0:2]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ result = df.iloc[3:]
+ tm.assert_series_equal(result.dtypes, expected)
+
+ def test_loc_non_unique(self):
+ # GH3659
+ # non-unique indexer with loc slice
+ # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
+
+        # these are going to raise because we are non-monotonic
+ df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
+ 'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
+ self.assertRaises(KeyError, df.loc.__getitem__,
+ tuple([slice(1, None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__,
+ tuple([slice(0, None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
+
+ # monotonic are ok
+ df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
+ 'B': [3, 4, 5, 6, 7, 8]},
+ index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
+ result = df.loc[1:]
+ expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
+ index=[1, 1, 2, 3])
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc[0:]
+ tm.assert_frame_equal(result, df)
+
+ result = df.loc[1:2]
+ expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
+ index=[1, 1, 2])
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_non_unique_memory_error(self):
+
+ # GH 4280
+ # non_unique index with a large selection triggers a memory error
+
+ columns = list('ABCDEFG')
+
+ def gen_test(l, l2):
+ return pd.concat([
+ DataFrame(np.random.randn(l, len(columns)),
+ index=lrange(l), columns=columns),
+ DataFrame(np.ones((l2, len(columns))),
+ index=[0] * l2, columns=columns)])
+
+ def gen_expected(df, mask):
+ l = len(mask)
+ return pd.concat([df.take([0], convert=False),
+ DataFrame(np.ones((l, len(columns))),
+ index=[0] * l,
+ columns=columns),
+ df.take(mask[1:], convert=False)])
+
+ df = gen_test(900, 100)
+ self.assertFalse(df.index.is_unique)
+
+ mask = np.arange(100)
+ result = df.loc[mask]
+ expected = gen_expected(df, mask)
+ tm.assert_frame_equal(result, expected)
+
+ df = gen_test(900000, 100000)
+ self.assertFalse(df.index.is_unique)
+
+ mask = np.arange(100000)
+ result = df.loc[mask]
+ expected = gen_expected(df, mask)
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_name(self):
+ # GH 3880
+ df = DataFrame([[1, 1], [1, 1]])
+ df.index.name = 'index_name'
+ result = df.iloc[[0, 1]].index.name
+ self.assertEqual(result, 'index_name')
+
+ with catch_warnings(record=True):
+ result = df.ix[[0, 1]].index.name
+ self.assertEqual(result, 'index_name')
+
+ result = df.loc[[0, 1]].index.name
+ self.assertEqual(result, 'index_name')
+
+ def test_loc_empty_list_indexer_is_ok(self):
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5, 2)
+ # vertical empty
+ tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
+ check_index_type=True, check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
+ check_index_type=True, check_column_type=True)
+ # horizontal empty
+ tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
+ check_index_type=True,
+ check_column_type=True)
diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py
index b40f0b8cd9976..ed943202872a7 100644
--- a/pandas/tests/indexing/test_multiindex.py
+++ b/pandas/tests/indexing/test_multiindex.py
@@ -46,101 +46,103 @@ def test_iloc_getitem_multiindex2(self):
tm.assert_frame_equal(rs, xp)
def test_setitem_multiindex(self):
- for index_fn in ('ix', 'loc'):
-
- def check(target, indexers, value, compare_fn, expected=None):
- fn = getattr(target, index_fn)
- fn.__setitem__(indexers, value)
- result = fn.__getitem__(indexers)
- if expected is None:
- expected = value
- compare_fn(result, expected)
- # GH7190
- index = pd.MultiIndex.from_product([np.arange(0, 100),
- np.arange(0, 80)],
- names=['time', 'firm'])
- t, n = 0, 2
- df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
- 'X', 'd', 'profit'],
- index=index)
- check(target=df, indexers=((t, n), 'X'), value=0,
- compare_fn=self.assertEqual)
-
- df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
- 'X', 'd', 'profit'],
- index=index)
- check(target=df, indexers=((t, n), 'X'), value=1,
- compare_fn=self.assertEqual)
-
- df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
- 'X', 'd', 'profit'],
- index=index)
- check(target=df, indexers=((t, n), 'X'), value=2,
- compare_fn=self.assertEqual)
-
- # GH 7218, assinging with 0-dim arrays
- df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
- 'X', 'd', 'profit'],
- index=index)
- check(target=df,
- indexers=((t, n), 'X'),
- value=np.array(3),
- compare_fn=self.assertEqual,
- expected=3, )
-
- # GH5206
- df = pd.DataFrame(np.arange(25).reshape(5, 5),
- columns='A,B,C,D,E'.split(','), dtype=float)
- df['F'] = 99
- row_selection = df['A'] % 2 == 0
- col_selection = ['B', 'C']
- with catch_warnings(record=True):
- df.ix[row_selection, col_selection] = df['F']
- output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
- with catch_warnings(record=True):
- tm.assert_frame_equal(df.ix[row_selection, col_selection],
- output)
- check(target=df,
- indexers=(row_selection, col_selection),
- value=df['F'],
- compare_fn=tm.assert_frame_equal,
- expected=output, )
-
- # GH11372
- idx = pd.MultiIndex.from_product([
- ['A', 'B', 'C'],
- pd.date_range('2015-01-01', '2015-04-01', freq='MS')])
- cols = pd.MultiIndex.from_product([
- ['foo', 'bar'],
- pd.date_range('2016-01-01', '2016-02-01', freq='MS')])
-
- df = pd.DataFrame(np.random.random((12, 4)),
- index=idx, columns=cols)
-
- subidx = pd.MultiIndex.from_tuples(
- [('A', pd.Timestamp('2015-01-01')),
- ('A', pd.Timestamp('2015-02-01'))])
- subcols = pd.MultiIndex.from_tuples(
- [('foo', pd.Timestamp('2016-01-01')),
- ('foo', pd.Timestamp('2016-02-01'))])
-
- vals = pd.DataFrame(np.random.random((2, 2)),
- index=subidx, columns=subcols)
- check(target=df,
- indexers=(subidx, subcols),
- value=vals,
- compare_fn=tm.assert_frame_equal, )
- # set all columns
- vals = pd.DataFrame(
- np.random.random((2, 4)), index=subidx, columns=cols)
- check(target=df,
- indexers=(subidx, slice(None, None, None)),
- value=vals,
- compare_fn=tm.assert_frame_equal, )
- # identity
- copy = df.copy()
- check(target=df, indexers=(df.index, df.columns), value=df,
- compare_fn=tm.assert_frame_equal, expected=copy)
+ with catch_warnings(record=True):
+
+ for index_fn in ('ix', 'loc'):
+
+ def check(target, indexers, value, compare_fn, expected=None):
+ fn = getattr(target, index_fn)
+ fn.__setitem__(indexers, value)
+ result = fn.__getitem__(indexers)
+ if expected is None:
+ expected = value
+ compare_fn(result, expected)
+ # GH7190
+ index = pd.MultiIndex.from_product([np.arange(0, 100),
+ np.arange(0, 80)],
+ names=['time', 'firm'])
+ t, n = 0, 2
+ df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
+ 'X', 'd', 'profit'],
+ index=index)
+ check(target=df, indexers=((t, n), 'X'), value=0,
+ compare_fn=self.assertEqual)
+
+ df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
+ 'X', 'd', 'profit'],
+ index=index)
+ check(target=df, indexers=((t, n), 'X'), value=1,
+ compare_fn=self.assertEqual)
+
+ df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
+ 'X', 'd', 'profit'],
+ index=index)
+ check(target=df, indexers=((t, n), 'X'), value=2,
+ compare_fn=self.assertEqual)
+
+                # GH 7218, assigning with 0-dim arrays
+ df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
+ 'X', 'd', 'profit'],
+ index=index)
+ check(target=df,
+ indexers=((t, n), 'X'),
+ value=np.array(3),
+ compare_fn=self.assertEqual,
+ expected=3, )
+
+ # GH5206
+ df = pd.DataFrame(np.arange(25).reshape(5, 5),
+ columns='A,B,C,D,E'.split(','), dtype=float)
+ df['F'] = 99
+ row_selection = df['A'] % 2 == 0
+ col_selection = ['B', 'C']
+ with catch_warnings(record=True):
+ df.ix[row_selection, col_selection] = df['F']
+ output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
+ with catch_warnings(record=True):
+ tm.assert_frame_equal(df.ix[row_selection, col_selection],
+ output)
+ check(target=df,
+ indexers=(row_selection, col_selection),
+ value=df['F'],
+ compare_fn=tm.assert_frame_equal,
+ expected=output, )
+
+ # GH11372
+ idx = pd.MultiIndex.from_product([
+ ['A', 'B', 'C'],
+ pd.date_range('2015-01-01', '2015-04-01', freq='MS')])
+ cols = pd.MultiIndex.from_product([
+ ['foo', 'bar'],
+ pd.date_range('2016-01-01', '2016-02-01', freq='MS')])
+
+ df = pd.DataFrame(np.random.random((12, 4)),
+ index=idx, columns=cols)
+
+ subidx = pd.MultiIndex.from_tuples(
+ [('A', pd.Timestamp('2015-01-01')),
+ ('A', pd.Timestamp('2015-02-01'))])
+ subcols = pd.MultiIndex.from_tuples(
+ [('foo', pd.Timestamp('2016-01-01')),
+ ('foo', pd.Timestamp('2016-02-01'))])
+
+ vals = pd.DataFrame(np.random.random((2, 2)),
+ index=subidx, columns=subcols)
+ check(target=df,
+ indexers=(subidx, subcols),
+ value=vals,
+ compare_fn=tm.assert_frame_equal, )
+ # set all columns
+ vals = pd.DataFrame(
+ np.random.random((2, 4)), index=subidx, columns=cols)
+ check(target=df,
+ indexers=(subidx, slice(None, None, None)),
+ value=vals,
+ compare_fn=tm.assert_frame_equal, )
+ # identity
+ copy = df.copy()
+ check(target=df, indexers=(df.index, df.columns), value=df,
+ compare_fn=tm.assert_frame_equal, expected=copy)
def test_loc_getitem_series(self):
# GH14730
@@ -559,32 +561,37 @@ def test_multiindex_assignment(self):
df['d'] = np.nan
arr = np.array([0., 1.])
- df.ix[4, 'd'] = arr
- tm.assert_series_equal(df.ix[4, 'd'],
- Series(arr, index=[8, 10], name='d'))
+ with catch_warnings(record=True):
+ df.ix[4, 'd'] = arr
+ tm.assert_series_equal(df.ix[4, 'd'],
+ Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
- df.ix[4, 'c'] = arr
- exp = Series(arr, index=[8, 10], name='c', dtype='float64')
- tm.assert_series_equal(df.ix[4, 'c'], exp)
+ with catch_warnings(record=True):
+ df.ix[4, 'c'] = arr
+ exp = Series(arr, index=[8, 10], name='c', dtype='float64')
+ tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
- df.ix[4, 'c'] = 10
- exp = Series(10, index=[8, 10], name='c', dtype='float64')
- tm.assert_series_equal(df.ix[4, 'c'], exp)
+ with catch_warnings(record=True):
+ df.ix[4, 'c'] = 10
+ exp = Series(10, index=[8, 10], name='c', dtype='float64')
+ tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
def f():
- df.ix[4, 'c'] = [0, 1, 2, 3]
+ with catch_warnings(record=True):
+ df.ix[4, 'c'] = [0, 1, 2, 3]
self.assertRaises(ValueError, f)
def f():
- df.ix[4, 'c'] = [0]
+ with catch_warnings(record=True):
+ df.ix[4, 'c'] = [0]
self.assertRaises(ValueError, f)
@@ -614,7 +621,8 @@ def f(name, df2):
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
- df.ix[name, 'new_col'] = new_vals
+ with catch_warnings(record=True):
+ df.ix[name, 'new_col'] = new_vals
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(np.arange(20),
@@ -624,7 +632,8 @@ def test_multiindex_label_slicing_with_negative_step(self):
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
- tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
+ with catch_warnings(record=True):
+ tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py
index 5ec3076af599a..0677ea498c282 100644
--- a/pandas/tests/indexing/test_panel.py
+++ b/pandas/tests/indexing/test_panel.py
@@ -1,3 +1,5 @@
+from warnings import catch_warnings
+
import numpy as np
from pandas.util import testing as tm
from pandas import Panel, date_range, DataFrame
@@ -112,8 +114,8 @@ def test_panel_getitem(self):
len(ind), 5), index=ind, columns=list('ABCDE'))
panel = Panel(dict([('frame_' + c, df) for c in list('ABC')]))
- test2 = panel.ix[:, "2002":"2002-12-31"]
- test1 = panel.ix[:, "2002"]
+ test2 = panel.loc[:, "2002":"2002-12-31"]
+ test1 = panel.loc[:, "2002"]
tm.assert_panel_equal(test1, test2)
# GH8710
@@ -134,10 +136,8 @@ def test_panel_getitem(self):
result = panel.loc['ItemA':'ItemB']
tm.assert_panel_equal(result, expected)
- result = panel.ix['ItemA':'ItemB']
- tm.assert_panel_equal(result, expected)
-
- result = panel.ix[['ItemA', 'ItemB']]
+ with catch_warnings(record=True):
+ result = panel.ix[['ItemA', 'ItemB']]
tm.assert_panel_equal(result, expected)
# with an object-like
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
new file mode 100644
index 0000000000000..a00f880ff6591
--- /dev/null
+++ b/pandas/tests/indexing/test_partial.py
@@ -0,0 +1,587 @@
+"""
+test setting *parts* of objects both positionally and label based
+
+TODO: these should be split among the indexer tests
+"""
+from warnings import catch_warnings
+import numpy as np
+
+import pandas as pd
+from pandas import Series, DataFrame, Panel, Index, date_range
+from pandas.util import testing as tm
+
+
+class TestPartialSetting(tm.TestCase):
+
+ def test_partial_setting(self):
+
+ # GH2578, allow ix and friends to partially set
+
+ # series
+ s_orig = Series([1, 2, 3])
+
+ s = s_orig.copy()
+ s[5] = 5
+ expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
+ tm.assert_series_equal(s, expected)
+
+ s = s_orig.copy()
+ s.loc[5] = 5
+ expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
+ tm.assert_series_equal(s, expected)
+
+ s = s_orig.copy()
+ s[5] = 5.
+ expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
+ tm.assert_series_equal(s, expected)
+
+ s = s_orig.copy()
+ s.loc[5] = 5.
+ expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
+ tm.assert_series_equal(s, expected)
+
+ # iloc/iat raise
+ s = s_orig.copy()
+
+ def f():
+ s.iloc[3] = 5.
+
+ self.assertRaises(IndexError, f)
+
+ def f():
+ s.iat[3] = 5.
+
+ self.assertRaises(IndexError, f)
+
+ # ## frame ##
+
+ df_orig = DataFrame(
+ np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
+
+ # iloc/iat raise
+ df = df_orig.copy()
+
+ def f():
+ df.iloc[4, 2] = 5.
+
+ self.assertRaises(IndexError, f)
+
+ def f():
+ df.iat[4, 2] = 5.
+
+ self.assertRaises(IndexError, f)
+
+ # row setting where it exists
+ expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
+ df = df_orig.copy()
+ df.iloc[1] = df.iloc[2]
+ tm.assert_frame_equal(df, expected)
+
+ expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
+ df = df_orig.copy()
+ df.loc[1] = df.loc[2]
+ tm.assert_frame_equal(df, expected)
+
+ # like 2578, partial setting with dtype preservation
+ expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
+ df = df_orig.copy()
+ df.loc[3] = df.loc[2]
+ tm.assert_frame_equal(df, expected)
+
+ # single dtype frame, overwrite
+ expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
+ df = df_orig.copy()
+ with catch_warnings(record=True):
+ df.ix[:, 'B'] = df.ix[:, 'A']
+ tm.assert_frame_equal(df, expected)
+
+ # mixed dtype frame, overwrite
+ expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
+ df = df_orig.copy()
+ df['B'] = df['B'].astype(np.float64)
+ with catch_warnings(record=True):
+ df.ix[:, 'B'] = df.ix[:, 'A']
+ tm.assert_frame_equal(df, expected)
+
+ # single dtype frame, partial setting
+ expected = df_orig.copy()
+ expected['C'] = df['A']
+ df = df_orig.copy()
+ with catch_warnings(record=True):
+ df.ix[:, 'C'] = df.ix[:, 'A']
+ tm.assert_frame_equal(df, expected)
+
+ # mixed frame, partial setting
+ expected = df_orig.copy()
+ expected['C'] = df['A']
+ df = df_orig.copy()
+ with catch_warnings(record=True):
+ df.ix[:, 'C'] = df.ix[:, 'A']
+ tm.assert_frame_equal(df, expected)
+
+ # ## panel ##
+ p_orig = Panel(np.arange(16).reshape(2, 4, 2),
+ items=['Item1', 'Item2'],
+ major_axis=pd.date_range('2001/1/12', periods=4),
+ minor_axis=['A', 'B'], dtype='float64')
+
+ # panel setting via item
+ p_orig = Panel(np.arange(16).reshape(2, 4, 2),
+ items=['Item1', 'Item2'],
+ major_axis=pd.date_range('2001/1/12', periods=4),
+ minor_axis=['A', 'B'], dtype='float64')
+ expected = p_orig.copy()
+ expected['Item3'] = expected['Item1']
+ p = p_orig.copy()
+ p.loc['Item3'] = p['Item1']
+ tm.assert_panel_equal(p, expected)
+
+ # panel with aligned series
+ expected = p_orig.copy()
+ expected = expected.transpose(2, 1, 0)
+ expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
+ 'Item2': [32, 32, 32, 32]},
+ index=p_orig.major_axis)
+ expected = expected.transpose(2, 1, 0)
+ p = p_orig.copy()
+ p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
+ tm.assert_panel_equal(p, expected)
+
+ # GH 8473
+ dates = date_range('1/1/2000', periods=8)
+ df_orig = DataFrame(np.random.randn(8, 4), index=dates,
+ columns=['A', 'B', 'C', 'D'])
+
+ expected = pd.concat([df_orig, DataFrame(
+ {'A': 7}, index=[dates[-1] + 1])])
+ df = df_orig.copy()
+ df.loc[dates[-1] + 1, 'A'] = 7
+ tm.assert_frame_equal(df, expected)
+ df = df_orig.copy()
+ df.at[dates[-1] + 1, 'A'] = 7
+ tm.assert_frame_equal(df, expected)
+
+ exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
+ expected = pd.concat([df_orig, exp_other], axis=1)
+
+ df = df_orig.copy()
+ df.loc[dates[-1] + 1, 0] = 7
+ tm.assert_frame_equal(df, expected)
+ df = df_orig.copy()
+ df.at[dates[-1] + 1, 0] = 7
+ tm.assert_frame_equal(df, expected)
+
+ def test_partial_setting_mixed_dtype(self):
+
+ # in a mixed dtype environment, try to preserve dtypes
+ # by appending
+ df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
+
+ s = df.loc[1].copy()
+ s.name = 2
+ expected = df.append(s)
+
+ df.loc[2] = df.loc[1]
+ tm.assert_frame_equal(df, expected)
+
+ # columns will align
+ df = DataFrame(columns=['A', 'B'])
+ df.loc[0] = Series(1, index=range(4))
+ tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
+
+ # columns will align
+ df = DataFrame(columns=['A', 'B'])
+ df.loc[0] = Series(1, index=['B'])
+
+ exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
+ index=[0], dtype='float64')
+ tm.assert_frame_equal(df, exp)
+
+ # list-like must conform
+ df = DataFrame(columns=['A', 'B'])
+
+ def f():
+ df.loc[0] = [1, 2, 3]
+
+ self.assertRaises(ValueError, f)
+
+ # these are coerced to float unavoidably (as its a list-like to begin)
+ df = DataFrame(columns=['A', 'B'])
+ df.loc[3] = [6, 7]
+
+ exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
+ dtype='float64')
+ tm.assert_frame_equal(df, exp)
+
+ def test_series_partial_set(self):
+ # partial set with new index
+ # Regression from GH4825
+ ser = Series([0.1, 0.2], index=[1, 2])
+
+ # loc
+ expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
+ result = ser.loc[[3, 2, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
+ result = ser.loc[[3, 2, 3, 'x']]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
+ result = ser.loc[[2, 2, 1]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
+ result = ser.loc[[2, 2, 'x', 1]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+        # raises as nothing is in the index
+ self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
+
+ expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
+ result = ser.loc[[2, 2, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
+ result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
+ result = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4]).loc[[5, 3, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
+ result = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4]).loc[[5, 4, 4]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
+ result = Series([0.1, 0.2, 0.3, 0.4],
+ index=[4, 5, 6, 7]).loc[[7, 2, 2]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
+ result = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4]).loc[[4, 5, 5]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # iloc
+ expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
+ result = ser.iloc[[1, 1, 0, 0]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_series_partial_set_with_name(self):
+ # GH 11497
+
+ idx = Index([1, 2], dtype='int64', name='idx')
+ ser = Series([0.1, 0.2], index=idx, name='s')
+
+ # loc
+ exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
+ result = ser.loc[[3, 2, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
+ expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
+ name='s')
+ result = ser.loc[[3, 2, 3, 'x']]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 1]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
+ expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 'x', 1]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+        # raises as nothing is in the index
+ self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
+
+ exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
+ expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([1, 2, 3], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[5, 3, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[5, 4, 4]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
+ expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[7, 2, 2]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
+ expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[4, 5, 5]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # iloc
+ exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
+ result = ser.iloc[[1, 1, 0, 0]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_partial_set_invalid(self):
+
+ # GH 4940
+ # allow only setting of 'valid' values
+
+ orig = tm.makeTimeDataFrame()
+ df = orig.copy()
+
+        # don't allow non-string inserts
+ def f():
+ with catch_warnings(record=True):
+ df.loc[100.0, :] = df.ix[0]
+
+ self.assertRaises(TypeError, f)
+
+ def f():
+ with catch_warnings(record=True):
+ df.loc[100, :] = df.ix[0]
+
+ self.assertRaises(TypeError, f)
+
+ def f():
+ with catch_warnings(record=True):
+ df.ix[100.0, :] = df.ix[0]
+
+ self.assertRaises(TypeError, f)
+
+ def f():
+ with catch_warnings(record=True):
+ df.ix[100, :] = df.ix[0]
+
+ self.assertRaises(ValueError, f)
+
+ # allow object conversion here
+ df = orig.copy()
+ with catch_warnings(record=True):
+ df.loc['a', :] = df.ix[0]
+ exp = orig.append(pd.Series(df.ix[0], name='a'))
+ tm.assert_frame_equal(df, exp)
+ tm.assert_index_equal(df.index,
+ pd.Index(orig.index.tolist() + ['a']))
+ self.assertEqual(df.index.dtype, 'object')
+
+ def test_partial_set_empty_series(self):
+
+ # GH5226
+
+ # partially set with an empty object series
+ s = Series()
+ s.loc[1] = 1
+ tm.assert_series_equal(s, Series([1], index=[1]))
+ s.loc[3] = 3
+ tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
+
+ s = Series()
+ s.loc[1] = 1.
+ tm.assert_series_equal(s, Series([1.], index=[1]))
+ s.loc[3] = 3.
+ tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
+
+ s = Series()
+ s.loc['foo'] = 1
+ tm.assert_series_equal(s, Series([1], index=['foo']))
+ s.loc['bar'] = 3
+ tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
+ s.loc[3] = 4
+ tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
+
+ def test_partial_set_empty_frame(self):
+
+ # partially set with an empty object
+ # frame
+ df = DataFrame()
+
+ def f():
+ df.loc[1] = 1
+
+ self.assertRaises(ValueError, f)
+
+ def f():
+ df.loc[1] = Series([1], index=['foo'])
+
+ self.assertRaises(ValueError, f)
+
+ def f():
+ df.loc[:, 1] = 1
+
+ self.assertRaises(ValueError, f)
+
+ # these work as they don't really change
+ # anything but the index
+ # GH5632
+ expected = DataFrame(columns=['foo'], index=pd.Index(
+ [], dtype='int64'))
+
+ def f():
+ df = DataFrame()
+ df['foo'] = Series([], dtype='object')
+ return df
+
+ tm.assert_frame_equal(f(), expected)
+
+ def f():
+ df = DataFrame()
+ df['foo'] = Series(df.index)
+ return df
+
+ tm.assert_frame_equal(f(), expected)
+
+ def f():
+ df = DataFrame()
+ df['foo'] = df.index
+ return df
+
+ tm.assert_frame_equal(f(), expected)
+
+ expected = DataFrame(columns=['foo'],
+ index=pd.Index([], dtype='int64'))
+ expected['foo'] = expected['foo'].astype('float64')
+
+ def f():
+ df = DataFrame()
+ df['foo'] = []
+ return df
+
+ tm.assert_frame_equal(f(), expected)
+
+ def f():
+ df = DataFrame()
+ df['foo'] = Series(range(len(df)))
+ return df
+
+ tm.assert_frame_equal(f(), expected)
+
+ def f():
+ df = DataFrame()
+ tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
+ df['foo'] = range(len(df))
+ return df
+
+ expected = DataFrame(columns=['foo'],
+ index=pd.Index([], dtype='int64'))
+ expected['foo'] = expected['foo'].astype('float64')
+ tm.assert_frame_equal(f(), expected)
+
+ df = DataFrame()
+ tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
+ df2 = DataFrame()
+ df2[1] = Series([1], index=['foo'])
+ df.loc[:, 1] = Series([1], index=['foo'])
+ tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
+ tm.assert_frame_equal(df, df2)
+
+ # no index to start
+ expected = DataFrame({0: Series(1, index=range(4))},
+ columns=['A', 'B', 0])
+
+ df = DataFrame(columns=['A', 'B'])
+ df[0] = Series(1, index=range(4))
+ df.dtypes
+ str(df)
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame(columns=['A', 'B'])
+ df.loc[:, 0] = Series(1, index=range(4))
+ df.dtypes
+ str(df)
+ tm.assert_frame_equal(df, expected)
+
+ def test_partial_set_empty_frame_row(self):
+ # GH5720, GH5744
+ # don't create rows when empty
+ expected = DataFrame(columns=['A', 'B', 'New'],
+ index=pd.Index([], dtype='int64'))
+ expected['A'] = expected['A'].astype('int64')
+ expected['B'] = expected['B'].astype('float64')
+ expected['New'] = expected['New'].astype('float64')
+
+ df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
+ y = df[df.A > 5]
+ y['New'] = np.nan
+ tm.assert_frame_equal(y, expected)
+ # tm.assert_frame_equal(y,expected)
+
+ expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
+ expected['d'] = expected['d'].astype('int64')
+ df = DataFrame(columns=['a', 'b', 'c c'])
+ df['d'] = 3
+ tm.assert_frame_equal(df, expected)
+ tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
+
+ # reindex columns is ok
+ df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
+ y = df[df.A > 5]
+ result = y.reindex(columns=['A', 'B', 'C'])
+ expected = DataFrame(columns=['A', 'B', 'C'],
+ index=pd.Index([], dtype='int64'))
+ expected['A'] = expected['A'].astype('int64')
+ expected['B'] = expected['B'].astype('float64')
+ expected['C'] = expected['C'].astype('float64')
+ tm.assert_frame_equal(result, expected)
+
+ def test_partial_set_empty_frame_set_series(self):
+ # GH 5756
+ # setting with empty Series
+ df = DataFrame(Series())
+ tm.assert_frame_equal(df, DataFrame({0: Series()}))
+
+ df = DataFrame(Series(name='foo'))
+ tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
+
+ def test_partial_set_empty_frame_empty_copy_assignment(self):
+ # GH 5932
+ # copy on empty with assignment fails
+ df = DataFrame(index=[0])
+ df = df.copy()
+ df['a'] = 0
+ expected = DataFrame(0, index=[0], columns=['a'])
+ tm.assert_frame_equal(df, expected)
+
+ def test_partial_set_empty_frame_empty_consistencies(self):
+ # GH 6171
+ # consistency on empty frames
+ df = DataFrame(columns=['x', 'y'])
+ df['x'] = [1, 2]
+ expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
+ tm.assert_frame_equal(df, expected, check_dtype=False)
+
+ df = DataFrame(columns=['x', 'y'])
+ df['x'] = ['1', '2']
+ expected = DataFrame(
+ dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame(columns=['x', 'y'])
+ df.loc[0, 'x'] = 1
+ expected = DataFrame(dict(x=[1], y=[np.nan]))
+ tm.assert_frame_equal(df, expected, check_dtype=False)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
new file mode 100644
index 0000000000000..4e81cd01cd5d2
--- /dev/null
+++ b/pandas/tests/indexing/test_scalar.py
@@ -0,0 +1,156 @@
+""" test scalar indexing, including at and iat """
+
+import numpy as np
+
+from pandas import (Series, DataFrame, Timestamp,
+ Timedelta, date_range)
+from pandas.util import testing as tm
+from pandas.tests.indexing.common import Base
+
+
+class TestScalar(Base, tm.TestCase):
+
+ def test_at_and_iat_get(self):
+ def _check(f, func, values=False):
+
+ if f is not None:
+ indicies = self.generate_indices(f, values)
+ for i in indicies:
+ result = getattr(f, func)[i]
+ expected = self.get_value(f, i, values)
+ tm.assert_almost_equal(result, expected)
+
+ for o in self._objs:
+
+ d = getattr(self, o)
+
+ # iat
+ for f in [d['ints'], d['uints']]:
+ _check(f, 'iat', values=True)
+
+ for f in [d['labels'], d['ts'], d['floats']]:
+ if f is not None:
+ self.assertRaises(ValueError, self.check_values, f, 'iat')
+
+ # at
+ for f in [d['ints'], d['uints'], d['labels'],
+ d['ts'], d['floats']]:
+ _check(f, 'at')
+
+ def test_at_and_iat_set(self):
+ def _check(f, func, values=False):
+
+ if f is not None:
+ indicies = self.generate_indices(f, values)
+ for i in indicies:
+ getattr(f, func)[i] = 1
+ expected = self.get_value(f, i, values)
+ tm.assert_almost_equal(expected, 1)
+
+ for t in self._objs:
+
+ d = getattr(self, t)
+
+ # iat
+ for f in [d['ints'], d['uints']]:
+ _check(f, 'iat', values=True)
+
+ for f in [d['labels'], d['ts'], d['floats']]:
+ if f is not None:
+ self.assertRaises(ValueError, _check, f, 'iat')
+
+ # at
+ for f in [d['ints'], d['uints'], d['labels'],
+ d['ts'], d['floats']]:
+ _check(f, 'at')
+
+ def test_at_iat_coercion(self):
+
+ # as timestamp is not a tuple!
+ dates = date_range('1/1/2000', periods=8)
+ df = DataFrame(np.random.randn(8, 4),
+ index=dates,
+ columns=['A', 'B', 'C', 'D'])
+ s = df['A']
+
+ result = s.at[dates[5]]
+ xp = s.values[5]
+ self.assertEqual(result, xp)
+
+ # GH 7729
+ # make sure we are boxing the returns
+ s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
+ expected = Timestamp('2014-02-02')
+
+ for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
+ result = r()
+ self.assertEqual(result, expected)
+
+ s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
+ expected = Timedelta('2 days')
+
+ for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
+ result = r()
+ self.assertEqual(result, expected)
+
+ def test_iat_invalid_args(self):
+ pass
+
+ def test_imethods_with_dups(self):
+
+ # GH6493
+ # iat/iloc with dups
+
+ s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
+ result = s.iloc[2]
+ self.assertEqual(result, 2)
+ result = s.iat[2]
+ self.assertEqual(result, 2)
+
+ self.assertRaises(IndexError, lambda: s.iat[10])
+ self.assertRaises(IndexError, lambda: s.iat[-10])
+
+ result = s.iloc[[2, 3]]
+ expected = Series([2, 3], [2, 2], dtype='int64')
+ tm.assert_series_equal(result, expected)
+
+ df = s.to_frame()
+ result = df.iloc[2]
+ expected = Series(2, index=[0], name=2)
+ tm.assert_series_equal(result, expected)
+
+ result = df.iat[2, 0]
+ expected = 2
+ self.assertEqual(result, 2)
+
+ def test_at_to_fail(self):
+ # at should not fallback
+ # GH 7814
+ s = Series([1, 2, 3], index=list('abc'))
+ result = s.at['a']
+ self.assertEqual(result, 1)
+ self.assertRaises(ValueError, lambda: s.at[0])
+
+ df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
+ result = df.at['a', 'A']
+ self.assertEqual(result, 1)
+ self.assertRaises(ValueError, lambda: df.at['a', 0])
+
+ s = Series([1, 2, 3], index=[3, 2, 1])
+ result = s.at[1]
+ self.assertEqual(result, 3)
+ self.assertRaises(ValueError, lambda: s.at['a'])
+
+ df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
+ result = df.at[1, 0]
+ self.assertEqual(result, 3)
+ self.assertRaises(ValueError, lambda: df.at['a', 0])
+
+ # GH 13822, incorrect error string with non-unique columns when missing
+ # column is accessed
+ df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
+ df.columns = ['x', 'x', 'z']
+
+ # Check that we get the correct value in the KeyError
+ self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
+ lambda: df[['x', 'y', 'z']])
| I think we had a filter somewhere that simply turned these off (in another test module) | https://api.github.com/repos/pandas-dev/pandas/pulls/15638 | 2017-03-10T00:02:22Z | 2017-03-10T11:20:43Z | null | 2017-03-10T11:21:18Z |
DOC: remove latex and parallel building | diff --git a/ci/build_docs.sh b/ci/build_docs.sh
index 5dc649a91c4f7..bfe7a1eed756b 100755
--- a/ci/build_docs.sh
+++ b/ci/build_docs.sh
@@ -23,9 +23,6 @@ if [ x"$DOC_BUILD" != x"" ]; then
source activate pandas
- # install sudo deps
- time sudo apt-get $APT_ARGS install dvipng texlive-latex-base texlive-latex-extra
-
mv "$TRAVIS_BUILD_DIR"/doc /tmp
cd /tmp/doc
diff --git a/doc/make.py b/doc/make.py
index a2f5be5594e44..30cd2ad8b61c9 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -197,7 +197,7 @@ def html():
print(e)
print("Failed to convert %s" % nb)
- if os.system('sphinx-build -j 2 -P -b html -d build/doctrees '
+ if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
diff --git a/doc/source/io.rst b/doc/source/io.rst
index fdd33ab4625f3..a702efdc6aaf9 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2070,9 +2070,9 @@ by the Table Schema spec.
The full list of types supported are described in the Table Schema
spec. This table shows the mapping from pandas types:
-============== =================
+=============== =================
Pandas type Table Schema type
-============== =================
+=============== =================
int64 integer
float64 number
bool boolean
@@ -3096,6 +3096,7 @@ The default is to 'infer
.. ipython:: python
:suppress:
+
import os
os.remove("data.pkl.compress")
os.remove("data.pkl.xz")
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 8f671062464f0..cf3dddc3a2933 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -140,6 +140,7 @@ The default is to 'infer
.. ipython:: python
:suppress:
+
import os
os.remove("data.pkl.compress")
os.remove("data.pkl.xz")
| https://api.github.com/repos/pandas-dev/pandas/pulls/15637 | 2017-03-09T21:03:45Z | 2017-03-09T23:15:03Z | 2017-03-09T23:15:03Z | 2017-03-10T02:51:36Z | |
DOC: add example for DataFrame.resample: keywords on and level | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ff58a2aa77447..e89f56ae04226 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4460,6 +4460,30 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
+ For DataFrame objects, the keyword ``on`` can be used to specify the
+ column instead of the index for resampling.
+
+ >>> df = pd.DataFrame(data=9*[range(4)], columns=['a', 'b', 'c', 'd'])
+ >>> df['time'] = pd.date_range('1/1/2000', periods=9, freq='T')
+ >>> df.resample('3T', on='time').sum()
+ a b c d
+ time
+ 2000-01-01 00:00:00 0 3 6 9
+ 2000-01-01 00:03:00 0 3 6 9
+ 2000-01-01 00:06:00 0 3 6 9
+
+ For a DataFrame with MultiIndex, the keyword ``level`` can be used to
+ specify on level the resampling needs to take place.
+
+ >>> time = pd.date_range('1/1/2000', periods=5, freq='T')
+ >>> df2 = pd.DataFrame(data=10*[range(4)],
+ columns=['a', 'b', 'c', 'd'],
+ index=pd.MultiIndex.from_product([time, [1, 2]])
+ )
+ >>> df2.resample('3T', level=0).sum()
+ a b c d
+ 2000-01-01 00:00:00 0 6 12 18
+ 2000-01-01 00:03:00 0 4 8 12
"""
from pandas.tseries.resample import (resample,
_maybe_process_deprecations)
| - [ ] closes #xxxx
add examples for DataFrame specific keywords: on and level.
This can possibly fix issue #14843
See attached picture for details

| https://api.github.com/repos/pandas-dev/pandas/pulls/15627 | 2017-03-08T20:36:11Z | 2017-03-09T07:57:27Z | 2017-03-09T07:57:27Z | 2017-03-09T08:07:51Z |
DOC: make it possible to run doctests | diff --git a/pandas/conftest.py b/pandas/conftest.py
index b3683de3a173b..86420ad00109a 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1,5 +1,8 @@
import pytest
+import numpy
+import pandas
+
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true",
@@ -19,3 +22,11 @@ def pytest_runtest_setup(item):
if 'skip' in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
+
+
+# For running doctests: make np and pd names available
+
+@pytest.fixture(autouse=True)
+def add_imports(doctest_namespace):
+ doctest_namespace['np'] = numpy
+ doctest_namespace['pd'] = pandas
| This adds a small entry in the conftest.py file, which makes it possible to run doctests with eg
```
pytest --doctests-module pandas/core/series.py
```
We are far from being able to run them all without error, and if we would want that is even another question (it may be too much small details to pay attention to), but this let's you at least run specific doctests. | https://api.github.com/repos/pandas-dev/pandas/pulls/15626 | 2017-03-08T20:16:44Z | 2017-03-09T07:51:10Z | 2017-03-09T07:51:10Z | 2018-02-12T22:29:08Z |
DOC: resolved mistakes in examples series | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 84a48c9be8fd9..606906bfcd7c4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -668,6 +668,7 @@ def swaplevel(self, i=-2, j=-1, axis=0):
dtype: int64
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
+ Traceback (most recent call last):
...
TypeError: 'int' object is not callable
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
@@ -1115,7 +1116,7 @@ def __setstate__(self, state):
to the existing workbook. This can be used to save different
DataFrames to one workbook:
- >>> writer = ExcelWriter('output.xlsx')
+ >>> writer = pd.ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
@@ -2260,7 +2261,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
- http_status response_time
+ http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
@@ -2275,11 +2276,11 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
- Safari 404 0.07
+ Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
- IE10 404 0.08
- Chrome 200 0.02
+ IE10 404.0 0.08
+ Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f23e90effdabf..cfa25ca1299eb 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -369,10 +369,10 @@ def values(self):
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
- tz='US/Eastern')).values
- array(['2013-01-01T00:00:00.000000000-0500',
- '2013-01-02T00:00:00.000000000-0500',
- '2013-01-03T00:00:00.000000000-0500'], dtype='datetime64[ns]')
+ ... tz='US/Eastern')).values
+ array(['2013-01-01T05:00:00.000000000',
+ '2013-01-02T05:00:00.000000000',
+ '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._data.external_values()
@@ -1550,6 +1550,8 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
+ Traceback (most recent call last):
+ ...
ValueError: Indexes have overlapping values: [0, 1, 2]
@@ -1919,8 +1921,19 @@ def nlargest(self, n=5, keep='first'):
--------
>>> import pandas as pd
>>> import numpy as np
- >>> s = pd.Series(np.random.randn(1e6))
+ >>> s = pd.Series(np.random.randn(10**6))
>>> s.nlargest(10) # only sorts up to the N requested
+ 219921 4.644710
+ 82124 4.608745
+ 421689 4.564644
+ 425277 4.447014
+ 718691 4.414137
+ 43154 4.403520
+ 283187 4.313922
+ 595519 4.273635
+ 503969 4.250236
+ 121637 4.240952
+ dtype: float64
"""
return algorithms.select_n_series(self, n=n, keep=keep,
method='nlargest')
@@ -1958,8 +1971,19 @@ def nsmallest(self, n=5, keep='first'):
--------
>>> import pandas as pd
>>> import numpy as np
- >>> s = pd.Series(np.random.randn(1e6))
+ >>> s = pd.Series(np.random.randn(10**6))
>>> s.nsmallest(10) # only sorts up to the N requested
+ 288532 -4.954580
+ 732345 -4.835960
+ 64803 -4.812550
+ 446457 -4.609998
+ 501225 -4.483945
+ 669476 -4.472935
+ 973615 -4.401699
+ 621279 -4.355126
+ 773916 -4.347355
+ 359919 -4.331927
+ dtype: float64
"""
return algorithms.select_n_series(self, n=n, keep=keep,
method='nsmallest')
@@ -2052,21 +2076,24 @@ def unstack(self, level=-1, fill_value=None):
Examples
--------
+ >>> s = pd.Series([1, 2, 3, 4],
+ ... index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']]))
>>> s
- one a 1.
- one b 2.
- two a 3.
- two b 4.
+ one a 1
+ b 2
+ two a 3
+ b 4
+ dtype: int64
>>> s.unstack(level=-1)
- a b
- one 1. 2.
- two 3. 4.
+ a b
+ one 1 2
+ two 3 4
>>> s.unstack(level=0)
one two
- a 1. 2.
- b 3. 4.
+ a 1 3
+ b 2 4
Returns
-------
@@ -2102,15 +2129,16 @@ def map(self, arg, na_action=None):
>>> x = pd.Series([1,2,3], index=['one', 'two', 'three'])
>>> x
- one 1
- two 2
- three 3
+ one 1
+ two 2
+ three 3
+ dtype: int64
>>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3])
>>> y
- 1 foo
- 2 bar
- 3 baz
+ 1 foo
+ 2 bar
+ 3 baz
>>> x.map(y)
one foo
@@ -2215,6 +2243,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
>>> import numpy as np
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
+ >>> series
London 20
New York 21
Helsinki 12
| Adjusted some docstring examples.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15625 | 2017-03-08T19:51:06Z | 2017-03-09T08:21:31Z | 2017-03-09T08:21:31Z | 2017-03-09T08:21:44Z |
DOC: adapt example of groupby filter and transform | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index a10be078a8f96..6428a4a08a1ed 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2871,7 +2871,22 @@ def transform(self, func, *args, **kwargs):
Examples
--------
+ >>> import pandas as pd
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
+ ... 'foo', 'bar'],
+ ... 'B' : ['one', 'one', 'two', 'three',
+ ... 'two', 'two'],
+ ... 'C' : [1, 5, 5, 2, 5, 5],
+ ... 'D' : [2.0, 5., 8., 1., 2., 9.]})
+ >>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
+ C D
+ 0 -1.154701 -0.577350
+ 1 0.577350 0.000000
+ 2 0.577350 1.154701
+ 3 -1.154701 -1.000000
+ 4 0.577350 -0.577350
+ 5 0.577350 1.000000
Returns
-------
@@ -2948,7 +2963,17 @@ def filter(self, func, dropna=True, *args, **kwargs): # noqa
Examples
--------
- >>> grouped.filter(lambda x: x.mean() > 0)
+ >>> import pandas as pd
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
+ ... 'foo', 'bar'],
+ ... 'B' : [1, 2, 3, 4, 5, 6],
+ ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
+ >>> grouped = df.groupby('A')
+ >>> grouped.filter(lambda x: x['B'].mean() > 3.)
+ A B C
+ 1 bar 2 5.0
+ 3 bar 4 1.0
+ 5 bar 6 9.0
Returns
-------
| In this pull request the `filter` and `transform` examples are converted to a running doctest
| https://api.github.com/repos/pandas-dev/pandas/pulls/15624 | 2017-03-08T19:42:04Z | 2017-03-28T18:18:13Z | null | 2020-01-13T11:22:21Z |
DOC: add documentation to IndexSlice | diff --git a/doc/source/api.rst b/doc/source/api.rst
index fbce64df84859..f126e478f424d 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1405,6 +1405,7 @@ MultiIndex
:toctree: generated/
MultiIndex
+ IndexSlice
MultiIndex Components
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 6f490875742ca..546cbd8337e7e 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -43,6 +43,36 @@ def get_indexers_list():
# the public IndexSlicerMaker
class _IndexSlice(object):
+ """
+ Create an object to more easily perform multi-index slicing
+
+ Examples
+ --------
+
+ >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
+ >>> columns = ['foo', 'bar']
+ >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
+ index=midx, columns=columns)
+
+ Using the default slice command:
+
+ >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
+ foo bar
+ A0 B0 0 1
+ B1 2 3
+ A1 B0 8 9
+ B1 10 11
+
+ Using the IndexSlice class for a more intuitive command:
+
+ >>> idx = pd.IndexSlice
+ >>> dfmi.loc[idx[:, 'B0':'B1'], :]
+ foo bar
+ A0 B0 0 1
+ B1 2 3
+ A1 B0 8 9
+ B1 10 11
+ """
def __getitem__(self, arg):
return arg
| - [x] closes #12508
| https://api.github.com/repos/pandas-dev/pandas/pulls/15623 | 2017-03-08T19:23:47Z | 2017-03-09T12:05:47Z | 2017-03-09T12:05:47Z | 2017-03-09T12:05:48Z |
DOC: add examples to DataFrame.dropna | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4e7a5ebdf6f67..d47cb05285370 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3079,6 +3079,50 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
Returns
-------
dropped : DataFrame
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
+ ... [np.nan, np.nan, np.nan, 5]],
+ ... columns=list('ABCD'))
+ >>> df
+ A B C D
+ 0 NaN 2.0 NaN 0
+ 1 3.0 4.0 NaN 1
+ 2 NaN NaN NaN 5
+
+ Drop the columns where all elements are nan:
+
+ >>> df.dropna(axis=1, how='all')
+ A B D
+ 0 NaN 2.0 0
+ 1 3.0 4.0 1
+ 2 NaN NaN 5
+
+ Drop the columns where any of the elements is nan
+
+ >>> df.dropna(axis=1, how='any')
+ D
+ 0 0
+ 1 1
+ 2 5
+
+ Drop the rows where all of the elements are nan
+ (there is no row to drop, so df stays the same):
+
+ >>> df.dropna(axis=0, how='all')
+ A B C D
+ 0 NaN 2.0 NaN 0
+ 1 3.0 4.0 NaN 1
+ 2 NaN NaN NaN 5
+
+ Keep only the rows with at least 2 non-na values:
+
+ >>> df.dropna(thresh=2)
+ A B C D
+ 0 NaN 2.0 NaN 0
+ 1 3.0 4.0 NaN 1
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
| - [ ] closes #xxxx
Add examples to DataFrame.dropna.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15620 | 2017-03-08T18:47:01Z | 2017-03-10T13:44:27Z | 2017-03-10T13:44:27Z | 2017-03-10T13:44:27Z |
DOC: fix link to offset strings in resample method | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ff58a2aa77447..c45cf57152599 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4360,6 +4360,8 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
.. versionadded:: 0.19.0
+ Notes
+ -----
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
| - [ ] closes #xxxx
The link to offset strings was not rendered correctly, I've made a sections ```Notes```, and placed the link there.
Part of issue #14843 | https://api.github.com/repos/pandas-dev/pandas/pulls/15619 | 2017-03-08T18:20:06Z | 2017-03-08T18:46:50Z | 2017-03-08T18:46:50Z | 2017-03-08T19:12:27Z |
DOC: use mathjax on sphinx - #15469 Exponentially Weighed Windows pages now shows formulas | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1e82dfca87d17..6840f76866d2c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -46,7 +46,7 @@
'ipython_sphinxext.ipython_console_highlighting',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
- 'sphinx.ext.pngmath',
+ 'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
]
| closes #15469
| https://api.github.com/repos/pandas-dev/pandas/pulls/15618 | 2017-03-08T18:08:55Z | 2017-03-09T14:26:57Z | 2017-03-09T14:26:57Z | 2017-03-09T19:02:30Z |
handle nan values in DataFrame.update when overwrite=False #15593 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4e7a5ebdf6f67..c4abbaecb00e9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3846,13 +3846,13 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
if overwrite:
mask = isnull(that)
-
- # don't overwrite columns unecessarily
- if mask.all():
- continue
else:
mask = notnull(this)
+ # don't overwrite columns unecessarily
+ if mask.all():
+ continue
+
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
| closes #15593 | https://api.github.com/repos/pandas-dev/pandas/pulls/15617 | 2017-03-08T17:55:43Z | 2017-04-18T22:38:26Z | null | 2017-05-22T21:55:32Z |
BUG: Error when specifying int index containing NaN | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 8f671062464f0..a0f65c27ba993 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -765,6 +765,7 @@ Bug Fixes
- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
- Bug in ``Series.where()`` and ``DataFrame.where()`` where array-like conditionals were being rejected (:issue:`15414`)
+- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
- Bug in output formatting of a ``MultiIndex`` when names are integers (:issue:`12223`, :issue:`15262`)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 607a463083fdd..7f46f437489a1 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -203,6 +203,9 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
+ if isnull(data).any():
+ raise ValueError('cannot convert float '
+ 'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
@@ -230,8 +233,10 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
else:
data = np.array(data, dtype=dtype, copy=copy)
- except (TypeError, ValueError):
- pass
+ except (TypeError, ValueError) as e:
+ msg = str(e)
+ if 'cannot convert float' in msg:
+ raise
# maybe coerce to a sub-class
from pandas.tseries.period import (PeriodIndex,
@@ -585,7 +590,14 @@ def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
- return self._shallow_copy_with_infer(values, dtype=self.dtype)
+
+ dtype = self.dtype
+ if self._is_numeric_dtype and np.any(isnull(values)):
+ # We can't coerce to the numeric dtype of "self" (unless
+ # it's float) if there are NaN values in our output.
+ dtype = None
+
+ return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
@@ -689,7 +701,14 @@ def _coerce_scalar_to_index(self, item):
----------
item : scalar item to coerce
"""
- return Index([item], dtype=self.dtype, **self._get_attributes_dict())
+ dtype = self.dtype
+
+ if self._is_numeric_dtype and isnull(item):
+ # We can't coerce to the numeric dtype of "self" (unless
+ # it's float) if there are NaN values in our output.
+ dtype = None
+
+ return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 8c0a399cb58b3..05d3478ab0705 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -199,6 +199,23 @@ def __array__(self, dtype=None):
result = pd.Index(ArrayLike(array))
self.assert_index_equal(result, expected)
+ def test_constructor_int_dtype_nan(self):
+ # see gh-15187
+ data = [np.nan]
+ msg = "cannot convert"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ Index(data, dtype='int64')
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ Index(data, dtype='uint64')
+
+ # This, however, should not break
+ # because NaN is float.
+ expected = Float64Index(data)
+ result = Index(data, dtype='float')
+ tm.assert_index_equal(result, expected)
+
def test_index_ctor_infer_nan_nat(self):
# GH 13467
exp = pd.Float64Index([np.nan, np.nan])
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index e23e7c19ed799..d0ce34169f79e 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -5,7 +5,7 @@
import numpy as np
-from pandas import (date_range, Series, Index, Float64Index,
+from pandas import (date_range, notnull, Series, Index, Float64Index,
Int64Index, UInt64Index, RangeIndex)
import pandas.util.testing as tm
@@ -686,6 +686,31 @@ def test_coerce_list(self):
arr = Index([1, 2, 3, 4], dtype=object)
tm.assertIsInstance(arr, Index)
+ def test_where(self):
+ i = self.create_index()
+ result = i.where(notnull(i))
+ expected = i
+ tm.assert_index_equal(result, expected)
+
+ _nan = i._na_value
+ cond = [False] + [True] * len(i[1:])
+ expected = pd.Index([_nan] + i[1:].tolist())
+
+ result = i.where(cond)
+ tm.assert_index_equal(result, expected)
+
+ def test_where_array_like(self):
+ i = self.create_index()
+
+ _nan = i._na_value
+ cond = [False] + [True] * (len(i) - 1)
+ klasses = [list, tuple, np.array, pd.Series]
+ expected = pd.Index([_nan] + i[1:].tolist())
+
+ for klass in klasses:
+ result = i.where(klass(cond))
+ tm.assert_index_equal(result, expected)
+
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 38e715fce2720..53c88897d6764 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -8,7 +8,8 @@
import numpy as np
-from pandas import (Series, Index, Float64Index, Int64Index, RangeIndex)
+from pandas import (notnull, Series, Index, Float64Index,
+ Int64Index, RangeIndex)
from pandas.util.testing import assertRaisesRegexp
import pandas.util.testing as tm
@@ -915,3 +916,28 @@ def test_len_specialised(self):
i = RangeIndex(0, 5, step)
self.assertEqual(len(i), 0)
+
+ def test_where(self):
+ i = self.create_index()
+ result = i.where(notnull(i))
+ expected = i
+ tm.assert_index_equal(result, expected)
+
+ _nan = i._na_value
+ cond = [False] + [True] * len(i[1:])
+ expected = pd.Index([_nan] + i[1:].tolist())
+
+ result = i.where(cond)
+ tm.assert_index_equal(result, expected)
+
+ def test_where_array_like(self):
+ i = self.create_index()
+
+ _nan = i._na_value
+ cond = [False] + [True] * (len(i) - 1)
+ klasses = [list, tuple, np.array, pd.Series]
+ expected = pd.Index([_nan] + i[1:].tolist())
+
+ for klass in klasses:
+ result = i.where(klass(cond))
+ tm.assert_index_equal(result, expected)
| Finally got around to patching this. Partially addresses #15187. | https://api.github.com/repos/pandas-dev/pandas/pulls/15616 | 2017-03-08T17:54:43Z | 2017-03-10T14:41:26Z | null | 2017-03-10T14:43:29Z |
Clean multiindex keys | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6559fc4c24ce2..d2491f3dcceba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2214,7 +2214,7 @@ def query(self, expr, inplace=False, **kwargs):
try:
new_data = self.loc[res]
- except ValueError:
+ except (ValueError, NotImplementedError):
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index ae0aaf98fdf02..eb960e70d7987 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1511,37 +1511,28 @@ def _getitem_axis(self, key, axis=0):
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
+ if isinstance(key, ABCDataFrame):
+ # GH 15438
+ raise NotImplementedError("Indexing a with a DataFrame key is "
+ "not implemented")
+ elif hasattr(key, 'ndim') and key.ndim > 1:
+ raise NotImplementedError("Indexing with a multidimensional "
+ "key is not implemented")
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
-
- if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
- # Series, or 0,1 ndim ndarray
+ if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim != 1:
+ # Series or 1-dim ndarray
# GH 14730
key = list(key)
- elif isinstance(key, ABCDataFrame):
- # GH 15438
- raise NotImplementedError("Indexing a MultiIndex with a "
- "DataFrame key is not "
- "implemented")
- elif hasattr(key, 'ndim') and key.ndim > 1:
- raise NotImplementedError("Indexing a MultiIndex with a "
- "multidimensional key is not "
- "implemented")
-
- if (not isinstance(key, tuple) and len(key) > 1 and
- not isinstance(key[0], tuple)):
- key = tuple([key])
+ if not isinstance(key, tuple):
+ return self._getitem_iterable(key, axis=axis)
# an iterable multi-selection
- if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
-
- if hasattr(key, 'ndim') and key.ndim > 1:
- raise ValueError('Cannot index with multidimensional key')
-
+ else:
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py
index c12bb8910ffc9..bedc1e1669ed0 100644
--- a/pandas/tests/indexing/test_multiindex.py
+++ b/pandas/tests/indexing/test_multiindex.py
@@ -174,6 +174,10 @@ def test_loc_getitem_series(self):
result = x.loc[empty]
tm.assert_series_equal(result, expected)
+ with tm.assertRaises(KeyError):
+ # GH15452
+ x.loc[[4, 5]]
+
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
@@ -203,6 +207,27 @@ def test_loc_getitem_array(self):
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
+ def test_loc_generator(self):
+ index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
+ x = Series(index=index, data=range(9), dtype=np.float64)
+ y = [1, 3]
+
+ # getitem:
+ expected = Series(
+ data=[0, 1, 2, 6, 7, 8],
+ index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
+ dtype=np.float64)
+ result = x.loc[iter(y)]
+ tm.assert_series_equal(result, expected)
+
+ # setitem:
+ expected = Series(
+ data=[9, 10, 11, 3, 4, 5, 12, 13, 14],
+ index=index,
+ dtype=np.float64)
+ x.loc[iter(y)] = range(9, 15)
+ tm.assert_series_equal(x, expected)
+
def test_iloc_getitem_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 6d8a54b538237..10d06220b4215 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -4,6 +4,7 @@
import pytest
from datetime import datetime, timedelta
+import pytest
from numpy import nan
import numpy as np
@@ -290,6 +291,44 @@ def test_getitem_generator(self):
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
+ def test_setitem_generator(self):
+ bool_idx = self.series > 0
+ idces = self.series[bool_idx].index
+
+ values = range(bool_idx.sum())
+
+ expected = self.series.copy()
+ expected[bool_idx] = values
+
+ # list of labels:
+ s1 = self.series.copy()
+ s1[iter(idces)] = values
+ assert_series_equal(s1, expected)
+
+ # list of labels with .loc:
+ s2 = self.series.copy()
+ s2.loc[iter(idces)] = values
+ assert_series_equal(s2, expected)
+
+ @pytest.mark.xfail(reason="Setitem with booleans generators unsupported")
+ def test_setitem_boolean_generator(self):
+ bool_idx = self.series > 0
+
+ values = range(bool_idx.sum())
+
+ expected = self.series.copy()
+ expected[bool_idx] = values
+
+ # boolean generator (fails)
+ s1 = self.series.copy()
+ s1[iter(bool_idx)] = values
+ assert_series_equal(s1, expected)
+
+ # boolean generator with .loc (fails)
+ s2 = self.series.copy()
+ s2.loc[iter(bool_idx)] = values
+ assert_series_equal(s2, expected)
+
def test_type_promotion(self):
# GH12599
s = pd.Series()
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index 382cff4b9d0ac..bfbdc498f1b5b 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -512,13 +512,17 @@ def test_loc(self):
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
- result = sparse.loc[[1, 3, 4]]
- exp = orig.loc[[1, 3, 4]].to_sparse()
+ with tm.assertRaises(KeyError):
+ # GH15452
+ sparse.loc[['D', 'E', 'F']]
+
+ result = sparse.loc[['A', 'B']]
+ exp = orig.loc[['A', 'B']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
- result = sparse.loc[[1, 3, 4, 5]]
- exp = orig.loc[[1, 3, 4, 5]].to_sparse()
+ result = sparse.loc[['A', 'B', 'C', 'D']]
+ exp = orig.loc[['A', 'B', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# single element list (GH 15447)
| - [x] closes #15452
- [x] tests added / passed
- [x] passes ``git diff master | flake8 --diff``
- [x] whatsnew entry
Most of the change to code is mere refactoring (the small change in ``pandas/core/frame.py`` can be undone once we decide about #15438), but I did suppress three obsolete (the same job [is already done](https://github.com/toobaz/pandas/blob/clean_multiindex_keys/pandas/core/indexing.py#L1076) in the right way in ``_getitem_iterable``) and buggy [lines](https://github.com/pandas-dev/pandas/blob/master/pandas/core/indexing.py#L1544) which actually not only caused #15452, but were the real explanation for #15424.
As a side consequence, generators can now be used to index a ``MultiIndex``. This is not documented, as far as I know, but it was already possible with a flat ``Index``, and it was [actually also tested](https://github.com/pandas-dev/pandas/blob/master/pandas/tests/series/test_indexing.py#L270), so I added tests for ``MultiIndex`` too and to setitem (except that not all of them currently work). Once this PR goes through, I plan to open three separate issues for
- the failing tests with setitem and generators (of boolean values)
- ``df.loc[['missing', 'keys'], :]``, which still erroneously returns an empty ``DataFrame`` rather than raising (as ``df.loc[['missing', 'keys']]`` does)
- [my proposal](https://github.com/pandas-dev/pandas/issues/15452#issuecomment-281542685) about a change in behaviour for missing keys
Feel free to tell me if any of this is not worth discussing. | https://api.github.com/repos/pandas-dev/pandas/pulls/15615 | 2017-03-08T14:35:27Z | 2017-07-14T07:22:17Z | null | 2017-07-14T07:22:17Z |
BLD: fix linting wrt to #15537, changes in location of pandas/src | diff --git a/ci/lint.sh b/ci/lint.sh
index 2ffc68e5eb139..ed3af2568811c 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -8,9 +8,9 @@ RET=0
if [ "$LINT" ]; then
- # pandas/src is C code, so no need to search there.
+ # pandas/_libs/src is C code, so no need to search there.
echo "Linting *.py"
- flake8 pandas --filename=*.py --exclude pandas/src
+ flake8 pandas --filename=*.py --exclude pandas/_libs/src
if [ $? -ne "0" ]; then
RET=1
fi
@@ -46,8 +46,8 @@ if [ "$LINT" ]; then
echo "Linting *.c and *.h"
for path in '*.h' 'period_helper.c' 'datetime' 'parser' 'ujson'
do
- echo "linting -> pandas/src/$path"
- cpplint --quiet --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/src/$path
+ echo "linting -> pandas/_libs/src/$path"
+ cpplint --quiet --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/_libs/src/$path
if [ $? -ne "0" ]; then
RET=1
fi
diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h
index 3445fc3e48376..97ec5782b625b 100644
--- a/pandas/_libs/src/datetime/np_datetime.h
+++ b/pandas/_libs/src/datetime/np_datetime.h
@@ -14,8 +14,8 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
*/
-#ifndef PANDAS_SRC_DATETIME_NP_DATETIME_H_
-#define PANDAS_SRC_DATETIME_NP_DATETIME_H_
+#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
+#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
#include <numpy/ndarraytypes.h>
@@ -124,4 +124,4 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
PANDAS_DATETIMEUNIT get_datetime64_unit(PyObject *obj);
-#endif // PANDAS_SRC_DATETIME_NP_DATETIME_H_
+#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h
index 1114ec5eae064..833c1869c1664 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.h
+++ b/pandas/_libs/src/datetime/np_datetime_strings.h
@@ -19,8 +19,8 @@ This file implements string parsing and creation for NumPy datetime.
*/
-#ifndef PANDAS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
-#define PANDAS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
/*
* Parses (almost) standard ISO 8601 date strings. The differences are:
@@ -103,4 +103,4 @@ make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
int local, PANDAS_DATETIMEUNIT base, int tzoffset,
NPY_CASTING casting);
-#endif // PANDAS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
diff --git a/pandas/_libs/src/datetime_helper.h b/pandas/_libs/src/datetime_helper.h
index bef4b4266c824..8023285f85b9b 100644
--- a/pandas/_libs/src/datetime_helper.h
+++ b/pandas/_libs/src/datetime_helper.h
@@ -7,8 +7,8 @@ Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/
-#ifndef PANDAS_SRC_DATETIME_HELPER_H_
-#define PANDAS_SRC_DATETIME_HELPER_H_
+#ifndef PANDAS__LIBS_SRC_DATETIME_HELPER_H_
+#define PANDAS__LIBS_SRC_DATETIME_HELPER_H_
#include <stdio.h>
#include "datetime.h"
@@ -33,4 +33,4 @@ npy_float64 total_seconds(PyObject *td) {
return (microseconds + (seconds + days_in_seconds) * 1000000.0) / 1000000.0;
}
-#endif // PANDAS_SRC_DATETIME_HELPER_H_
+#endif // PANDAS__LIBS_SRC_DATETIME_HELPER_H_
diff --git a/pandas/_libs/src/helper.h b/pandas/_libs/src/helper.h
index 39bcf27e074df..26b4d033b963b 100644
--- a/pandas/_libs/src/helper.h
+++ b/pandas/_libs/src/helper.h
@@ -7,8 +7,8 @@ Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/
-#ifndef PANDAS_SRC_HELPER_H_
-#define PANDAS_SRC_HELPER_H_
+#ifndef PANDAS__LIBS_SRC_HELPER_H_
+#define PANDAS__LIBS_SRC_HELPER_H_
#ifndef PANDAS_INLINE
#if defined(__GNUC__)
@@ -22,4 +22,4 @@ The full license is in the LICENSE file, distributed with this software.
#endif
#endif
-#endif // PANDAS_SRC_HELPER_H_
+#endif // PANDAS__LIBS_SRC_HELPER_H_
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h
index 809edb2e99fa2..5f4db5b2f55d3 100644
--- a/pandas/_libs/src/numpy_helper.h
+++ b/pandas/_libs/src/numpy_helper.h
@@ -7,8 +7,8 @@ Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/
-#ifndef PANDAS_SRC_NUMPY_HELPER_H_
-#define PANDAS_SRC_NUMPY_HELPER_H_
+#ifndef PANDAS__LIBS_SRC_NUMPY_HELPER_H_
+#define PANDAS__LIBS_SRC_NUMPY_HELPER_H_
#include "Python.h"
#include "helper.h"
@@ -159,4 +159,4 @@ PANDAS_INLINE PyObject* unbox_if_zerodim(PyObject* arr) {
}
}
-#endif // PANDAS_SRC_NUMPY_HELPER_H_
+#endif // PANDAS__LIBS_SRC_NUMPY_HELPER_H_
diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h
index 5d2a0dad3da17..6dd8b66eab33d 100644
--- a/pandas/_libs/src/parse_helper.h
+++ b/pandas/_libs/src/parse_helper.h
@@ -7,8 +7,8 @@ Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/
-#ifndef PANDAS_SRC_PARSE_HELPER_H_
-#define PANDAS_SRC_PARSE_HELPER_H_
+#ifndef PANDAS__LIBS_SRC_PARSE_HELPER_H_
+#define PANDAS__LIBS_SRC_PARSE_HELPER_H_
#include <errno.h>
#include <float.h>
@@ -270,4 +270,4 @@ static double xstrtod(const char *str, char **endptr, char decimal, char sci,
return number;
}
-#endif // PANDAS_SRC_PARSE_HELPER_H_
+#endif // PANDAS__LIBS_SRC_PARSE_HELPER_H_
diff --git a/pandas/_libs/src/parser/io.h b/pandas/_libs/src/parser/io.h
index 5a0c2b2b5e4a4..77121e9a169c1 100644
--- a/pandas/_libs/src/parser/io.h
+++ b/pandas/_libs/src/parser/io.h
@@ -7,8 +7,8 @@ Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/
-#ifndef PANDAS_SRC_PARSER_IO_H_
-#define PANDAS_SRC_PARSER_IO_H_
+#ifndef PANDAS__LIBS_SRC_PARSER_IO_H_
+#define PANDAS__LIBS_SRC_PARSER_IO_H_
#include "Python.h"
#include "tokenizer.h"
@@ -83,4 +83,4 @@ void *buffer_file_bytes(void *source, size_t nbytes, size_t *bytes_read,
void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
int *status);
-#endif // PANDAS_SRC_PARSER_IO_H_
+#endif // PANDAS__LIBS_SRC_PARSER_IO_H_
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index 6c1bc630ab547..9853b5149bee3 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -9,8 +9,8 @@ See LICENSE for the license
*/
-#ifndef PANDAS_SRC_PARSER_TOKENIZER_H_
-#define PANDAS_SRC_PARSER_TOKENIZER_H_
+#ifndef PANDAS__LIBS_SRC_PARSER_TOKENIZER_H_
+#define PANDAS__LIBS_SRC_PARSER_TOKENIZER_H_
#include <errno.h>
#include <stdio.h>
@@ -276,4 +276,4 @@ double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
int skip_trailing);
int to_boolean(const char *item, uint8_t *val);
-#endif // PANDAS_SRC_PARSER_TOKENIZER_H_
+#endif // PANDAS__LIBS_SRC_PARSER_TOKENIZER_H_
diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h
index 601717692ff6d..45afc074cab72 100644
--- a/pandas/_libs/src/period_helper.h
+++ b/pandas/_libs/src/period_helper.h
@@ -11,8 +11,8 @@ Cython to pandas. This primarily concerns interval representation and
frequency conversion routines.
*/
-#ifndef PANDAS_SRC_PERIOD_HELPER_H_
-#define PANDAS_SRC_PERIOD_HELPER_H_
+#ifndef PANDAS__LIBS_SRC_PERIOD_HELPER_H_
+#define PANDAS__LIBS_SRC_PERIOD_HELPER_H_
#include <Python.h>
#include "headers/stdint.h"
@@ -188,4 +188,4 @@ int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year);
void initialize_daytime_conversion_factor_matrix(void);
-#endif // PANDAS_SRC_PERIOD_HELPER_H_
+#endif // PANDAS__LIBS_SRC_PERIOD_HELPER_H_
diff --git a/pandas/_libs/src/skiplist.h b/pandas/_libs/src/skiplist.h
index 013516a49fa2f..f9527e72f577e 100644
--- a/pandas/_libs/src/skiplist.h
+++ b/pandas/_libs/src/skiplist.h
@@ -13,8 +13,8 @@ Port of Wes McKinney's Cython version of Raymond Hettinger's original pure
Python recipe (http://rhettinger.wordpress.com/2010/02/06/lost-knowledge/)
*/
-#ifndef PANDAS_SRC_SKIPLIST_H_
-#define PANDAS_SRC_SKIPLIST_H_
+#ifndef PANDAS__LIBS_SRC_SKIPLIST_H_
+#define PANDAS__LIBS_SRC_SKIPLIST_H_
#include <math.h>
#include <stdio.h>
@@ -287,4 +287,4 @@ PANDAS_INLINE int skiplist_remove(skiplist_t *skp, double value) {
return 1;
}
-#endif // PANDAS_SRC_SKIPLIST_H_
+#endif // PANDAS__LIBS_SRC_SKIPLIST_H_
diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h
index 3bfb4b26c0095..d0588348baa44 100644
--- a/pandas/_libs/src/ujson/lib/ultrajson.h
+++ b/pandas/_libs/src/ujson/lib/ultrajson.h
@@ -49,8 +49,8 @@ tree doesn't have cyclic references.
*/
-#ifndef PANDAS_SRC_UJSON_LIB_ULTRAJSON_H_
-#define PANDAS_SRC_UJSON_LIB_ULTRAJSON_H_
+#ifndef PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_
+#define PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_
#include <stdio.h>
#include <wchar.h>
@@ -307,4 +307,4 @@ EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec,
const char *buffer, size_t cbBuffer);
EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t);
-#endif // PANDAS_SRC_UJSON_LIB_ULTRAJSON_H_
+#endif // PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_
diff --git a/pandas/_libs/src/ujson/python/py_defines.h b/pandas/_libs/src/ujson/python/py_defines.h
index b32285766c86a..82385fdd48a3b 100644
--- a/pandas/_libs/src/ujson/python/py_defines.h
+++ b/pandas/_libs/src/ujson/python/py_defines.h
@@ -16,7 +16,7 @@ modification, are permitted provided that the following conditions are met:
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@@ -35,8 +35,8 @@ Numeric decoder derived from from TCL library
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
-#ifndef PANDAS_SRC_UJSON_PYTHON_PY_DEFINES_H_
-#define PANDAS_SRC_UJSON_PYTHON_PY_DEFINES_H_
+#ifndef PANDAS__LIBS_SRC_UJSON_PYTHON_PY_DEFINES_H_
+#define PANDAS__LIBS_SRC_UJSON_PYTHON_PY_DEFINES_H_
#include <Python.h>
@@ -55,4 +55,4 @@ Numeric decoder derived from from TCL library
#endif
-#endif // PANDAS_SRC_UJSON_PYTHON_PY_DEFINES_H_
+#endif // PANDAS__LIBS_SRC_UJSON_PYTHON_PY_DEFINES_H_
diff --git a/pandas/_libs/src/ujson/python/version.h b/pandas/_libs/src/ujson/python/version.h
index c074ef572101d..ef6d28bf3a1f7 100644
--- a/pandas/_libs/src/ujson/python/version.h
+++ b/pandas/_libs/src/ujson/python/version.h
@@ -16,7 +16,7 @@ modification, are permitted provided that the following conditions are met:
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@@ -35,9 +35,9 @@ Numeric decoder derived from from TCL library
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
-#ifndef PANDAS_SRC_UJSON_PYTHON_VERSION_H_
-#define PANDAS_SRC_UJSON_PYTHON_VERSION_H_
+#ifndef PANDAS__LIBS_SRC_UJSON_PYTHON_VERSION_H_
+#define PANDAS__LIBS_SRC_UJSON_PYTHON_VERSION_H_
#define UJSON_VERSION "1.33"
-#endif // PANDAS_SRC_UJSON_PYTHON_VERSION_H_
+#endif // PANDAS__LIBS_SRC_UJSON_PYTHON_VERSION_H_
diff --git a/test.bat b/test.bat
index 2c5f25c24a637..080a1cc163a05 100644
--- a/test.bat
+++ b/test.bat
@@ -1,3 +1,3 @@
:: test on windows
-pytest --skip-slow --skip-network pandas
+pytest --skip-slow --skip-network pandas %*
diff --git a/test_fast.sh b/test_fast.sh
index 30ac7f84cbe8b..9b984156a796c 100755
--- a/test_fast.sh
+++ b/test_fast.sh
@@ -5,4 +5,4 @@
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
-pytest pandas --skip-slow --skip-network -m "not single" -n 4
+pytest pandas --skip-slow --skip-network -m "not single" -n 4 "$@"
| https://api.github.com/repos/pandas-dev/pandas/pulls/15614 | 2017-03-08T14:26:08Z | 2017-03-08T14:52:30Z | 2017-03-08T14:52:30Z | 2017-03-08T14:52:30Z | |
DOC: updated docstrings for FramePlotMethods and SeriesPlotMethods | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index d46c38c117445..f4407ee6a7dc2 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -3763,6 +3763,8 @@ def pie(self, **kwds):
"""
return self(kind='pie', **kwds)
+SeriesPlotMethods.__doc__ = _shared_docs['plot'] % _shared_doc_series_kwargs
+
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
@@ -4004,3 +4006,5 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
+
+FramePlotMethods.__doc__ = _shared_docs['plot'] % _shared_doc_df_kwargs
| Closes #15458
| https://api.github.com/repos/pandas-dev/pandas/pulls/15611 | 2017-03-08T07:11:22Z | 2017-08-01T22:54:40Z | null | 2017-08-01T22:54:40Z |
BUG: make Series.sort_values(ascending=[False]) behave as ascending=False (#15604) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e459c854dfab9..571900a9a16f0 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -228,7 +228,7 @@ Other enhancements
- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`)
- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
-
+- ``Series.sort_values`` accepts a one element list of bool for consistency with the behavior of ``DataFrame.sort_values`` (:issue:`15604`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 626a4a81193cc..f46819c05a665 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -14,6 +14,7 @@
import numpy.ma as ma
from pandas.types.common import (_coerce_to_dtype, is_categorical_dtype,
+ is_bool,
is_integer, is_integer_dtype,
is_float_dtype,
is_extension_type, is_datetimetz,
@@ -1722,6 +1723,15 @@ def _try_kind_sort(arr):
argsorted = _try_kind_sort(arr[good])
+ if is_list_like(ascending):
+ if len(ascending) != 1:
+ raise ValueError('Length of ascending (%d) must be 1 '
+ 'for Series' % (len(ascending)))
+ ascending = ascending[0]
+
+ if not is_bool(ascending):
+ raise ValueError('ascending must be boolean')
+
if not ascending:
argsorted = argsorted[::-1]
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index db506f12a2293..590a530a847bd 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -64,6 +64,25 @@ def test_sort_values(self):
ordered = ts.sort_values(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.valid().values)
+ # ascending=[False] should behave the same as ascending=False
+ ordered = ts.sort_values(ascending=[False])
+ expected = ts.sort_values(ascending=False)
+ assert_series_equal(expected, ordered)
+ ordered = ts.sort_values(ascending=[False], na_position='first')
+ expected = ts.sort_values(ascending=False, na_position='first')
+ assert_series_equal(expected, ordered)
+
+ self.assertRaises(ValueError,
+ lambda: ts.sort_values(ascending=None))
+ self.assertRaises(ValueError,
+ lambda: ts.sort_values(ascending=[]))
+ self.assertRaises(ValueError,
+ lambda: ts.sort_values(ascending=[1, 2, 3]))
+ self.assertRaises(ValueError,
+ lambda: ts.sort_values(ascending=[False, False]))
+ self.assertRaises(ValueError,
+ lambda: ts.sort_values(ascending='foobar'))
+
# inplace=True
ts = self.ts.copy()
ts.sort_values(ascending=False, inplace=True)
| - [X ] closes #15604
- [ X] tests added / passed
- [X] passes ``git diff upstream/master | flake8 --diff``
- [X] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/15607 | 2017-03-07T22:06:20Z | 2017-03-08T13:37:19Z | null | 2017-03-08T13:37:37Z |
BUG: DataFrame.hist() does not get along with matplotlib.pyplot.tight_layout() | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e459c854dfab9..ef189697ae27f 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -765,7 +765,7 @@ Bug Fixes
- Bug in ``.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
-- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 0.2.0``) (:issue:`9351`)
+- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (:issue:`9351`)
- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 22de7055e3cea..c58856e37abf7 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -241,12 +241,11 @@ def test_hist_layout(self):
@slow
# GH 9351
def test_tight_layout(self):
- if self.mpl_ge_2_0_0:
- df = DataFrame(randn(100, 2))
- _check_plot_works(df.hist)
- self.plt.tight_layout()
+ df = DataFrame(randn(100, 3))
+ _check_plot_works(df.hist)
+ self.plt.tight_layout()
- tm.close()
+ tm.close()
@tm.mplskip
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index d46c38c117445..5325d6ca88575 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -3457,7 +3457,7 @@ def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
if naxes != nplots:
for ax in axarr[naxes:]:
- ax.set_visible(False)
+ ax.axis('off')
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
| - [ ] (re)closes #9351
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15602 | 2017-03-07T17:01:17Z | 2017-03-12T23:51:15Z | null | 2023-05-11T01:15:12Z |
DEPR: Panel deprecated | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 57480a244f308..2423f1a342994 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -505,13 +505,18 @@ two ``Series`` or any combination of ``DataFrame/Series`` or
- ``DataFrame/DataFrame``: by default compute the statistic for matching column
names, returning a DataFrame. If the keyword argument ``pairwise=True`` is
passed then computes the statistic for each pair of columns, returning a
- ``Panel`` whose ``items`` are the dates in question (see :ref:`the next section
+ ``MultiIndexed DataFrame`` whose ``index`` are the dates in question (see :ref:`the next section
<stats.moments.corr_pairwise>`).
For example:
.. ipython:: python
+ df = pd.DataFrame(np.random.randn(1000, 4),
+ index=pd.date_range('1/1/2000', periods=1000),
+ columns=['A', 'B', 'C', 'D'])
+ df = df.cumsum()
+
df2 = df[:20]
df2.rolling(window=5).corr(df2['B'])
@@ -520,11 +525,16 @@ For example:
Computing rolling pairwise covariances and correlations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ Prior to version 0.20.0 if ``pairwise=True`` was passed, a ``Panel`` would be returned.
+ This will now return a 2-level MultiIndexed DataFrame, see the whatsnew :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>`
+
In financial data analysis and other fields it's common to compute covariance
and correlation matrices for a collection of time series. Often one is also
interested in moving-window covariance and correlation matrices. This can be
done by passing the ``pairwise`` keyword argument, which in the case of
-``DataFrame`` inputs will yield a ``Panel`` whose ``items`` are the dates in
+``DataFrame`` inputs will yield a MultiIndexed ``DataFrame`` whose ``index`` are the dates in
question. In the case of a single DataFrame argument the ``pairwise`` argument
can even be omitted:
@@ -539,12 +549,12 @@ can even be omitted:
.. ipython:: python
covs = df[['B','C','D']].rolling(window=50).cov(df[['A','B','C']], pairwise=True)
- covs[df.index[-50]]
+ covs.loc['2002-09-22':]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls[df.index[-50]]
+ correls.loc['2002-09-22':]
You can efficiently retrieve the time series of correlations between two
columns using ``.loc`` indexing:
@@ -557,7 +567,7 @@ columns using ``.loc`` indexing:
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.loc[:, 'A', 'C'].plot()
+ correls.loc[:, ('A', 'C')].plot()
.. _stats.aggregate:
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 4fcb63c18757a..2b11b23b1d1c2 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -763,6 +763,11 @@ completion mechanism so they can be tab-completed:
Panel
-----
+.. warning::
+
+ In 0.20.0, ``Panel`` is deprecated and will be removed in
+ a future version. See the section :ref:`Deprecate Panel <dsintro.deprecate_panel>`.
+
Panel is a somewhat less-used, but still important container for 3-dimensional
data. The term `panel data <http://en.wikipedia.org/wiki/Panel_data>`__ is
derived from econometrics and is partially responsible for the name pandas:
@@ -783,6 +788,7 @@ From 3D ndarray with optional axis labels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. ipython:: python
+ :okwarning:
wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
@@ -794,6 +800,7 @@ From dict of DataFrame objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. ipython:: python
+ :okwarning:
data = {'Item1' : pd.DataFrame(np.random.randn(4, 3)),
'Item2' : pd.DataFrame(np.random.randn(4, 2))}
@@ -816,6 +823,7 @@ dictionary of DataFrames as above, and the following named parameters:
For example, compare to the construction above:
.. ipython:: python
+ :okwarning:
pd.Panel.from_dict(data, orient='minor')
@@ -824,6 +832,7 @@ DataFrame objects with mixed-type columns, all of the data will get upcasted to
``dtype=object`` unless you pass ``orient='minor'``:
.. ipython:: python
+ :okwarning:
df = pd.DataFrame({'a': ['foo', 'bar', 'baz'],
'b': np.random.randn(3)})
@@ -851,6 +860,7 @@ This method was introduced in v0.7 to replace ``LongPanel.to_long``, and convert
a DataFrame with a two-level index to a Panel.
.. ipython:: python
+ :okwarning:
midx = pd.MultiIndex(levels=[['one', 'two'], ['x','y']], labels=[[1,1,0,0],[1,0,1,0]])
df = pd.DataFrame({'A' : [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=midx)
@@ -880,6 +890,7 @@ A Panel can be rearranged using its ``transpose`` method (which does not make a
copy by default unless the data are heterogeneous):
.. ipython:: python
+ :okwarning:
wp.transpose(2, 0, 1)
@@ -909,6 +920,7 @@ Squeezing
Another way to change the dimensionality of an object is to ``squeeze`` a 1-len object, similar to ``wp['Item1']``
.. ipython:: python
+ :okwarning:
wp.reindex(items=['Item1']).squeeze()
wp.reindex(items=['Item1'], minor=['B']).squeeze()
@@ -923,12 +935,55 @@ for more on this. To convert a Panel to a DataFrame, use the ``to_frame``
method:
.. ipython:: python
+ :okwarning:
panel = pd.Panel(np.random.randn(3, 5, 4), items=['one', 'two', 'three'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['a', 'b', 'c', 'd'])
panel.to_frame()
+
+.. _dsintro.deprecate_panel:
+
+Deprecate Panel
+---------------
+
+Over the last few years, pandas has increased in both breadth and depth, with new features,
+datatype support, and manipulation routines. As a result, supporting efficient indexing and functional
+routines for ``Series``, ``DataFrame`` and ``Panel`` has contributed to an increasingly fragmented and
+difficult-to-understand codebase.
+
+The 3-d structure of a ``Panel`` is much less common for many types of data analysis,
+than the 1-d of the ``Series`` or the 2-D of the ``DataFrame``. Going forward it makes sense for
+pandas to focus on these areas exclusively.
+
+Oftentimes, one can simply use a MultiIndex ``DataFrame`` for easily working with higher dimensional data.
+
+In additon, the ``xarray`` package was built from the ground up, specifically in order to
+support the multi-dimensional analysis that is one of ``Panel`` s main usecases.
+`Here is a link to the xarray panel-transition documentation <http://xarray.pydata.org/en/stable/pandas.html#panel-transition>`__.
+
+.. ipython:: python
+ :okwarning:
+
+ p = tm.makePanel()
+ p
+
+Convert to a MultiIndex DataFrame
+
+.. ipython:: python
+ :okwarning:
+
+ p.to_frame()
+
+Alternatively, one can convert to an xarray ``DataArray``.
+
+.. ipython:: python
+
+ p.to_xarray()
+
+You can see the full-documentation for the `xarray package <http://xarray.pydata.org/en/stable/>`__.
+
.. _dsintro.panelnd:
.. _dsintro.panel4d:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 31b0efa14a44d..132f20cb73142 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -10,13 +10,16 @@ users upgrade to this version.
Highlights include:
- The ``.ix`` indexer has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_ix>`
+- ``Panel`` has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_panel>`
- Improved user API when accessing levels in ``.groupby()``, see :ref:`here <whatsnew_0200.enhancements.groupby_access>`
- Improved support for UInt64 dtypes, see :ref:`here <whatsnew_0200.enhancements.uint64_support>`
- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref:`here <whatsnew_0200.enhancements.table_schema>`
+- Window Binary Corr/Cov operations return a MultiIndexed ``DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated, see :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>`
- Support for S3 handling now uses ``s3fs``, see :ref:`here <whatsnew_0200.api_breaking.s3>`
- Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here <whatsnew_0200.api_breaking.gbq>`
- Switched the test framework to use `pytest <http://doc.pytest.org/en/latest>`__ (:issue:`13097`)
+
Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating.
.. contents:: What's new in v0.20.0
@@ -425,6 +428,33 @@ Using ``.iloc``. Here we will get the location of the 'A' column, then use *posi
df.iloc[[0, 2], df.columns.get_loc('A')]
+.. _whatsnew_0200.api_breaking.deprecate_panel:
+
+Deprecate Panel
+^^^^^^^^^^^^^^^
+
+``Panel`` is deprecated and will be removed in a future version. The recommended way to represent 3-D data are
+with a ``MultiIndex``on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
+provides a :meth:`~Panel.to_xarray` method to automate this conversion. See the documentation :ref:`Deprecate Panel <dsintro.deprecate_panel>`. (:issue:`13563`).
+
+.. ipython:: python
+ :okwarning:
+
+ p = tm.makePanel()
+ p
+
+Convert to a MultiIndex DataFrame
+
+.. ipython:: python
+
+ p.to_frame()
+
+Convert to an xarray DataArray
+
+.. ipython:: python
+
+ p.to_xarray()
+
.. _whatsnew.api_breaking.io_compat:
Possible incompat for HDF5 formats for pandas < 0.13.0
@@ -836,6 +866,51 @@ New Behavior:
df.groupby('A').agg([np.mean, np.std, np.min, np.max])
+.. _whatsnew_0200.api_breaking.rolling_pairwise:
+
+Window Binary Corr/Cov operations return a MultiIndex DataFrame
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A binary window operation, like ``.corr()`` or ``.cov()``, when operating on a ``.rolling(..)``, ``.expanding(..)``, or ``.ewm(..)`` object,
+will now return a 2-level ``MultiIndexed DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated,
+see :ref:`here <whatsnew_0200.api_breaking.deprecate_panel>`. These are equivalent in function,
+but MultiIndexed ``DataFrame`` s enjoy more support in pandas.
+See the section on :ref:`Windowed Binary Operations <stats.moments.binary>` for more information. (:issue:`15677`)
+
+.. ipython:: python
+
+ np.random.seed(1234)
+ df = pd.DataFrame(np.random.rand(100, 2),
+ columns=pd.Index(['A', 'B'], name='bar'),
+ index=pd.date_range('20160101',
+ periods=100, freq='D', name='foo'))
+ df.tail()
+
+Old Behavior:
+
+.. code-block:: ipython
+
+ In [2]: df.rolling(12).corr()
+ Out[2]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 100 (items) x 2 (major_axis) x 2 (minor_axis)
+ Items axis: 2016-01-01 00:00:00 to 2016-04-09 00:00:00
+ Major_axis axis: A to B
+ Minor_axis axis: A to B
+
+New Behavior:
+
+.. ipython:: python
+
+ res = df.rolling(12).corr()
+ res.tail()
+
+Retrieving a correlation matrix for a cross-section
+
+.. ipython:: python
+
+ df.rolling(12).corr().loc['2016-04-07']
+
.. _whatsnew_0200.api_breaking.hdfstore_where:
HDFStore where string comparison
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 9e95023ccb359..24f4d219fb9ca 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -4,10 +4,8 @@
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
-import warnings
-
import numpy as np
-
+import warnings
from pandas.types.cast import (infer_dtype_from_scalar,
maybe_cast_item)
from pandas.types.common import (is_integer, is_list_like,
@@ -132,6 +130,18 @@ def _constructor(self):
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
+ # deprecation GH13563
+ warnings.warn("\nPanel is deprecated and will be removed in a "
+ "future version.\nThe recommended way to represent "
+ "these types of 3-dimensional data are with a "
+ "MultiIndex on a DataFrame, via the "
+ "Panel.to_frame() method\n"
+ "Alternatively, you can use the xarray package "
+ "http://xarray.pydata.org/en/stable/.\n"
+ "Pandas provides a `.to_xarray()` method to help "
+ "automate this conversion.\n",
+ DeprecationWarning, stacklevel=3)
+
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 9c9f861451309..89d2f5b24d77e 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -927,8 +927,9 @@ def f(arg, *args, **kwargs):
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
- output will be a Panel in the case of DataFrame inputs. In the case of
- missing elements, only complete pairwise observations will be used.
+ output will be a MultiIndexed DataFrame in the case of DataFrame
+ inputs. In the case of missing elements, only complete pairwise
+ observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
@@ -964,11 +965,12 @@ def _get_cov(X, Y):
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
- If False then only matching columns between self and other will be used
- and the output will be a DataFrame.
+ If False then only matching columns between self and other will be
+ used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
- output will be a Panel in the case of DataFrame inputs. In the case of
- missing elements, only complete pairwise observations will be used.""")
+ output will be a MultiIndex DataFrame in the case of DataFrame inputs.
+ In the case of missing elements, only complete pairwise observations
+ will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
@@ -1397,8 +1399,9 @@ def _constructor(self):
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
- will be a Panel in the case of DataFrame inputs. In the case of missing
- elements, only complete pairwise observations will be used.
+ will be a MultiIndex DataFrame in the case of DataFrame inputs.
+ In the case of missing elements, only complete pairwise observations will
+ be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
@@ -1652,7 +1655,8 @@ def _cov(x, y):
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
- from pandas import Series, DataFrame, Panel
+ from pandas import Series, DataFrame
+
if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and
isinstance(arg2, (np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
@@ -1684,10 +1688,13 @@ def dataframe_from_int_dict(data, frame_template):
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
- X, Y = arg1.align(arg2, join='outer')
+ with warnings.catch_warnings(record=True):
+ X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
- res_columns = arg1.columns.union(arg2.columns)
+
+ with warnings.catch_warnings(record=True):
+ res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
@@ -1703,12 +1710,39 @@ def dataframe_from_int_dict(data, frame_template):
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
- p = Panel.from_dict(results).swapaxes('items', 'major')
- if len(p.major_axis) > 0:
- p.major_axis = arg1.columns[p.major_axis]
- if len(p.minor_axis) > 0:
- p.minor_axis = arg2.columns[p.minor_axis]
- return p
+
+ # TODO: not the most efficient (perf-wise)
+ # though not bad code-wise
+ from pandas import Panel, MultiIndex, Index
+ with warnings.catch_warnings(record=True):
+ p = Panel.from_dict(results).swapaxes('items', 'major')
+ if len(p.major_axis) > 0:
+ p.major_axis = arg1.columns[p.major_axis]
+ if len(p.minor_axis) > 0:
+ p.minor_axis = arg2.columns[p.minor_axis]
+
+ if len(p.items):
+ result = pd.concat(
+ [p.iloc[i].T for i in range(len(p.items))],
+ keys=p.items)
+ else:
+
+ result = DataFrame(
+ index=MultiIndex(levels=[arg1.index, arg1.columns],
+ labels=[[], []]),
+ columns=arg2.columns,
+ dtype='float64')
+
+ # reset our index names to arg1 names
+ # reset our column names to arg2 names
+ # careful not to mutate the original names
+ result.columns = Index(result.columns).set_names(
+ arg2.columns.name)
+ result.index = result.index.set_names(
+ [arg1.index.name, arg1.columns.name])
+
+ return result
+
else:
raise ValueError("'pairwise' is not True/False")
else:
diff --git a/pandas/indexes/frozen.py b/pandas/indexes/frozen.py
index 97a1a3ea99e65..ab1228c008ca8 100644
--- a/pandas/indexes/frozen.py
+++ b/pandas/indexes/frozen.py
@@ -117,6 +117,30 @@ def __unicode__(self):
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
+ def searchsorted(self, v, side='left', sorter=None):
+ """
+ Find indices where elements of v should be inserted
+ in a to maintain order.
+
+ For full documentation, see `numpy.searchsorted`
+
+ See Also
+ --------
+ numpy.searchsorted : equivalent function
+ """
+
+ # we are much more performant if the searched
+ # indexer is the same type as the array
+ # this doesn't matter for int64, but DOES
+ # matter for smaller int dtypes
+ # https://github.com/numpy/numpy/issues/5370
+ try:
+ v = self.dtype.type(v)
+ except:
+ pass
+ return super(FrozenNDArray, self).searchsorted(
+ v, side=side, sorter=sorter)
+
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 96e0effbd7608..77774f3284fef 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -2203,20 +2203,14 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
else:
loc = level_index.get_loc(key)
- if level > 0 or self.lexsort_depth == 0:
+ if isinstance(loc, slice):
+ return loc
+ elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
- else:
- # sorted, so can return slice object -> view
- try:
- loc = labels.dtype.type(loc)
- except TypeError:
- # this occurs when loc is a slice (partial string indexing)
- # but the TypeError raised by searchsorted in this case
- # is catched in Index._has_valid_type()
- pass
- i = labels.searchsorted(loc, side='left')
- j = labels.searchsorted(loc, side='right')
- return slice(i, j)
+
+ i = labels.searchsorted(loc, side='left')
+ j = labels.searchsorted(loc, side='right')
+ return slice(i, j)
def get_locs(self, tup):
"""
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 82a98f5d08488..9908a320a6646 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -1,9 +1,9 @@
import pytest
import sys
import os
-from warnings import catch_warnings
import tempfile
from contextlib import contextmanager
+from warnings import catch_warnings
import datetime
from datetime import timedelta
@@ -11,7 +11,7 @@
import pandas
import pandas as pd
-from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
+from pandas import (Series, DataFrame, Panel, Panel4D, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
isnull)
@@ -22,8 +22,6 @@
tables = pytest.importorskip('tables')
from pandas.io.pytables import TableIterator
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
- IncompatibilityWarning, PerformanceWarning,
- AttributeConflictWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
@@ -205,8 +203,10 @@ def roundtrip(key, obj, **kwargs):
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
- o = tm.makePanel()
- assert_panel_equal(o, roundtrip('panel', o))
+ with catch_warnings(record=True):
+
+ o = tm.makePanel()
+ assert_panel_equal(o, roundtrip('panel', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
@@ -368,8 +368,9 @@ def test_keys(self):
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
- store['d'] = tm.makePanel()
- store['foo/bar'] = tm.makePanel()
+ with catch_warnings(record=True):
+ store['d'] = tm.makePanel()
+ store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
self.assertTrue(set(store.keys()) == expected)
@@ -388,9 +389,11 @@ def test_repr(self):
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
- store['d'] = tm.makePanel()
- store['foo/bar'] = tm.makePanel()
- store.append('e', tm.makePanel())
+
+ with catch_warnings(record=True):
+ store['d'] = tm.makePanel()
+ store['foo/bar'] = tm.makePanel()
+ store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
@@ -755,6 +758,7 @@ def test_put_mixed_type(self):
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
+ # PerformanceWarning
with catch_warnings(record=True):
store.put('df', df)
@@ -764,39 +768,42 @@ def test_put_mixed_type(self):
def test_append(self):
with ensure_clean_store(self.path) as store:
- df = tm.makeTimeDataFrame()
- _maybe_remove(store, 'df1')
- store.append('df1', df[:10])
- store.append('df1', df[10:])
- tm.assert_frame_equal(store['df1'], df)
-
- _maybe_remove(store, 'df2')
- store.put('df2', df[:10], format='table')
- store.append('df2', df[10:])
- tm.assert_frame_equal(store['df2'], df)
-
- _maybe_remove(store, 'df3')
- store.append('/df3', df[:10])
- store.append('/df3', df[10:])
- tm.assert_frame_equal(store['df3'], df)
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
+
+ df = tm.makeTimeDataFrame()
+ _maybe_remove(store, 'df1')
+ store.append('df1', df[:10])
+ store.append('df1', df[10:])
+ tm.assert_frame_equal(store['df1'], df)
+
+ _maybe_remove(store, 'df2')
+ store.put('df2', df[:10], format='table')
+ store.append('df2', df[10:])
+ tm.assert_frame_equal(store['df2'], df)
+
+ _maybe_remove(store, 'df3')
+ store.append('/df3', df[:10])
+ store.append('/df3', df[10:])
+ tm.assert_frame_equal(store['df3'], df)
+
+            # this is allowed but almost always don't want to do it
+ # tables.NaturalNameWarning
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
- # panel
- wp = tm.makePanel()
- _maybe_remove(store, 'wp1')
- store.append('wp1', wp.iloc[:, :10, :])
- store.append('wp1', wp.iloc[:, 10:, :])
- assert_panel_equal(store['wp1'], wp)
+ # panel
+ wp = tm.makePanel()
+ _maybe_remove(store, 'wp1')
+ store.append('wp1', wp.iloc[:, :10, :])
+ store.append('wp1', wp.iloc[:, 10:, :])
+ assert_panel_equal(store['wp1'], wp)
- # ndim
- with catch_warnings(record=True):
+ # ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :])
@@ -820,42 +827,42 @@ def test_append(self):
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
- # test using differt order of items on the non-index axes
- _maybe_remove(store, 'wp1')
- wp_append1 = wp.iloc[:, :10, :]
- store.append('wp1', wp_append1)
- wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
- store.append('wp1', wp_append2)
- assert_panel_equal(store['wp1'], wp)
-
- # dtype issues - mizxed type in a single object column
- df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
- df['mixed_column'] = 'testing'
- df.loc[2, 'mixed_column'] = np.nan
- _maybe_remove(store, 'df')
- store.append('df', df)
- tm.assert_frame_equal(store['df'], df)
-
- # uints - test storage of uints
- uint_data = DataFrame({
- 'u08': Series(np.random.randint(0, high=255, size=5),
- dtype=np.uint8),
- 'u16': Series(np.random.randint(0, high=65535, size=5),
- dtype=np.uint16),
- 'u32': Series(np.random.randint(0, high=2**30, size=5),
- dtype=np.uint32),
- 'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
- dtype=np.uint64)}, index=np.arange(5))
- _maybe_remove(store, 'uints')
- store.append('uints', uint_data)
- tm.assert_frame_equal(store['uints'], uint_data)
-
- # uints - test storage of uints in indexable columns
- _maybe_remove(store, 'uints')
- # 64-bit indices not yet supported
- store.append('uints', uint_data, data_columns=[
- 'u08', 'u16', 'u32'])
- tm.assert_frame_equal(store['uints'], uint_data)
+            # test using different order of items on the non-index axes
+ _maybe_remove(store, 'wp1')
+ wp_append1 = wp.iloc[:, :10, :]
+ store.append('wp1', wp_append1)
+ wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
+ store.append('wp1', wp_append2)
+ assert_panel_equal(store['wp1'], wp)
+
+            # dtype issues - mixed type in a single object column
+ df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
+ df['mixed_column'] = 'testing'
+ df.loc[2, 'mixed_column'] = np.nan
+ _maybe_remove(store, 'df')
+ store.append('df', df)
+ tm.assert_frame_equal(store['df'], df)
+
+ # uints - test storage of uints
+ uint_data = DataFrame({
+ 'u08': Series(np.random.randint(0, high=255, size=5),
+ dtype=np.uint8),
+ 'u16': Series(np.random.randint(0, high=65535, size=5),
+ dtype=np.uint16),
+ 'u32': Series(np.random.randint(0, high=2**30, size=5),
+ dtype=np.uint32),
+ 'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
+ dtype=np.uint64)}, index=np.arange(5))
+ _maybe_remove(store, 'uints')
+ store.append('uints', uint_data)
+ tm.assert_frame_equal(store['uints'], uint_data)
+
+ # uints - test storage of uints in indexable columns
+ _maybe_remove(store, 'uints')
+ # 64-bit indices not yet supported
+ store.append('uints', uint_data, data_columns=[
+ 'u08', 'u16', 'u32'])
+ tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
@@ -937,8 +944,9 @@ def check(format, index):
# only support for fixed types (and they have a perf warning)
self.assertRaises(TypeError, check, 'table', index)
- with tm.assert_produces_warning(
- expected_warning=PerformanceWarning):
+
+ # PerformanceWarning
+ with catch_warnings(record=True):
check('fixed', index)
def test_encoding(self):
@@ -1131,15 +1139,17 @@ def test_append_all_nans(self):
[[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
[[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
- panel_with_missing = Panel(matrix, items=['Item1', 'Item2', 'Item3'],
- major_axis=[1, 2],
- minor_axis=['A', 'B', 'C'])
+ with catch_warnings(record=True):
+ panel_with_missing = Panel(matrix,
+ items=['Item1', 'Item2', 'Item3'],
+ major_axis=[1, 2],
+ minor_axis=['A', 'B', 'C'])
- with ensure_clean_path(self.path) as path:
- panel_with_missing.to_hdf(
- path, 'panel_with_missing', format='table')
- reloaded_panel = read_hdf(path, 'panel_with_missing')
- tm.assert_panel_equal(panel_with_missing, reloaded_panel)
+ with ensure_clean_path(self.path) as path:
+ panel_with_missing.to_hdf(
+ path, 'panel_with_missing', format='table')
+ reloaded_panel = read_hdf(path, 'panel_with_missing')
+ tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
@@ -1158,13 +1168,14 @@ def test_append_frame_column_oriented(self):
# selection on the non-indexable
result = store.select(
- 'df1', ('columns=A', Term('index=df.index[0:4]')))
+ 'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
- self.assertRaises(TypeError, store.select, 'df1', (
- 'columns=A', Term('index>df.index[4]')))
+ with pytest.raises(TypeError):
+ store.select('df1',
+ 'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
@@ -1265,15 +1276,15 @@ def check_indexers(key, indexers):
assert_panel4d_equal(result, expected)
# partial selection2
- result = store.select('p4d', [Term(
- 'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
+ result = store.select(
+ 'p4d', "labels='l1' and items='ItemA' and minor_axis='B'")
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existant partial selection
- result = store.select('p4d', [Term(
- 'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
+ result = store.select(
+ 'p4d', "labels='l1' and items='Item1' and minor_axis='B'")
expected = p4d.reindex(labels=['l1'], items=[],
minor_axis=['B'])
assert_panel4d_equal(result, expected)
@@ -1281,100 +1292,103 @@ def check_indexers(key, indexers):
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
- wp = tm.makePanel()
- wp2 = wp.rename_axis(
- dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
-
- def check_col(key, name, size):
- self.assertEqual(getattr(store.get_storer(
- key).table.description, name).itemsize, size)
-
- store.append('s1', wp, min_itemsize=20)
- store.append('s1', wp2)
- expected = concat([wp, wp2], axis=2)
- expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
- assert_panel_equal(store['s1'], expected)
- check_col('s1', 'minor_axis', 20)
-
- # test dict format
- store.append('s2', wp, min_itemsize={'minor_axis': 20})
- store.append('s2', wp2)
- expected = concat([wp, wp2], axis=2)
- expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
- assert_panel_equal(store['s2'], expected)
- check_col('s2', 'minor_axis', 20)
-
- # apply the wrong field (similar to #1)
- store.append('s3', wp, min_itemsize={'major_axis': 20})
- self.assertRaises(ValueError, store.append, 's3', wp2)
-
- # test truncation of bigger strings
- store.append('s4', wp)
- self.assertRaises(ValueError, store.append, 's4', wp2)
-
- # avoid truncation on elements
- df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
- store.append('df_big', df)
- tm.assert_frame_equal(store.select('df_big'), df)
- check_col('df_big', 'values_block_1', 15)
-
- # appending smaller string ok
- df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
- store.append('df_big', df2)
- expected = concat([df, df2])
- tm.assert_frame_equal(store.select('df_big'), expected)
- check_col('df_big', 'values_block_1', 15)
-
- # avoid truncation on elements
- df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
- store.append('df_big2', df, min_itemsize={'values': 50})
- tm.assert_frame_equal(store.select('df_big2'), df)
- check_col('df_big2', 'values_block_1', 50)
-
- # bigger string on next append
- store.append('df_new', df)
- df_new = DataFrame(
- [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
- self.assertRaises(ValueError, store.append, 'df_new', df_new)
-
- # min_itemsize on Series index (GH 11412)
- df = tm.makeMixedDataFrame().set_index('C')
- store.append('ss', df['B'], min_itemsize={'index': 4})
- tm.assert_series_equal(store.select('ss'), df['B'])
-
- # same as above, with data_columns=True
- store.append('ss2', df['B'], data_columns=True,
- min_itemsize={'index': 4})
- tm.assert_series_equal(store.select('ss2'), df['B'])
-
- # min_itemsize in index without appending (GH 10381)
- store.put('ss3', df, format='table',
- min_itemsize={'index': 6})
- # just make sure there is a longer string:
- df2 = df.copy().reset_index().assign(C='longer').set_index('C')
- store.append('ss3', df2)
- tm.assert_frame_equal(store.select('ss3'),
- pd.concat([df, df2]))
-
- # same as above, with a Series
- store.put('ss4', df['B'], format='table',
- min_itemsize={'index': 6})
- store.append('ss4', df2['B'])
- tm.assert_series_equal(store.select('ss4'),
- pd.concat([df['B'], df2['B']]))
-
- # with nans
- _maybe_remove(store, 'df')
- df = tm.makeTimeDataFrame()
- df['string'] = 'foo'
- df.loc[1:4, 'string'] = np.nan
- df['string2'] = 'bar'
- df.loc[4:8, 'string2'] = np.nan
- df['string3'] = 'bah'
- df.loc[1:, 'string3'] = np.nan
- store.append('df', df)
- result = store.select('df')
- tm.assert_frame_equal(result, df)
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
+ wp2 = wp.rename_axis(
+ dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
+
+ def check_col(key, name, size):
+ self.assertEqual(getattr(store.get_storer(
+ key).table.description, name).itemsize, size)
+
+ store.append('s1', wp, min_itemsize=20)
+ store.append('s1', wp2)
+ expected = concat([wp, wp2], axis=2)
+ expected = expected.reindex(
+ minor_axis=sorted(expected.minor_axis))
+ assert_panel_equal(store['s1'], expected)
+ check_col('s1', 'minor_axis', 20)
+
+ # test dict format
+ store.append('s2', wp, min_itemsize={'minor_axis': 20})
+ store.append('s2', wp2)
+ expected = concat([wp, wp2], axis=2)
+ expected = expected.reindex(
+ minor_axis=sorted(expected.minor_axis))
+ assert_panel_equal(store['s2'], expected)
+ check_col('s2', 'minor_axis', 20)
+
+ # apply the wrong field (similar to #1)
+ store.append('s3', wp, min_itemsize={'major_axis': 20})
+ self.assertRaises(ValueError, store.append, 's3', wp2)
+
+ # test truncation of bigger strings
+ store.append('s4', wp)
+ self.assertRaises(ValueError, store.append, 's4', wp2)
+
+ # avoid truncation on elements
+ df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
+ store.append('df_big', df)
+ tm.assert_frame_equal(store.select('df_big'), df)
+ check_col('df_big', 'values_block_1', 15)
+
+ # appending smaller string ok
+ df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
+ store.append('df_big', df2)
+ expected = concat([df, df2])
+ tm.assert_frame_equal(store.select('df_big'), expected)
+ check_col('df_big', 'values_block_1', 15)
+
+ # avoid truncation on elements
+ df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
+ store.append('df_big2', df, min_itemsize={'values': 50})
+ tm.assert_frame_equal(store.select('df_big2'), df)
+ check_col('df_big2', 'values_block_1', 50)
+
+ # bigger string on next append
+ store.append('df_new', df)
+ df_new = DataFrame(
+ [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
+ self.assertRaises(ValueError, store.append, 'df_new', df_new)
+
+ # min_itemsize on Series index (GH 11412)
+ df = tm.makeMixedDataFrame().set_index('C')
+ store.append('ss', df['B'], min_itemsize={'index': 4})
+ tm.assert_series_equal(store.select('ss'), df['B'])
+
+ # same as above, with data_columns=True
+ store.append('ss2', df['B'], data_columns=True,
+ min_itemsize={'index': 4})
+ tm.assert_series_equal(store.select('ss2'), df['B'])
+
+ # min_itemsize in index without appending (GH 10381)
+ store.put('ss3', df, format='table',
+ min_itemsize={'index': 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C='longer').set_index('C')
+ store.append('ss3', df2)
+ tm.assert_frame_equal(store.select('ss3'),
+ pd.concat([df, df2]))
+
+ # same as above, with a Series
+ store.put('ss4', df['B'], format='table',
+ min_itemsize={'index': 6})
+ store.append('ss4', df2['B'])
+ tm.assert_series_equal(store.select('ss4'),
+ pd.concat([df['B'], df2['B']]))
+
+ # with nans
+ _maybe_remove(store, 'df')
+ df = tm.makeTimeDataFrame()
+ df['string'] = 'foo'
+ df.loc[1:4, 'string'] = np.nan
+ df['string2'] = 'bar'
+ df.loc[4:8, 'string2'] = np.nan
+ df['string3'] = 'bah'
+ df.loc[1:, 'string3'] = np.nan
+ store.append('df', df)
+ result = store.select('df')
+ tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
@@ -1452,13 +1466,13 @@ def test_append_with_data_columns(self):
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
- result = store.select('df', [Term('B>0')])
+ result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
- 'df', [Term('B>0'), Term('index>df.index[3]')])
+ 'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
@@ -1470,7 +1484,7 @@ def test_append_with_data_columns(self):
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
- result = store.select('df', [Term('string=foo')])
+ result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
@@ -1523,15 +1537,15 @@ def check_col(key, name, size):
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
- result = store.select('df', [Term('string=foo'), Term(
- 'string2=foo'), Term('A>0'), Term('B<0')])
+ result = store.select('df',
+ "string='foo' and string2='foo'"
+ " and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
- result = store.select('df', [Term('string=foo'), Term(
- 'string2=cool')])
+ result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
@@ -1551,7 +1565,7 @@ def check_col(key, name, size):
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
- result = store.select('df_dc', [Term('B>0')])
+ result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
@@ -1578,7 +1592,7 @@ def check_col(key, name, size):
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
- result = store.select('df_dc', [Term('B>0')])
+ result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
@@ -1589,99 +1603,104 @@ def check_col(key, name, size):
tm.assert_frame_equal(result, expected)
with ensure_clean_store(self.path) as store:
- # panel
- # GH5717 not handling data_columns
- np.random.seed(1234)
- p = tm.makePanel()
-
- store.append('p1', p)
- tm.assert_panel_equal(store.select('p1'), p)
-
- store.append('p2', p, data_columns=True)
- tm.assert_panel_equal(store.select('p2'), p)
-
- result = store.select('p2', where='ItemA>0')
- expected = p.to_frame()
- expected = expected[expected['ItemA'] > 0]
- tm.assert_frame_equal(result.to_frame(), expected)
-
- result = store.select('p2', where='ItemA>0 & minor_axis=["A","B"]')
- expected = p.to_frame()
- expected = expected[expected['ItemA'] > 0]
- expected = expected[expected.reset_index(
- level=['major']).index.isin(['A', 'B'])]
- tm.assert_frame_equal(result.to_frame(), expected)
+ with catch_warnings(record=True):
+ # panel
+ # GH5717 not handling data_columns
+ np.random.seed(1234)
+ p = tm.makePanel()
+
+ store.append('p1', p)
+ tm.assert_panel_equal(store.select('p1'), p)
+
+ store.append('p2', p, data_columns=True)
+ tm.assert_panel_equal(store.select('p2'), p)
+
+ result = store.select('p2', where='ItemA>0')
+ expected = p.to_frame()
+ expected = expected[expected['ItemA'] > 0]
+ tm.assert_frame_equal(result.to_frame(), expected)
+
+ result = store.select(
+ 'p2', where='ItemA>0 & minor_axis=["A","B"]')
+ expected = p.to_frame()
+ expected = expected[expected['ItemA'] > 0]
+ expected = expected[expected.reset_index(
+ level=['major']).index.isin(['A', 'B'])]
+ tm.assert_frame_equal(result.to_frame(), expected)
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
- def col(t, column):
- return getattr(store.get_storer(t).table.cols, column)
+ with catch_warnings(record=True):
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
- # index=False
- wp = tm.makePanel()
- store.append('p5', wp, index=False)
- store.create_table_index('p5', columns=['major_axis'])
- assert(col('p5', 'major_axis').is_indexed is True)
- assert(col('p5', 'minor_axis').is_indexed is False)
-
- # index=True
- store.append('p5i', wp, index=True)
- assert(col('p5i', 'major_axis').is_indexed is True)
- assert(col('p5i', 'minor_axis').is_indexed is True)
-
- # default optlevels
- store.get_storer('p5').create_index()
- assert(col('p5', 'major_axis').index.optlevel == 6)
- assert(col('p5', 'minor_axis').index.kind == 'medium')
-
- # let's change the indexing scheme
- store.create_table_index('p5')
- assert(col('p5', 'major_axis').index.optlevel == 6)
- assert(col('p5', 'minor_axis').index.kind == 'medium')
- store.create_table_index('p5', optlevel=9)
- assert(col('p5', 'major_axis').index.optlevel == 9)
- assert(col('p5', 'minor_axis').index.kind == 'medium')
- store.create_table_index('p5', kind='full')
- assert(col('p5', 'major_axis').index.optlevel == 9)
- assert(col('p5', 'minor_axis').index.kind == 'full')
- store.create_table_index('p5', optlevel=1, kind='light')
- assert(col('p5', 'major_axis').index.optlevel == 1)
- assert(col('p5', 'minor_axis').index.kind == 'light')
-
- # data columns
- df = tm.makeTimeDataFrame()
- df['string'] = 'foo'
- df['string2'] = 'bar'
- store.append('f', df, data_columns=['string', 'string2'])
- assert(col('f', 'index').is_indexed is True)
- assert(col('f', 'string').is_indexed is True)
- assert(col('f', 'string2').is_indexed is True)
-
- # specify index=columns
- store.append(
- 'f2', df, index=['string'], data_columns=['string', 'string2'])
- assert(col('f2', 'index').is_indexed is False)
- assert(col('f2', 'string').is_indexed is True)
- assert(col('f2', 'string2').is_indexed is False)
+ # index=False
+ wp = tm.makePanel()
+ store.append('p5', wp, index=False)
+ store.create_table_index('p5', columns=['major_axis'])
+ assert(col('p5', 'major_axis').is_indexed is True)
+ assert(col('p5', 'minor_axis').is_indexed is False)
+
+ # index=True
+ store.append('p5i', wp, index=True)
+ assert(col('p5i', 'major_axis').is_indexed is True)
+ assert(col('p5i', 'minor_axis').is_indexed is True)
+
+ # default optlevels
+ store.get_storer('p5').create_index()
+ assert(col('p5', 'major_axis').index.optlevel == 6)
+ assert(col('p5', 'minor_axis').index.kind == 'medium')
+
+ # let's change the indexing scheme
+ store.create_table_index('p5')
+ assert(col('p5', 'major_axis').index.optlevel == 6)
+ assert(col('p5', 'minor_axis').index.kind == 'medium')
+ store.create_table_index('p5', optlevel=9)
+ assert(col('p5', 'major_axis').index.optlevel == 9)
+ assert(col('p5', 'minor_axis').index.kind == 'medium')
+ store.create_table_index('p5', kind='full')
+ assert(col('p5', 'major_axis').index.optlevel == 9)
+ assert(col('p5', 'minor_axis').index.kind == 'full')
+ store.create_table_index('p5', optlevel=1, kind='light')
+ assert(col('p5', 'major_axis').index.optlevel == 1)
+ assert(col('p5', 'minor_axis').index.kind == 'light')
+
+ # data columns
+ df = tm.makeTimeDataFrame()
+ df['string'] = 'foo'
+ df['string2'] = 'bar'
+ store.append('f', df, data_columns=['string', 'string2'])
+ assert(col('f', 'index').is_indexed is True)
+ assert(col('f', 'string').is_indexed is True)
+ assert(col('f', 'string2').is_indexed is True)
+
+ # specify index=columns
+ store.append(
+ 'f2', df, index=['string'],
+ data_columns=['string', 'string2'])
+ assert(col('f2', 'index').is_indexed is False)
+ assert(col('f2', 'string').is_indexed is True)
+ assert(col('f2', 'string2').is_indexed is False)
- # try to index a non-table
- _maybe_remove(store, 'f2')
- store.put('f2', df)
- self.assertRaises(TypeError, store.create_table_index, 'f2')
+ # try to index a non-table
+ _maybe_remove(store, 'f2')
+ store.put('f2', df)
+ self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
- wp = tm.makePanel()
- wp1 = wp.iloc[:, :10, :]
- wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
- 10:, :]
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
+ wp1 = wp.iloc[:, :10, :]
+ wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
+ 10:, :]
- with ensure_clean_store(self.path) as store:
- store.put('panel', wp1, format='table')
- self.assertRaises(ValueError, store.put, 'panel', wp2,
- append=True)
+ with ensure_clean_store(self.path) as store:
+ store.put('panel', wp1, format='table')
+ self.assertRaises(ValueError, store.put, 'panel', wp2,
+ append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
@@ -1909,8 +1928,9 @@ def check(obj, comparator):
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
- p = tm.makePanel()
- check(p, assert_panel_equal)
+ with catch_warnings(record=True):
+ p = tm.makePanel()
+ check(p, assert_panel_equal)
with catch_warnings(record=True):
p4d = tm.makePanel4D()
@@ -1936,21 +1956,23 @@ def check(obj, comparator):
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
- # 0 len
- p_empty = Panel(items=list('ABC'))
- store.append('p', p_empty)
- self.assertRaises(KeyError, store.select, 'p')
+ with catch_warnings(record=True):
- # repeated append of 0/non-zero frames
- p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
- store.append('p', p)
- assert_panel_equal(store.select('p'), p)
- store.append('p', p_empty)
- assert_panel_equal(store.select('p'), p)
+ # 0 len
+ p_empty = Panel(items=list('ABC'))
+ store.append('p', p_empty)
+ self.assertRaises(KeyError, store.select, 'p')
- # store
- store.put('p2', p_empty)
- assert_panel_equal(store.select('p2'), p_empty)
+ # repeated append of 0/non-zero frames
+ p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
+ store.append('p', p)
+ assert_panel_equal(store.select('p'), p)
+ store.append('p', p_empty)
+ assert_panel_equal(store.select('p'), p)
+
+ # store
+ store.put('p2', p_empty)
+ assert_panel_equal(store.select('p2'), p_empty)
def test_append_raise(self):
@@ -2066,22 +2088,25 @@ def test_table_mixed_dtypes(self):
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
- # panel
- wp = tm.makePanel()
- wp['obj1'] = 'foo'
- wp['obj2'] = 'bar'
- wp['bool1'] = wp['ItemA'] > 0
- wp['bool2'] = wp['ItemB'] > 0
- wp['int1'] = 1
- wp['int2'] = 2
- wp = wp._consolidate()
+ with catch_warnings(record=True):
- with ensure_clean_store(self.path) as store:
- store.append('p1_mixed', wp)
- assert_panel_equal(store.select('p1_mixed'), wp)
+ # panel
+ wp = tm.makePanel()
+ wp['obj1'] = 'foo'
+ wp['obj2'] = 'bar'
+ wp['bool1'] = wp['ItemA'] > 0
+ wp['bool2'] = wp['ItemB'] > 0
+ wp['int1'] = 1
+ wp['int2'] = 2
+ wp = wp._consolidate()
with catch_warnings(record=True):
+ with ensure_clean_store(self.path) as store:
+ store.append('p1_mixed', wp)
+ assert_panel_equal(store.select('p1_mixed'), wp)
+
+ with catch_warnings(record=True):
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
@@ -2166,9 +2191,12 @@ def test_append_with_timedelta(self):
result = store.select('df')
assert_frame_equal(result, df)
- result = store.select('df', "C<100000")
+ result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
+ result = store.select('df', where="C<pd.Timedelta('-3D')")
+ assert_frame_equal(result, df.iloc[3:])
+
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
@@ -2229,183 +2257,188 @@ def test_remove_where(self):
with ensure_clean_store(self.path) as store:
- # non-existance
- crit1 = Term('index>foo')
- self.assertRaises(KeyError, store.remove, 'a', [crit1])
+ with catch_warnings(record=True):
- # try to remove non-table (with crit)
- # non-table ok (where = None)
- wp = tm.makePanel(30)
- store.put('wp', wp, format='table')
- store.remove('wp', ["minor_axis=['A', 'D']"])
- rs = store.select('wp')
- expected = wp.reindex(minor_axis=['B', 'C'])
- assert_panel_equal(rs, expected)
+ # non-existance
+ crit1 = 'index>foo'
+ self.assertRaises(KeyError, store.remove, 'a', [crit1])
- # empty where
- _maybe_remove(store, 'wp')
- store.put('wp', wp, format='table')
+ # try to remove non-table (with crit)
+ # non-table ok (where = None)
+ wp = tm.makePanel(30)
+ store.put('wp', wp, format='table')
+ store.remove('wp', ["minor_axis=['A', 'D']"])
+ rs = store.select('wp')
+ expected = wp.reindex(minor_axis=['B', 'C'])
+ assert_panel_equal(rs, expected)
- # deleted number (entire table)
- n = store.remove('wp', [])
- self.assertTrue(n == 120)
+ # empty where
+ _maybe_remove(store, 'wp')
+ store.put('wp', wp, format='table')
- # non - empty where
- _maybe_remove(store, 'wp')
- store.put('wp', wp, format='table')
- self.assertRaises(ValueError, store.remove,
- 'wp', ['foo'])
+ # deleted number (entire table)
+ n = store.remove('wp', [])
+ self.assertTrue(n == 120)
- # selectin non-table with a where
- # store.put('wp2', wp, format='f')
- # self.assertRaises(ValueError, store.remove,
- # 'wp2', [('column', ['A', 'D'])])
+ # non - empty where
+ _maybe_remove(store, 'wp')
+ store.put('wp', wp, format='table')
+ self.assertRaises(ValueError, store.remove,
+ 'wp', ['foo'])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
- wp = tm.makePanel(30)
-
- # start
- _maybe_remove(store, 'wp1')
- store.put('wp1', wp, format='t')
- n = store.remove('wp1', start=32)
- self.assertTrue(n == 120 - 32)
- result = store.select('wp1')
- expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
- assert_panel_equal(result, expected)
-
- _maybe_remove(store, 'wp2')
- store.put('wp2', wp, format='t')
- n = store.remove('wp2', start=-32)
- self.assertTrue(n == 32)
- result = store.select('wp2')
- expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
- assert_panel_equal(result, expected)
-
- # stop
- _maybe_remove(store, 'wp3')
- store.put('wp3', wp, format='t')
- n = store.remove('wp3', stop=32)
- self.assertTrue(n == 32)
- result = store.select('wp3')
- expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
- assert_panel_equal(result, expected)
-
- _maybe_remove(store, 'wp4')
- store.put('wp4', wp, format='t')
- n = store.remove('wp4', stop=-32)
- self.assertTrue(n == 120 - 32)
- result = store.select('wp4')
- expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
- assert_panel_equal(result, expected)
-
- # start n stop
- _maybe_remove(store, 'wp5')
- store.put('wp5', wp, format='t')
- n = store.remove('wp5', start=16, stop=-16)
- self.assertTrue(n == 120 - 32)
- result = store.select('wp5')
- expected = wp.reindex(major_axis=wp.major_axis[
- :16 // 4].union(wp.major_axis[-16 // 4:]))
- assert_panel_equal(result, expected)
-
- _maybe_remove(store, 'wp6')
- store.put('wp6', wp, format='t')
- n = store.remove('wp6', start=16, stop=16)
- self.assertTrue(n == 0)
- result = store.select('wp6')
- expected = wp.reindex(major_axis=wp.major_axis)
- assert_panel_equal(result, expected)
-
- # with where
- _maybe_remove(store, 'wp7')
-
- # TODO: unused?
- date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
-
- crit = Term('major_axis=date')
- store.put('wp7', wp, format='t')
- n = store.remove('wp7', where=[crit], stop=80)
- self.assertTrue(n == 28)
- result = store.select('wp7')
- expected = wp.reindex(major_axis=wp.major_axis.difference(
- wp.major_axis[np.arange(0, 20, 3)]))
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ wp = tm.makePanel(30)
+
+ # start
+ _maybe_remove(store, 'wp1')
+ store.put('wp1', wp, format='t')
+ n = store.remove('wp1', start=32)
+ self.assertTrue(n == 120 - 32)
+ result = store.select('wp1')
+ expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
+ assert_panel_equal(result, expected)
+
+ _maybe_remove(store, 'wp2')
+ store.put('wp2', wp, format='t')
+ n = store.remove('wp2', start=-32)
+ self.assertTrue(n == 32)
+ result = store.select('wp2')
+ expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
+ assert_panel_equal(result, expected)
+
+ # stop
+ _maybe_remove(store, 'wp3')
+ store.put('wp3', wp, format='t')
+ n = store.remove('wp3', stop=32)
+ self.assertTrue(n == 32)
+ result = store.select('wp3')
+ expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
+ assert_panel_equal(result, expected)
+
+ _maybe_remove(store, 'wp4')
+ store.put('wp4', wp, format='t')
+ n = store.remove('wp4', stop=-32)
+ self.assertTrue(n == 120 - 32)
+ result = store.select('wp4')
+ expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
+ assert_panel_equal(result, expected)
+
+ # start n stop
+ _maybe_remove(store, 'wp5')
+ store.put('wp5', wp, format='t')
+ n = store.remove('wp5', start=16, stop=-16)
+ self.assertTrue(n == 120 - 32)
+ result = store.select('wp5')
+ expected = wp.reindex(
+ major_axis=(wp.major_axis[:16 // 4]
+ .union(wp.major_axis[-16 // 4:])))
+ assert_panel_equal(result, expected)
+
+ _maybe_remove(store, 'wp6')
+ store.put('wp6', wp, format='t')
+ n = store.remove('wp6', start=16, stop=16)
+ self.assertTrue(n == 0)
+ result = store.select('wp6')
+ expected = wp.reindex(major_axis=wp.major_axis)
+ assert_panel_equal(result, expected)
+
+ # with where
+ _maybe_remove(store, 'wp7')
+
+ # TODO: unused?
+ date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
+
+ crit = 'major_axis=date'
+ store.put('wp7', wp, format='t')
+ n = store.remove('wp7', where=[crit], stop=80)
+ self.assertTrue(n == 28)
+ result = store.select('wp7')
+ expected = wp.reindex(major_axis=wp.major_axis.difference(
+ wp.major_axis[np.arange(0, 20, 3)]))
+ assert_panel_equal(result, expected)
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
- wp = tm.makePanel(30)
-
- # group row removal
- _maybe_remove(store, 'wp3')
- date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
- crit4 = Term('major_axis=date4')
- store.put('wp3', wp, format='t')
- n = store.remove('wp3', where=[crit4])
- self.assertTrue(n == 36)
-
- result = store.select('wp3')
- expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
- assert_panel_equal(result, expected)
-
- # upper half
- _maybe_remove(store, 'wp')
- store.put('wp', wp, format='table')
- date = wp.major_axis[len(wp.major_axis) // 2]
-
- crit1 = Term('major_axis>date')
- crit2 = Term("minor_axis=['A', 'D']")
- n = store.remove('wp', where=[crit1])
- self.assertTrue(n == 56)
-
- n = store.remove('wp', where=[crit2])
- self.assertTrue(n == 32)
-
- result = store['wp']
- expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
- assert_panel_equal(result, expected)
-
- # individual row elements
- _maybe_remove(store, 'wp2')
- store.put('wp2', wp, format='table')
-
- date1 = wp.major_axis[1:3]
- crit1 = Term('major_axis=date1')
- store.remove('wp2', where=[crit1])
- result = store.select('wp2')
- expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
- assert_panel_equal(result, expected)
-
- date2 = wp.major_axis[5]
- crit2 = Term('major_axis=date2')
- store.remove('wp2', where=[crit2])
- result = store['wp2']
- expected = wp.reindex(major_axis=wp.major_axis.difference(date1)
- .difference(Index([date2])))
- assert_panel_equal(result, expected)
-
- date3 = [wp.major_axis[7], wp.major_axis[9]]
- crit3 = Term('major_axis=date3')
- store.remove('wp2', where=[crit3])
- result = store['wp2']
- expected = wp.reindex(major_axis=wp.major_axis
- .difference(date1)
- .difference(Index([date2]))
- .difference(Index(date3)))
- assert_panel_equal(result, expected)
-
- # corners
- _maybe_remove(store, 'wp4')
- store.put('wp4', wp, format='table')
- n = store.remove(
- 'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
- result = store.select('wp4')
- assert_panel_equal(result, wp)
+ with catch_warnings(record=True):
+ wp = tm.makePanel(30)
+
+ # group row removal
+ _maybe_remove(store, 'wp3')
+ date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
+ crit4 = 'major_axis=date4'
+ store.put('wp3', wp, format='t')
+ n = store.remove('wp3', where=[crit4])
+ self.assertTrue(n == 36)
+
+ result = store.select('wp3')
+ expected = wp.reindex(
+ major_axis=wp.major_axis.difference(date4))
+ assert_panel_equal(result, expected)
+
+ # upper half
+ _maybe_remove(store, 'wp')
+ store.put('wp', wp, format='table')
+ date = wp.major_axis[len(wp.major_axis) // 2]
+
+ crit1 = 'major_axis>date'
+ crit2 = "minor_axis=['A', 'D']"
+ n = store.remove('wp', where=[crit1])
+ self.assertTrue(n == 56)
+
+ n = store.remove('wp', where=[crit2])
+ self.assertTrue(n == 32)
+
+ result = store['wp']
+ expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
+ assert_panel_equal(result, expected)
+
+ # individual row elements
+ _maybe_remove(store, 'wp2')
+ store.put('wp2', wp, format='table')
+
+ date1 = wp.major_axis[1:3]
+ crit1 = 'major_axis=date1'
+ store.remove('wp2', where=[crit1])
+ result = store.select('wp2')
+ expected = wp.reindex(
+ major_axis=wp.major_axis.difference(date1))
+ assert_panel_equal(result, expected)
+
+ date2 = wp.major_axis[5]
+ crit2 = 'major_axis=date2'
+ store.remove('wp2', where=[crit2])
+ result = store['wp2']
+ expected = wp.reindex(
+ major_axis=(wp.major_axis
+ .difference(date1)
+ .difference(Index([date2]))
+ ))
+ assert_panel_equal(result, expected)
+
+ date3 = [wp.major_axis[7], wp.major_axis[9]]
+ crit3 = 'major_axis=date3'
+ store.remove('wp2', where=[crit3])
+ result = store['wp2']
+ expected = wp.reindex(major_axis=wp.major_axis
+ .difference(date1)
+ .difference(Index([date2]))
+ .difference(Index(date3)))
+ assert_panel_equal(result, expected)
+
+ # corners
+ _maybe_remove(store, 'wp4')
+ store.put('wp4', wp, format='table')
+ n = store.remove(
+ 'wp4', where="major_axis>wp.major_axis[-1]")
+ result = store.select('wp4')
+ assert_panel_equal(result, wp)
def test_invalid_terms(self):
@@ -2464,24 +2497,32 @@ def test_terms(self):
with ensure_clean_store(self.path) as store:
- wp = tm.makePanel()
- wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
- 0: tm.makeDataFrame(),
- 1: tm.makeDataFrame()})
-
with catch_warnings(record=True):
+ wp = tm.makePanel()
+ wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
+ 0: tm.makeDataFrame(),
+ 1: tm.makeDataFrame()})
p4d = tm.makePanel4D()
store.put('p4d', p4d, format='table')
-
- store.put('wp', wp, format='table')
- store.put('wpneg', wpneg, format='table')
-
- # panel
- result = store.select(
- 'wp', "major_axis<'20000108' and minor_axis=['A', 'B']")
- expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
- assert_panel_equal(result, expected)
+ store.put('wp', wp, format='table')
+ store.put('wpneg', wpneg, format='table')
+
+ # panel
+ result = store.select(
+ 'wp',
+ "major_axis<'20000108' and minor_axis=['A', 'B']")
+ expected = wp.truncate(
+ after='20000108').reindex(minor=['A', 'B'])
+ assert_panel_equal(result, expected)
+
+ # with deprecation
+ result = store.select(
+ 'wp', where=("major_axis<'20000108' "
+ "and minor_axis=['A', 'B']"))
+ expected = wp.truncate(
+ after='20000108').reindex(minor=['A', 'B'])
+ tm.assert_panel_equal(result, expected)
# p4d
with catch_warnings(record=True):
@@ -2516,74 +2557,79 @@ def test_terms(self):
store.select('p4d', t)
# valid for p4d only
- terms = [(("labels=['l1', 'l2']"),),
- Term("labels=['l1', 'l2']"),
- ]
-
+ terms = ["labels=['l1', 'l2']"]
for t in terms:
store.select('p4d', t)
- with tm.assertRaisesRegexp(TypeError,
- 'Only named functions are supported'):
- store.select('wp', Term(
- 'major_axis == (lambda x: x)("20130101")'))
+ with tm.assertRaisesRegexp(
+ TypeError, 'Only named functions are supported'):
+ store.select(
+ 'wp',
+ 'major_axis == (lambda x: x)("20130101")')
- # check USub node parsing
- res = store.select('wpneg', Term('items == -1'))
- expected = Panel({-1: wpneg[-1]})
- tm.assert_panel_equal(res, expected)
+ with catch_warnings(record=True):
+ # check USub node parsing
+ res = store.select('wpneg', 'items == -1')
+ expected = Panel({-1: wpneg[-1]})
+ tm.assert_panel_equal(res, expected)
- with tm.assertRaisesRegexp(NotImplementedError,
- 'Unary addition not supported'):
- store.select('wpneg', Term('items == +1'))
+ with tm.assertRaisesRegexp(NotImplementedError,
+ 'Unary addition not supported'):
+ store.select('wpneg', 'items == +1')
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
- wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp', wp)
+ with catch_warnings(record=True):
+ wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
+ store.append('wp', wp)
- result = store.select(
- 'wp', "major_axis>20000102 and minor_axis=['A', 'B']")
- expected = wp.loc[:, wp.major_axis >
- Timestamp('20000102'), ['A', 'B']]
- assert_panel_equal(result, expected)
+ result = store.select(
+ 'wp', where=("major_axis>20000102 "
+ "and minor_axis=['A', 'B']"))
+ expected = wp.loc[:, wp.major_axis >
+ Timestamp('20000102'), ['A', 'B']]
+ assert_panel_equal(result, expected)
- store.remove('wp', 'major_axis>20000103')
- result = store.select('wp')
- expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
- assert_panel_equal(result, expected)
+ store.remove('wp', 'major_axis>20000103')
+ result = store.select('wp')
+ expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
+ assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
- wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp', wp)
-
- # stringified datetimes
- result = store.select(
- 'wp', "major_axis>datetime.datetime(2000, 1, 2)")
- expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
- assert_panel_equal(result, expected)
-
- result = store.select(
- 'wp', "major_axis>datetime.datetime(2000, 1, 2, 0, 0)")
- expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
- assert_panel_equal(result, expected)
-
- result = store.select(
- 'wp', ("major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
- "datetime.datetime(2000, 1, 3, 0, 0)]"))
- expected = wp.loc[:, [Timestamp('20000102'),
- Timestamp('20000103')]]
- assert_panel_equal(result, expected)
-
- result = store.select('wp', "minor_axis=['A', 'B']")
- expected = wp.loc[:, :, ['A', 'B']]
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ wp = Panel(np.random.randn(2, 5, 4),
+ items=['Item1', 'Item2'],
+ major_axis=date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
+ store.append('wp', wp)
+
+ # stringified datetimes
+ result = store.select(
+ 'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
+ expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select(
+ 'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
+ expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select(
+ 'wp',
+ "major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
+ "datetime.datetime(2000, 1, 3, 0, 0)]")
+ expected = wp.loc[:, [Timestamp('20000102'),
+ Timestamp('20000103')]]
+ assert_panel_equal(result, expected)
+
+ result = store.select(
+ 'wp', "minor_axis=['A', 'B']")
+ expected = wp.loc[:, :, ['A', 'B']]
+ assert_panel_equal(result, expected)
def test_same_name_scoping(self):
@@ -2678,12 +2724,13 @@ def test_tuple_index(self):
def test_index_types(self):
- values = np.random.randn(2)
+ with catch_warnings(record=True):
+ values = np.random.randn(2)
- func = lambda l, r: tm.assert_series_equal(l, r,
- check_dtype=True,
- check_index_type=True,
- check_series_type=True)
+ func = lambda l, r: tm.assert_series_equal(l, r,
+ check_dtype=True,
+ check_index_type=True,
+ check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
@@ -2702,18 +2749,31 @@ def test_index_types(self):
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
+
+ ser = Series(values, [0, 'y'])
+ self._check_roundtrip(ser, func)
+
+ ser = Series(values, [datetime.datetime.today(), 0])
+ self._check_roundtrip(ser, func)
+
+ ser = Series(values, ['y', 0])
+ self._check_roundtrip(ser, func)
+
+ ser = Series(values, [datetime.date.today(), 'a'])
+ self._check_roundtrip(ser, func)
+
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
- ser = Series(values, [1, 1.53])
- self._check_roundtrip(ser, func)
+ ser = Series(values, [1, 1.53])
+ self._check_roundtrip(ser, func)
- ser = Series(values, [1, 5])
- self._check_roundtrip(ser, func)
+ ser = Series(values, [1, 5])
+ self._check_roundtrip(ser, func)
- ser = Series(values, [datetime.datetime(
- 2012, 1, 1), datetime.datetime(2012, 1, 2)])
- self._check_roundtrip(ser, func)
+ ser = Series(values, [datetime.datetime(
+ 2012, 1, 1), datetime.datetime(2012, 1, 2)])
+ self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
@@ -2876,13 +2936,9 @@ def _make_one():
def test_wide(self):
- wp = tm.makePanel()
- self._check_roundtrip(wp, assert_panel_equal)
-
- def test_wide_table(self):
-
- wp = tm.makePanel()
- self._check_roundtrip_table(wp, assert_panel_equal)
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
+ self._check_roundtrip(wp, assert_panel_equal)
def test_select_with_dups(self):
@@ -2944,25 +3000,24 @@ def test_select_with_dups(self):
assert_frame_equal(result, expected, by_blocks=True)
def test_wide_table_dups(self):
- wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
- store.put('panel', wp, format='table')
- store.put('panel', wp, format='table', append=True)
-
with catch_warnings(record=True):
+
+ wp = tm.makePanel()
+ store.put('panel', wp, format='table')
+ store.put('panel', wp, format='table', append=True)
+
recons = store['panel']
- assert_panel_equal(recons, wp)
+ assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
- wp = tm.makePanel()
- self._check_roundtrip(wp.to_frame(), _check)
-
- # empty
- # self._check_roundtrip(wp.to_frame()[:0], _check)
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
+ self._check_roundtrip(wp.to_frame(), _check)
def test_longpanel(self):
pass
@@ -3009,70 +3064,72 @@ def test_sparse_with_compression(self):
check_frame_type=True)
def test_select(self):
- wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
- # put/select ok
- _maybe_remove(store, 'wp')
- store.put('wp', wp, format='table')
- store.select('wp')
-
- # non-table ok (where = None)
- _maybe_remove(store, 'wp')
- store.put('wp2', wp)
- store.select('wp2')
-
- # selection on the non-indexable with a large number of columns
- wp = Panel(np.random.randn(100, 100, 100),
- items=['Item%03d' % i for i in range(100)],
- major_axis=date_range('1/1/2000', periods=100),
- minor_axis=['E%03d' % i for i in range(100)])
-
- _maybe_remove(store, 'wp')
- store.append('wp', wp)
- items = ['Item%03d' % i for i in range(80)]
- result = store.select('wp', Term('items=items'))
- expected = wp.reindex(items=items)
- assert_panel_equal(expected, result)
-
- # selectin non-table with a where
- # self.assertRaises(ValueError, store.select,
- # 'wp2', ('column', ['A', 'D']))
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
- # select with columns=
- df = tm.makeTimeDataFrame()
- _maybe_remove(store, 'df')
- store.append('df', df)
- result = store.select('df', columns=['A', 'B'])
- expected = df.reindex(columns=['A', 'B'])
- tm.assert_frame_equal(expected, result)
+ # put/select ok
+ _maybe_remove(store, 'wp')
+ store.put('wp', wp, format='table')
+ store.select('wp')
+
+ # non-table ok (where = None)
+ _maybe_remove(store, 'wp')
+ store.put('wp2', wp)
+ store.select('wp2')
+
+ # selection on the non-indexable with a large number of columns
+ wp = Panel(np.random.randn(100, 100, 100),
+ items=['Item%03d' % i for i in range(100)],
+ major_axis=date_range('1/1/2000', periods=100),
+ minor_axis=['E%03d' % i for i in range(100)])
+
+ _maybe_remove(store, 'wp')
+ store.append('wp', wp)
+ items = ['Item%03d' % i for i in range(80)]
+ result = store.select('wp', 'items=items')
+ expected = wp.reindex(items=items)
+ assert_panel_equal(expected, result)
+
+ # selectin non-table with a where
+ # self.assertRaises(ValueError, store.select,
+ # 'wp2', ('column', ['A', 'D']))
+
+ # select with columns=
+ df = tm.makeTimeDataFrame()
+ _maybe_remove(store, 'df')
+ store.append('df', df)
+ result = store.select('df', columns=['A', 'B'])
+ expected = df.reindex(columns=['A', 'B'])
+ tm.assert_frame_equal(expected, result)
- # equivalentsly
- result = store.select('df', [("columns=['A', 'B']")])
- expected = df.reindex(columns=['A', 'B'])
- tm.assert_frame_equal(expected, result)
+ # equivalentsly
+ result = store.select('df', [("columns=['A', 'B']")])
+ expected = df.reindex(columns=['A', 'B'])
+ tm.assert_frame_equal(expected, result)
- # with a data column
- _maybe_remove(store, 'df')
- store.append('df', df, data_columns=['A'])
- result = store.select('df', ['A > 0'], columns=['A', 'B'])
- expected = df[df.A > 0].reindex(columns=['A', 'B'])
- tm.assert_frame_equal(expected, result)
+ # with a data column
+ _maybe_remove(store, 'df')
+ store.append('df', df, data_columns=['A'])
+ result = store.select('df', ['A > 0'], columns=['A', 'B'])
+ expected = df[df.A > 0].reindex(columns=['A', 'B'])
+ tm.assert_frame_equal(expected, result)
- # all a data columns
- _maybe_remove(store, 'df')
- store.append('df', df, data_columns=True)
- result = store.select('df', ['A > 0'], columns=['A', 'B'])
- expected = df[df.A > 0].reindex(columns=['A', 'B'])
- tm.assert_frame_equal(expected, result)
+ # all a data columns
+ _maybe_remove(store, 'df')
+ store.append('df', df, data_columns=True)
+ result = store.select('df', ['A > 0'], columns=['A', 'B'])
+ expected = df[df.A > 0].reindex(columns=['A', 'B'])
+ tm.assert_frame_equal(expected, result)
- # with a data column, but different columns
- _maybe_remove(store, 'df')
- store.append('df', df, data_columns=['A'])
- result = store.select('df', ['A > 0'], columns=['C', 'D'])
- expected = df[df.A > 0].reindex(columns=['C', 'D'])
- tm.assert_frame_equal(expected, result)
+ # with a data column, but different columns
+ _maybe_remove(store, 'df')
+ store.append('df', df, data_columns=['A'])
+ result = store.select('df', ['A > 0'], columns=['C', 'D'])
+ expected = df[df.A > 0].reindex(columns=['C', 'D'])
+ tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
@@ -3084,7 +3141,7 @@ def test_select_dtypes(self):
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
- result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
+ result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
@@ -3099,15 +3156,15 @@ def test_select_dtypes(self):
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
- result = store.select('df', Term(
- 'boolv == %s' % str(v)), columns=['A', 'boolv'])
+ result = store.select('df', 'boolv == %s' % str(v),
+ columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
- result = store.select('df', Term(
- 'boolv == %s' % str(v)), columns=['A', 'boolv'])
+ result = store.select(
+ 'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
@@ -3115,7 +3172,7 @@ def test_select_dtypes(self):
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
- 'df_int', [Term("index<10"), Term("columns=['A']")])
+ 'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
@@ -3125,7 +3182,7 @@ def test_select_dtypes(self):
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
- 'df_float', [Term("index<10.0"), Term("columns=['A']")])
+ 'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
@@ -3196,14 +3253,14 @@ def test_select_with_many_inputs(self):
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
- result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
+ result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
- 'df', [Term("ts>=Timestamp('2012-02-01') & "
- "users=['a','b','c']")])
+ 'df',
+ "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
@@ -3211,21 +3268,21 @@ def test_select_with_many_inputs(self):
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
- 'df', [Term("ts>=Timestamp('2012-02-01')"),
- Term('users=selector')])
+ 'df',
+ "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
- result = store.select('df', [Term('B=selector')])
+ result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
- result = store.select('df', [Term('ts=selector')])
+ result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
@@ -3296,17 +3353,6 @@ def test_select_iterator(self):
result = concat(results)
tm.assert_frame_equal(expected, result)
- # where selection
- # expected = store.select_as_multiple(
- # ['df1', 'df2'], where= Term('A>0'), selector='df1')
- # results = []
- # for s in store.select_as_multiple(
- # ['df1', 'df2'], where= Term('A>0'), selector='df1',
- # chunksize=25):
- # results.append(s)
- # result = concat(results)
- # tm.assert_frame_equal(expected, result)
-
def test_select_iterator_complete_8014(self):
# GH 8014
@@ -3518,8 +3564,7 @@ def test_retain_index_attributes(self):
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
- with tm.assert_produces_warning(
- expected_warning=AttributeConflictWarning):
+ with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
@@ -3544,9 +3589,7 @@ def test_retain_index_attributes(self):
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
- expected_warning = Warning if PY35 else AttributeConflictWarning
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
@@ -3566,8 +3609,7 @@ def test_retain_index_attributes2(self):
self.assertEqual(read_hdf(path, 'data').index.name, 'foo')
- with tm.assert_produces_warning(expected_warning=expected_warning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
@@ -3578,23 +3620,28 @@ def test_retain_index_attributes2(self):
def test_panel_select(self):
- wp = tm.makePanel()
-
with ensure_clean_store(self.path) as store:
- store.put('wp', wp, format='table')
- date = wp.major_axis[len(wp.major_axis) // 2]
- crit1 = ('major_axis>=date')
- crit2 = ("minor_axis=['A', 'D']")
+ with catch_warnings(record=True):
- result = store.select('wp', [crit1, crit2])
- expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
- assert_panel_equal(result, expected)
+ wp = tm.makePanel()
- result = store.select(
- 'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
- expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
- assert_panel_equal(result, expected)
+ store.put('wp', wp, format='table')
+ date = wp.major_axis[len(wp.major_axis) // 2]
+
+ crit1 = ('major_axis>=date')
+ crit2 = ("minor_axis=['A', 'D']")
+
+ result = store.select('wp', [crit1, crit2])
+ expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
+ assert_panel_equal(result, expected)
+
+ result = store.select(
+ 'wp', ['major_axis>="20000124"',
+ ("minor_axis=['A', 'B']")])
+ expected = wp.truncate(
+ before='20000124').reindex(minor=['A', 'B'])
+ assert_panel_equal(result, expected)
def test_frame_select(self):
@@ -3622,7 +3669,7 @@ def test_frame_select(self):
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
- ValueError, store.select, 'df_time', [Term("index>0")])
+ ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
@@ -3701,7 +3748,7 @@ def test_frame_select_complex2(self):
hist.to_hdf(hh, 'df', mode='w', format='table')
- expected = read_hdf(hh, 'df', where="l1=[2, 3, 4]")
+ expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
# sccope with list like
l = selection.index.tolist() # noqa
@@ -4005,6 +4052,7 @@ def test_append_to_multiple_dropna(self):
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
+
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
@@ -4015,14 +4063,27 @@ def test_append_to_multiple_dropna(self):
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
+ @pytest.mark.xfail(run=False,
+ reason="append_to_multiple_dropna_false "
+ "is not raising as failed")
+ def test_append_to_multiple_dropna_false(self):
+ df1 = tm.makeTimeDataFrame()
+ df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
+ df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
+ df = concat([df1, df2], axis=1)
+
+ with ensure_clean_store(self.path) as store:
+
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
- {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
+ {'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
- self.assertRaises(
- ValueError, store.select_as_multiple, ['df1', 'df2'])
- assert not store.select('df1').index.equals(
- store.select('df2').index)
+
+ with pytest.raises(ValueError):
+ store.select_as_multiple(['df1a', 'df2a'])
+
+ assert not store.select('df1a').index.equals(
+ store.select('df2a').index)
def test_select_as_multiple(self):
@@ -4220,7 +4281,7 @@ def _check_roundtrip_table(self, obj, comparator, compression=False):
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
- # sorted_obj = _test_sort(obj)
+
comparator(retrieved, obj)
def test_multiple_open_close(self):
@@ -4351,16 +4412,16 @@ def test_legacy_table_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy_table.h5'),
mode='r') as store:
- store.select('df1')
- store.select('df2')
- store.select('wp1')
- # force the frame
- store.select('df2', typ='legacy_frame')
+ with catch_warnings(record=True):
+ store.select('df1')
+ store.select('df2')
+ store.select('wp1')
+
+ # force the frame
+ store.select('df2', typ='legacy_frame')
- # old version warning
- with tm.assert_produces_warning(
- expected_warning=IncompatibilityWarning):
+ # old version warning
self.assertRaises(
Exception, store.select, 'wp1', 'minor_axis=B')
@@ -4466,7 +4527,8 @@ def test_legacy_table_write(self):
'legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
- wp = tm.makePanel()
+ with catch_warnings(record=True):
+ wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
@@ -4803,12 +4865,11 @@ def test_to_hdf_with_object_column_names(self):
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
- with self.assertRaises(
+ with catch_warnings(record=True):
+ with self.assertRaises(
ValueError, msg=("cannot have non-object label "
"DataIndexableCol")):
- with catch_warnings(record=True):
- df.to_hdf(path, 'df',
- format='table',
+ df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
@@ -4979,7 +5040,7 @@ def test_query_compare_column_type(self):
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
- ts = pd.Timestamp('2014-01-01') # noqa
+ ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
@@ -5092,28 +5153,30 @@ def test_complex_mixed_table(self):
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
- complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
- s = Series(complex128, index=list('abcd'))
- df = DataFrame({'A': s, 'B': s})
- p = Panel({'One': df, 'Two': df})
+ with catch_warnings(record=True):
+ complex128 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list('abcd'))
+ df = DataFrame({'A': s, 'B': s})
+ p = Panel({'One': df, 'Two': df})
- objs = [s, df, p]
- comps = [tm.assert_series_equal, tm.assert_frame_equal,
- tm.assert_panel_equal]
- for obj, comp in zip(objs, comps):
- with ensure_clean_path(self.path) as path:
- obj.to_hdf(path, 'obj', format='fixed')
- reread = read_hdf(path, 'obj')
- comp(obj, reread)
+ objs = [s, df, p]
+ comps = [tm.assert_series_equal, tm.assert_frame_equal,
+ tm.assert_panel_equal]
+ for obj, comp in zip(objs, comps):
+ with ensure_clean_path(self.path) as path:
+ obj.to_hdf(path, 'obj', format='fixed')
+ reread = read_hdf(path, 'obj')
+ comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
- p = Panel({'One': df, 'Two': df})
with catch_warnings(record=True):
- p4d = pd.Panel4D({'i': p, 'ii': p})
+ p = Panel({'One': df, 'Two': df})
+ p4d = Panel4D({'i': p, 'ii': p})
objs = [df, p, p4d]
comps = [tm.assert_frame_equal, tm.assert_panel_equal,
@@ -5430,12 +5493,3 @@ def test_dst_transitions(self):
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
-
-
-def _test_sort(obj):
- if isinstance(obj, DataFrame):
- return obj.reindex(sorted(obj.index))
- elif isinstance(obj, Panel):
- return obj.reindex(major=sorted(obj.major_axis))
- else:
- raise ValueError('type not supported here')
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index f669ebe371f9d..dc4787176a0b5 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -2,6 +2,7 @@
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
+from warnings import catch_warnings
import re
import operator
import pytest
@@ -32,19 +33,26 @@
'D': _frame2['D'].astype('int32')})
_integer = DataFrame(
np.random.randint(1, 100,
- size=(10001, 4)), columns=list('ABCD'), dtype='int64')
+ size=(10001, 4)),
+ columns=list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
-_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(
- _frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
-_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
- ItemC=_frame2.copy(), ItemD=_frame2.copy()))
-_integer_panel = Panel(dict(ItemA=_integer, ItemB=(_integer + 34).astype(
- 'int64')))
-_integer2_panel = Panel(dict(ItemA=_integer2, ItemB=(_integer2 + 34).astype(
- 'int64')))
-_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
-_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
+
+with catch_warnings(record=True):
+ _frame_panel = Panel(dict(ItemA=_frame.copy(),
+ ItemB=(_frame.copy() + 3),
+ ItemC=_frame.copy(),
+ ItemD=_frame.copy()))
+ _frame2_panel = Panel(dict(ItemA=_frame2.copy(),
+ ItemB=(_frame2.copy() + 3),
+ ItemC=_frame2.copy(),
+ ItemD=_frame2.copy()))
+ _integer_panel = Panel(dict(ItemA=_integer,
+ ItemB=(_integer + 34).astype('int64')))
+ _integer2_panel = Panel(dict(ItemA=_integer2,
+ ItemB=(_integer2 + 34).astype('int64')))
+ _mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
+ _mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
@pytest.mark.skipif(not expr._USE_NUMEXPR, reason='not using numexpr')
@@ -204,7 +212,7 @@ def test_float_panel(self):
@slow
def test_panel4d(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
assert_func=assert_panel4d_equal, binary_comp=3)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 0e8e8dc43ff03..118039d1f354c 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -3,6 +3,8 @@
from operator import methodcaller
from copy import copy, deepcopy
+from warnings import catch_warnings
+
import pytest
import numpy as np
from numpy import nan
@@ -1570,17 +1572,18 @@ def test_to_xarray(self):
tm._skip_if_no_xarray()
from xarray import DataArray
- p = tm.makePanel()
+ with catch_warnings(record=True):
+ p = tm.makePanel()
- result = p.to_xarray()
- self.assertIsInstance(result, DataArray)
- self.assertEqual(len(result.coords), 3)
- assert_almost_equal(list(result.coords.keys()),
- ['items', 'major_axis', 'minor_axis'])
- self.assertEqual(len(result.dims), 3)
+ result = p.to_xarray()
+ self.assertIsInstance(result, DataArray)
+ self.assertEqual(len(result.coords), 3)
+ assert_almost_equal(list(result.coords.keys()),
+ ['items', 'major_axis', 'minor_axis'])
+ self.assertEqual(len(result.dims), 3)
- # idempotency
- assert_panel_equal(result.to_pandas(), p)
+ # idempotency
+ assert_panel_equal(result.to_pandas(), p)
class TestPanel4D(tm.TestCase, Generic):
@@ -1590,15 +1593,12 @@ class TestPanel4D(tm.TestCase, Generic):
def test_sample(self):
pytest.skip("sample on Panel4D")
- def test_copy_and_deepcopy(self):
- pytest.skip("copy_and_deepcopy on Panel4D")
-
def test_to_xarray(self):
tm._skip_if_no_xarray()
from xarray import DataArray
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p = tm.makePanel4D()
result = p.to_xarray()
@@ -1624,12 +1624,20 @@ def test_to_xarray(self):
'test_stat_unexpected_keyword', 'test_api_compat',
'test_stat_non_defaults_args',
'test_clip', 'test_truncate_out_of_bounds', 'test_numpy_clip',
- 'test_metadata_propagation']:
+ 'test_metadata_propagation', 'test_copy_and_deepcopy',
+ 'test_sample']:
+
+ def f():
+ def tester(self):
+ with catch_warnings(record=True):
+ return getattr(super(TestPanel, self), t)()
+ return tester
+
+ setattr(TestPanel, t, f())
def f():
def tester(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with catch_warnings(record=True):
return getattr(super(TestPanel4D, self), t)()
return tester
@@ -1660,10 +1668,11 @@ def test_sample(sel):
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
- panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
- minor_axis=[3, 4, 5])
- with tm.assertRaises(ValueError):
- panel.sample(n=1, weights='weight_column')
+ with catch_warnings(record=True):
+ panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
+ minor_axis=[3, 4, 5])
+ with tm.assertRaises(ValueError):
+ panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
@@ -1726,14 +1735,15 @@ def test_sample(sel):
assert_frame_equal(sample1, df[['colString']])
# Test default axes
- p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
- minor_axis=[1, 3, 5])
- assert_panel_equal(
- p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
- random_state=42))
- assert_frame_equal(
- df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
- random_state=42))
+ with catch_warnings(record=True):
+ p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
+ minor_axis=[1, 3, 5])
+ assert_panel_equal(
+ p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
+ random_state=42))
+ assert_frame_equal(
+ df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
+ random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
@@ -1763,9 +1773,10 @@ def test_squeeze(self):
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
- for p in [tm.makePanel()]:
- tm.assert_panel_equal(p.squeeze(), p)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
+ for p in [tm.makePanel()]:
+ tm.assert_panel_equal(p.squeeze(), p)
+ with catch_warnings(record=True):
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
@@ -1773,24 +1784,26 @@ def test_squeeze(self):
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
- p = tm.makePanel().reindex(items=['ItemA'])
- tm.assert_frame_equal(p.squeeze(), p['ItemA'])
+ with catch_warnings(record=True):
+ p = tm.makePanel().reindex(items=['ItemA'])
+ tm.assert_frame_equal(p.squeeze(), p['ItemA'])
- p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
- tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])
+ p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
+ tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(), p4d.loc['label1', 'ItemA'])
# don't fail with 0 length dimensions GH11229 & GH8999
- empty_series = pd.Series([], name='five')
- empty_frame = pd.DataFrame([empty_series])
- empty_panel = pd.Panel({'six': empty_frame})
+ empty_series = Series([], name='five')
+ empty_frame = DataFrame([empty_series])
+ with catch_warnings(record=True):
+ empty_panel = Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
@@ -1825,13 +1838,15 @@ def test_transpose(self):
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
- for p in [tm.makePanel()]:
- tm.assert_panel_equal(p.transpose(2, 0, 1)
- .transpose(1, 2, 0), p)
- tm.assertRaisesRegexp(TypeError, msg, p.transpose,
- 2, 0, 1, axes=(2, 0, 1))
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
+ for p in [tm.makePanel()]:
+ tm.assert_panel_equal(p.transpose(2, 0, 1)
+ .transpose(1, 2, 0), p)
+ tm.assertRaisesRegexp(TypeError, msg, p.transpose,
+ 2, 0, 1, axes=(2, 0, 1))
+
+ with catch_warnings(record=True):
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.transpose(2, 0, 3, 1)
.transpose(1, 3, 0, 2), p4d)
@@ -1853,12 +1868,13 @@ def test_numpy_transpose(self):
tm.assertRaisesRegexp(ValueError, msg,
np.transpose, df, axes=1)
- p = tm.makePanel()
- tm.assert_panel_equal(np.transpose(
- np.transpose(p, axes=(2, 0, 1)),
- axes=(1, 2, 0)), p)
+ with catch_warnings(record=True):
+ p = tm.makePanel()
+ tm.assert_panel_equal(np.transpose(
+ np.transpose(p, axes=(2, 0, 1)),
+ axes=(1, 2, 0)), p)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
p4d = tm.makePanel4D()
tm.assert_panel4d_equal(np.transpose(
np.transpose(p4d, axes=(2, 0, 3, 1)),
@@ -1880,15 +1896,16 @@ def test_take(self):
tm.assert_frame_equal(out, expected)
indices = [-3, 2, 0, 1]
- for p in [tm.makePanel()]:
- out = p.take(indices)
- expected = Panel(data=p.values.take(indices, axis=0),
- items=p.items.take(indices),
- major_axis=p.major_axis,
- minor_axis=p.minor_axis)
- tm.assert_panel_equal(out, expected)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
+ for p in [tm.makePanel()]:
+ out = p.take(indices)
+ expected = Panel(data=p.values.take(indices, axis=0),
+ items=p.items.take(indices),
+ major_axis=p.major_axis,
+ minor_axis=p.minor_axis)
+ tm.assert_panel_equal(out, expected)
+
+ with catch_warnings(record=True):
for p4d in [tm.makePanel4D()]:
out = p4d.take(indices)
expected = Panel4D(data=p4d.values.take(indices, axis=0),
@@ -1902,9 +1919,9 @@ def test_take_invalid_kwargs(self):
indices = [-3, 2, 0, 1]
s = tm.makeFloatSeries()
df = tm.makeTimeDataFrame()
- p = tm.makePanel()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
+ p = tm.makePanel()
p4d = tm.makePanel4D()
for obj in (s, df, p, p4d):
@@ -2011,8 +2028,9 @@ def test_equals(self):
self.assertTrue(e.equals(f))
def test_describe_raises(self):
- with tm.assertRaises(NotImplementedError):
- tm.makePanel().describe()
+ with catch_warnings(record=True):
+ with tm.assertRaises(NotImplementedError):
+ tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
@@ -2043,15 +2061,16 @@ def test_pipe_tuple_error(self):
df.A.pipe((f, 'y'), x=1, y=0)
def test_pipe_panel(self):
- wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})})
- f = lambda x, y: x + y
- result = wp.pipe(f, 2)
- expected = wp + 2
- assert_panel_equal(result, expected)
-
- result = wp.pipe((f, 'y'), x=1)
- expected = wp + 1
- assert_panel_equal(result, expected)
-
- with tm.assertRaises(ValueError):
- result = wp.pipe((f, 'y'), x=1, y=1)
+ with catch_warnings(record=True):
+ wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})})
+ f = lambda x, y: x + y
+ result = wp.pipe(f, 2)
+ expected = wp + 2
+ assert_panel_equal(result, expected)
+
+ result = wp.pipe((f, 'y'), x=1)
+ expected = wp + 1
+ assert_panel_equal(result, expected)
+
+ with tm.assertRaises(ValueError):
+ result = wp.pipe((f, 'y'), x=1, y=1)
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index ab0322abbcf06..bc7bb8a4dfec1 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -3,7 +3,6 @@
from warnings import catch_warnings
from datetime import datetime
-
import operator
import pytest
@@ -31,25 +30,37 @@
import pandas.util.testing as tm
+def make_test_panel():
+ with catch_warnings(record=True):
+ _panel = tm.makePanel()
+ tm.add_nans(_panel)
+ _panel = _panel.copy()
+ return _panel
+
+
class PanelTests(object):
panel = None
def test_pickle(self):
- unpickled = self.round_trip_pickle(self.panel)
- assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
+ with catch_warnings(record=True):
+ unpickled = self.round_trip_pickle(self.panel)
+ assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
- self.assertRaises(NotImplementedError, lambda: self.panel.rank())
+ with catch_warnings(record=True):
+ self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
- cumsum = self.panel.cumsum()
- assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
+ with catch_warnings(record=True):
+ cumsum = self.panel.cumsum()
+ assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
- c_empty = Panel()
- c = Panel(Panel([[[1]]]))
- self.assertRaises(TypeError, hash, c_empty)
- self.assertRaises(TypeError, hash, c)
+ with catch_warnings(record=True):
+ c_empty = Panel()
+ c = Panel(Panel([[[1]]]))
+ self.assertRaises(TypeError, hash, c_empty)
+ self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
@@ -58,11 +69,12 @@ def test_repr(self):
repr(self.panel)
def test_copy_names(self):
- for attr in ('major_axis', 'minor_axis'):
- getattr(self.panel, attr).name = None
- cp = self.panel.copy()
- getattr(cp, attr).name = 'foo'
- self.assertIsNone(getattr(self.panel, attr).name)
+ with catch_warnings(record=True):
+ for attr in ('major_axis', 'minor_axis'):
+ getattr(self.panel, attr).name = None
+ cp = self.panel.copy()
+ getattr(cp, attr).name = 'foo'
+ self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
@@ -107,10 +119,6 @@ def this_skew(x):
self._check_stat_op('skew', this_skew)
- # def test_mad(self):
- # f = lambda x: np.abs(x - x.mean()).mean()
- # self._check_stat_op('mad', f)
-
def test_var(self):
def alt(x):
if len(x) < 2:
@@ -239,47 +247,48 @@ def test_get_plane_axes(self):
index, columns = self.panel._get_plane_axes(0)
def test_truncate(self):
- dates = self.panel.major_axis
- start, end = dates[1], dates[5]
-
- trunced = self.panel.truncate(start, end, axis='major')
- expected = self.panel['ItemA'].truncate(start, end)
+ with catch_warnings(record=True):
+ dates = self.panel.major_axis
+ start, end = dates[1], dates[5]
- assert_frame_equal(trunced['ItemA'], expected)
+ trunced = self.panel.truncate(start, end, axis='major')
+ expected = self.panel['ItemA'].truncate(start, end)
- trunced = self.panel.truncate(before=start, axis='major')
- expected = self.panel['ItemA'].truncate(before=start)
+ assert_frame_equal(trunced['ItemA'], expected)
- assert_frame_equal(trunced['ItemA'], expected)
+ trunced = self.panel.truncate(before=start, axis='major')
+ expected = self.panel['ItemA'].truncate(before=start)
- trunced = self.panel.truncate(after=end, axis='major')
- expected = self.panel['ItemA'].truncate(after=end)
+ assert_frame_equal(trunced['ItemA'], expected)
- assert_frame_equal(trunced['ItemA'], expected)
+ trunced = self.panel.truncate(after=end, axis='major')
+ expected = self.panel['ItemA'].truncate(after=end)
- # XXX test other axes
+ assert_frame_equal(trunced['ItemA'], expected)
def test_arith(self):
- self._test_op(self.panel, operator.add)
- self._test_op(self.panel, operator.sub)
- self._test_op(self.panel, operator.mul)
- self._test_op(self.panel, operator.truediv)
- self._test_op(self.panel, operator.floordiv)
- self._test_op(self.panel, operator.pow)
-
- self._test_op(self.panel, lambda x, y: y + x)
- self._test_op(self.panel, lambda x, y: y - x)
- self._test_op(self.panel, lambda x, y: y * x)
- self._test_op(self.panel, lambda x, y: y / x)
- self._test_op(self.panel, lambda x, y: y ** x)
-
- self._test_op(self.panel, lambda x, y: x + y) # panel + 1
- self._test_op(self.panel, lambda x, y: x - y) # panel - 1
- self._test_op(self.panel, lambda x, y: x * y) # panel * 1
- self._test_op(self.panel, lambda x, y: x / y) # panel / 1
- self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
-
- self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
+ with catch_warnings(record=True):
+ self._test_op(self.panel, operator.add)
+ self._test_op(self.panel, operator.sub)
+ self._test_op(self.panel, operator.mul)
+ self._test_op(self.panel, operator.truediv)
+ self._test_op(self.panel, operator.floordiv)
+ self._test_op(self.panel, operator.pow)
+
+ self._test_op(self.panel, lambda x, y: y + x)
+ self._test_op(self.panel, lambda x, y: y - x)
+ self._test_op(self.panel, lambda x, y: y * x)
+ self._test_op(self.panel, lambda x, y: y / x)
+ self._test_op(self.panel, lambda x, y: y ** x)
+
+ self._test_op(self.panel, lambda x, y: x + y) # panel + 1
+ self._test_op(self.panel, lambda x, y: x - y) # panel - 1
+ self._test_op(self.panel, lambda x, y: x * y) # panel * 1
+ self._test_op(self.panel, lambda x, y: x / y) # panel / 1
+ self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
+
+ self.assertRaises(Exception, self.panel.__add__,
+ self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
@@ -299,92 +308,100 @@ def test_iteritems(self):
len(self.panel.items))
def test_combineFrame(self):
- def check_op(op, name):
- # items
- df = self.panel['ItemA']
+ with catch_warnings(record=True):
+ def check_op(op, name):
+ # items
+ df = self.panel['ItemA']
- func = getattr(self.panel, name)
+ func = getattr(self.panel, name)
- result = func(df, axis='items')
+ result = func(df, axis='items')
- assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
+ assert_frame_equal(
+ result['ItemB'], op(self.panel['ItemB'], df))
- # major
- xs = self.panel.major_xs(self.panel.major_axis[0])
- result = func(xs, axis='major')
+ # major
+ xs = self.panel.major_xs(self.panel.major_axis[0])
+ result = func(xs, axis='major')
- idx = self.panel.major_axis[1]
+ idx = self.panel.major_axis[1]
- assert_frame_equal(result.major_xs(idx),
- op(self.panel.major_xs(idx), xs))
+ assert_frame_equal(result.major_xs(idx),
+ op(self.panel.major_xs(idx), xs))
- # minor
- xs = self.panel.minor_xs(self.panel.minor_axis[0])
- result = func(xs, axis='minor')
+ # minor
+ xs = self.panel.minor_xs(self.panel.minor_axis[0])
+ result = func(xs, axis='minor')
- idx = self.panel.minor_axis[1]
+ idx = self.panel.minor_axis[1]
- assert_frame_equal(result.minor_xs(idx),
- op(self.panel.minor_xs(idx), xs))
+ assert_frame_equal(result.minor_xs(idx),
+ op(self.panel.minor_xs(idx), xs))
- ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod']
- if not compat.PY3:
- ops.append('div')
+ ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod']
+ if not compat.PY3:
+ ops.append('div')
- for op in ops:
- try:
- check_op(getattr(operator, op), op)
- except:
- pprint_thing("Failing operation: %r" % op)
- raise
- if compat.PY3:
- try:
- check_op(operator.truediv, 'div')
- except:
- pprint_thing("Failing operation: %r" % 'div')
- raise
+ for op in ops:
+ try:
+ check_op(getattr(operator, op), op)
+ except:
+ pprint_thing("Failing operation: %r" % op)
+ raise
+ if compat.PY3:
+ try:
+ check_op(operator.truediv, 'div')
+ except:
+ pprint_thing("Failing operation: %r" % 'div')
+ raise
def test_combinePanel(self):
- result = self.panel.add(self.panel)
- self.assert_panel_equal(result, self.panel * 2)
+ with catch_warnings(record=True):
+ result = self.panel.add(self.panel)
+ assert_panel_equal(result, self.panel * 2)
def test_neg(self):
- self.assert_panel_equal(-self.panel, self.panel * -1)
+ with catch_warnings(record=True):
+ assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
- p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
- items=['ItemA', 'ItemB', 'ItemC'],
- major_axis=pd.date_range('20130101', periods=4),
- minor_axis=list('ABCDE'))
- d = p.sum(axis=1).iloc[0]
- ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
- for op in ops:
- with self.assertRaises(NotImplementedError):
- getattr(p, op)(d, axis=0)
+ with catch_warnings(record=True):
+ p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
+ items=['ItemA', 'ItemB', 'ItemC'],
+ major_axis=pd.date_range('20130101', periods=4),
+ minor_axis=list('ABCDE'))
+ d = p.sum(axis=1).iloc[0]
+ ops = ['add', 'sub', 'mul', 'truediv',
+ 'floordiv', 'div', 'mod', 'pow']
+ for op in ops:
+ with self.assertRaises(NotImplementedError):
+ getattr(p, op)(d, axis=0)
def test_select(self):
- p = self.panel
+ with catch_warnings(record=True):
+ p = self.panel
- # select items
- result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
- expected = p.reindex(items=['ItemA', 'ItemC'])
- self.assert_panel_equal(result, expected)
+ # select items
+ result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
+ expected = p.reindex(items=['ItemA', 'ItemC'])
+ assert_panel_equal(result, expected)
- # select major_axis
- result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
- new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
- expected = p.reindex(major=new_major)
- self.assert_panel_equal(result, expected)
+ # select major_axis
+ result = p.select(lambda x: x >= datetime(
+ 2000, 1, 15), axis='major')
+ new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
+ expected = p.reindex(major=new_major)
+ assert_panel_equal(result, expected)
- # select minor_axis
- result = p.select(lambda x: x in ('D', 'A'), axis=2)
- expected = p.reindex(minor=['A', 'D'])
- self.assert_panel_equal(result, expected)
+ # select minor_axis
+ result = p.select(lambda x: x in ('D', 'A'), axis=2)
+ expected = p.reindex(minor=['A', 'D'])
+ assert_panel_equal(result, expected)
- # corner case, empty thing
- result = p.select(lambda x: x in ('foo', ), axis='items')
- self.assert_panel_equal(result, p.reindex(items=[]))
+ # corner case, empty thing
+ result = p.select(lambda x: x in ('foo', ), axis='items')
+ assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
@@ -396,27 +413,28 @@ def test_get_value(self):
def test_abs(self):
- result = self.panel.abs()
- result2 = abs(self.panel)
- expected = np.abs(self.panel)
- self.assert_panel_equal(result, expected)
- self.assert_panel_equal(result2, expected)
-
- df = self.panel['ItemA']
- result = df.abs()
- result2 = abs(df)
- expected = np.abs(df)
- assert_frame_equal(result, expected)
- assert_frame_equal(result2, expected)
+ with catch_warnings(record=True):
+ result = self.panel.abs()
+ result2 = abs(self.panel)
+ expected = np.abs(self.panel)
+ assert_panel_equal(result, expected)
+ assert_panel_equal(result2, expected)
- s = df['A']
- result = s.abs()
- result2 = abs(s)
- expected = np.abs(s)
- assert_series_equal(result, expected)
- assert_series_equal(result2, expected)
- self.assertEqual(result.name, 'A')
- self.assertEqual(result2.name, 'A')
+ df = self.panel['ItemA']
+ result = df.abs()
+ result2 = abs(df)
+ expected = np.abs(df)
+ assert_frame_equal(result, expected)
+ assert_frame_equal(result2, expected)
+
+ s = df['A']
+ result = s.abs()
+ result2 = abs(s)
+ expected = np.abs(s)
+ assert_series_equal(result, expected)
+ assert_series_equal(result2, expected)
+ self.assertEqual(result.name, 'A')
+ self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
@@ -425,188 +443,200 @@ def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
- expected = self.panel['ItemA']
- result = self.panel.pop('ItemA')
- assert_frame_equal(expected, result)
- self.assertNotIn('ItemA', self.panel.items)
+ with catch_warnings(record=True):
+ expected = self.panel['ItemA']
+ result = self.panel.pop('ItemA')
+ assert_frame_equal(expected, result)
+ self.assertNotIn('ItemA', self.panel.items)
- del self.panel['ItemB']
- self.assertNotIn('ItemB', self.panel.items)
- self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
+ del self.panel['ItemB']
+ self.assertNotIn('ItemB', self.panel.items)
+ self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
- values = np.empty((3, 3, 3))
- values[0] = 0
- values[1] = 1
- values[2] = 2
+ values = np.empty((3, 3, 3))
+ values[0] = 0
+ values[1] = 1
+ values[2] = 2
- panel = Panel(values, lrange(3), lrange(3), lrange(3))
+ panel = Panel(values, lrange(3), lrange(3), lrange(3))
- # did we delete the right row?
+ # did we delete the right row?
- panelc = panel.copy()
- del panelc[0]
- assert_frame_equal(panelc[1], panel[1])
- assert_frame_equal(panelc[2], panel[2])
+ panelc = panel.copy()
+ del panelc[0]
+ assert_frame_equal(panelc[1], panel[1])
+ assert_frame_equal(panelc[2], panel[2])
- panelc = panel.copy()
- del panelc[1]
- assert_frame_equal(panelc[0], panel[0])
- assert_frame_equal(panelc[2], panel[2])
+ panelc = panel.copy()
+ del panelc[1]
+ assert_frame_equal(panelc[0], panel[0])
+ assert_frame_equal(panelc[2], panel[2])
- panelc = panel.copy()
- del panelc[2]
- assert_frame_equal(panelc[1], panel[1])
- assert_frame_equal(panelc[0], panel[0])
+ panelc = panel.copy()
+ del panelc[2]
+ assert_frame_equal(panelc[1], panel[1])
+ assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
- # LongPanel with one item
- lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
- with tm.assertRaises(ValueError):
- self.panel['ItemE'] = lp
+ with catch_warnings(record=True):
- # DataFrame
- df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
- self.panel['ItemF'] = df
- self.panel['ItemE'] = df
+ # LongPanel with one item
+ lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
+ with tm.assertRaises(ValueError):
+ self.panel['ItemE'] = lp
- df2 = self.panel['ItemF']
+ # DataFrame
+ df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
+ self.panel['ItemF'] = df
+ self.panel['ItemE'] = df
- assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
+ df2 = self.panel['ItemF']
- # scalar
- self.panel['ItemG'] = 1
- self.panel['ItemE'] = True
- self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
- self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
+ assert_frame_equal(df, df2.reindex(
+ index=df.index, columns=df.columns))
- # object dtype
- self.panel['ItemQ'] = 'foo'
- self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
+ # scalar
+ self.panel['ItemG'] = 1
+ self.panel['ItemE'] = True
+ self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
+ self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
- # boolean dtype
- self.panel['ItemP'] = self.panel['ItemA'] > 0
- self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
+ # object dtype
+ self.panel['ItemQ'] = 'foo'
+ self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
- self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
- self.panel.loc[['ItemP']])
+ # boolean dtype
+ self.panel['ItemP'] = self.panel['ItemA'] > 0
+ self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
- # bad shape
- p = Panel(np.random.randn(4, 3, 2))
- with tm.assertRaisesRegexp(ValueError,
- r"shape of value must be \(3, 2\), "
- r"shape of given object was \(4, 2\)"):
- p[0] = np.random.randn(4, 2)
+ self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
+ self.panel.loc[['ItemP']])
+
+ # bad shape
+ p = Panel(np.random.randn(4, 3, 2))
+ with tm.assertRaisesRegexp(ValueError,
+ r"shape of value must be \(3, 2\), "
+ r"shape of given object was \(4, 2\)"):
+ p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
- timeidx = date_range(start=datetime(2009, 1, 1),
- end=datetime(2009, 12, 31),
- freq=MonthEnd())
- lons_coarse = np.linspace(-177.5, 177.5, 72)
- lats_coarse = np.linspace(-87.5, 87.5, 36)
- P = Panel(items=timeidx, major_axis=lons_coarse,
- minor_axis=lats_coarse)
- data = np.random.randn(72 * 36).reshape((72, 36))
- key = datetime(2009, 2, 28)
- P[key] = data
-
- assert_almost_equal(P[key].values, data)
+ with catch_warnings(record=True):
+ timeidx = date_range(start=datetime(2009, 1, 1),
+ end=datetime(2009, 12, 31),
+ freq=MonthEnd())
+ lons_coarse = np.linspace(-177.5, 177.5, 72)
+ lats_coarse = np.linspace(-87.5, 87.5, 36)
+ P = Panel(items=timeidx, major_axis=lons_coarse,
+ minor_axis=lats_coarse)
+ data = np.random.randn(72 * 36).reshape((72, 36))
+ key = datetime(2009, 2, 28)
+ P[key] = data
+
+ assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
- # GH 11014
- df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
- df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
- panel = Panel({'Item1': df1, 'Item2': df2})
+ with catch_warnings(record=True):
+ # GH 11014
+ df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
+ df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
+ panel = Panel({'Item1': df1, 'Item2': df2})
- newminor = notnull(panel.iloc[:, :, 0])
- panel.loc[:, :, 'NewMinor'] = newminor
- assert_frame_equal(panel.loc[:, :, 'NewMinor'],
- newminor.astype(object))
+ newminor = notnull(panel.iloc[:, :, 0])
+ panel.loc[:, :, 'NewMinor'] = newminor
+ assert_frame_equal(panel.loc[:, :, 'NewMinor'],
+ newminor.astype(object))
- newmajor = notnull(panel.iloc[:, 0, :])
- panel.loc[:, 'NewMajor', :] = newmajor
- assert_frame_equal(panel.loc[:, 'NewMajor', :],
- newmajor.astype(object))
+ newmajor = notnull(panel.iloc[:, 0, :])
+ panel.loc[:, 'NewMajor', :] = newmajor
+ assert_frame_equal(panel.loc[:, 'NewMajor', :],
+ newmajor.astype(object))
def test_major_xs(self):
- ref = self.panel['ItemA']
+ with catch_warnings(record=True):
+ ref = self.panel['ItemA']
- idx = self.panel.major_axis[5]
- xs = self.panel.major_xs(idx)
+ idx = self.panel.major_axis[5]
+ xs = self.panel.major_xs(idx)
- result = xs['ItemA']
- assert_series_equal(result, ref.xs(idx), check_names=False)
- self.assertEqual(result.name, 'ItemA')
+ result = xs['ItemA']
+ assert_series_equal(result, ref.xs(idx), check_names=False)
+ self.assertEqual(result.name, 'ItemA')
- # not contained
- idx = self.panel.major_axis[0] - BDay()
- self.assertRaises(Exception, self.panel.major_xs, idx)
+ # not contained
+ idx = self.panel.major_axis[0] - BDay()
+ self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
- self.panel['ItemD'] = 'foo'
- xs = self.panel.major_xs(self.panel.major_axis[0])
- self.assertEqual(xs['ItemA'].dtype, np.float64)
- self.assertEqual(xs['ItemD'].dtype, np.object_)
+ with catch_warnings(record=True):
+ self.panel['ItemD'] = 'foo'
+ xs = self.panel.major_xs(self.panel.major_axis[0])
+ self.assertEqual(xs['ItemA'].dtype, np.float64)
+ self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
- ref = self.panel['ItemA']
+ with catch_warnings(record=True):
+ ref = self.panel['ItemA']
- idx = self.panel.minor_axis[1]
- xs = self.panel.minor_xs(idx)
+ idx = self.panel.minor_axis[1]
+ xs = self.panel.minor_xs(idx)
- assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
+ assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
- # not contained
- self.assertRaises(Exception, self.panel.minor_xs, 'E')
+ # not contained
+ self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
- self.panel['ItemD'] = 'foo'
+ with catch_warnings(record=True):
+ self.panel['ItemD'] = 'foo'
- xs = self.panel.minor_xs('D')
- self.assertEqual(xs['ItemA'].dtype, np.float64)
- self.assertEqual(xs['ItemD'].dtype, np.object_)
+ xs = self.panel.minor_xs('D')
+ self.assertEqual(xs['ItemA'].dtype, np.float64)
+ self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
- itemA = self.panel.xs('ItemA', axis=0)
- expected = self.panel['ItemA']
- assert_frame_equal(itemA, expected)
+ with catch_warnings(record=True):
+ itemA = self.panel.xs('ItemA', axis=0)
+ expected = self.panel['ItemA']
+ assert_frame_equal(itemA, expected)
- # get a view by default
- itemA_view = self.panel.xs('ItemA', axis=0)
- itemA_view.values[:] = np.nan
- self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
+ # get a view by default
+ itemA_view = self.panel.xs('ItemA', axis=0)
+ itemA_view.values[:] = np.nan
+ self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
- # mixed-type yields a copy
- self.panel['strings'] = 'foo'
- result = self.panel.xs('D', axis=2)
- self.assertIsNotNone(result.is_copy)
+ # mixed-type yields a copy
+ self.panel['strings'] = 'foo'
+ result = self.panel.xs('D', axis=2)
+ self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
- p = self.panel
+ with catch_warnings(record=True):
+ p = self.panel
- items = p.items[[1, 0]]
- dates = p.major_axis[::2]
- cols = ['D', 'C', 'F']
+ items = p.items[[1, 0]]
+ dates = p.major_axis[::2]
+ cols = ['D', 'C', 'F']
- # all 3 specified
- assert_panel_equal(p.loc[items, dates, cols],
- p.reindex(items=items, major=dates, minor=cols))
+ # all 3 specified
+ assert_panel_equal(p.loc[items, dates, cols],
+ p.reindex(items=items, major=dates, minor=cols))
- # 2 specified
- assert_panel_equal(p.loc[:, dates, cols],
- p.reindex(major=dates, minor=cols))
+ # 2 specified
+ assert_panel_equal(p.loc[:, dates, cols],
+ p.reindex(major=dates, minor=cols))
- assert_panel_equal(p.loc[items, :, cols],
- p.reindex(items=items, minor=cols))
+ assert_panel_equal(p.loc[items, :, cols],
+ p.reindex(items=items, minor=cols))
- assert_panel_equal(p.loc[items, dates, :],
- p.reindex(items=items, major=dates))
+ assert_panel_equal(p.loc[items, dates, :],
+ p.reindex(items=items, major=dates))
- # only 1
- assert_panel_equal(p.loc[items, :, :], p.reindex(items=items))
+ # only 1
+ assert_panel_equal(p.loc[items, :, :], p.reindex(items=items))
- assert_panel_equal(p.loc[:, dates, :], p.reindex(major=dates))
+ assert_panel_equal(p.loc[:, dates, :], p.reindex(major=dates))
- assert_panel_equal(p.loc[:, :, cols], p.reindex(minor=cols))
+ assert_panel_equal(p.loc[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
@@ -646,127 +676,132 @@ def test_getitem_fancy_xs(self):
assert_series_equal(p.loc[:, date, col], p.major_xs(date).loc[col])
def test_getitem_fancy_xs_check_view(self):
- item = 'ItemB'
- date = self.panel.major_axis[5]
-
- # make sure it's always a view
- NS = slice(None, None)
-
- # DataFrames
- comp = assert_frame_equal
- self._check_view(item, comp)
- self._check_view((item, NS), comp)
- self._check_view((item, NS, NS), comp)
- self._check_view((NS, date), comp)
- self._check_view((NS, date, NS), comp)
- self._check_view((NS, NS, 'C'), comp)
-
- # Series
- comp = assert_series_equal
- self._check_view((item, date), comp)
- self._check_view((item, date, NS), comp)
- self._check_view((item, NS, 'C'), comp)
- self._check_view((NS, date, 'C'), comp)
+ with catch_warnings(record=True):
+ item = 'ItemB'
+ date = self.panel.major_axis[5]
+
+ # make sure it's always a view
+ NS = slice(None, None)
+
+ # DataFrames
+ comp = assert_frame_equal
+ self._check_view(item, comp)
+ self._check_view((item, NS), comp)
+ self._check_view((item, NS, NS), comp)
+ self._check_view((NS, date), comp)
+ self._check_view((NS, date, NS), comp)
+ self._check_view((NS, NS, 'C'), comp)
+
+ # Series
+ comp = assert_series_equal
+ self._check_view((item, date), comp)
+ self._check_view((item, date, NS), comp)
+ self._check_view((item, NS, 'C'), comp)
+ self._check_view((NS, date, 'C'), comp)
def test_getitem_callable(self):
- p = self.panel
- # GH 12533
+ with catch_warnings(record=True):
+ p = self.panel
+ # GH 12533
- assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB'])
- assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']],
- p.loc[['ItemB', 'ItemC']])
+ assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB'])
+ assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']],
+ p.loc[['ItemB', 'ItemC']])
def test_ix_setitem_slice_dataframe(self):
- a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
- minor_axis=[111, 222, 333])
- b = DataFrame(np.random.randn(2, 3), index=[111, 333],
- columns=[1, 2, 3])
+ with catch_warnings(record=True):
+ a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
+ minor_axis=[111, 222, 333])
+ b = DataFrame(np.random.randn(2, 3), index=[111, 333],
+ columns=[1, 2, 3])
- a.loc[:, 22, [111, 333]] = b
+ a.loc[:, 22, [111, 333]] = b
- assert_frame_equal(a.loc[:, 22, [111, 333]], b)
+ assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
- from pandas import Series
- b = Series(np.random.randn(10), name=0)
- b.sort_values()
- df_orig = Panel(np.random.randn(3, 10, 2))
- df = df_orig.copy()
+ with catch_warnings(record=True):
+ from pandas import Series
+ b = Series(np.random.randn(10), name=0)
+ b.sort_values()
+ df_orig = Panel(np.random.randn(3, 10, 2))
+ df = df_orig.copy()
- df.loc[0, :, 0] = b
- assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
+ df.loc[0, :, 0] = b
+ assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
- df = df_orig.swapaxes(0, 1)
- df.loc[:, 0, 0] = b
- assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
+ df = df_orig.swapaxes(0, 1)
+ df.loc[:, 0, 0] = b
+ assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
- df = df_orig.swapaxes(1, 2)
- df.loc[0, 0, :] = b
- assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
+ df = df_orig.swapaxes(1, 2)
+ df.loc[0, 0, :] = b
+ assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
- p_orig = tm.makePanel()
- df = p_orig.iloc[0].copy()
- assert_frame_equal(p_orig['ItemA'], df)
+ with catch_warnings(record=True):
+ p_orig = tm.makePanel()
+ df = p_orig.iloc[0].copy()
+ assert_frame_equal(p_orig['ItemA'], df)
- p = p_orig.copy()
- p.iloc[0, :, :] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.iloc[0, :, :] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.iloc[0] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.iloc[0] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.iloc[0, :, :] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.iloc[0, :, :] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.iloc[0] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.iloc[0] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.loc['ItemA'] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.loc['ItemA'] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.loc['ItemA', :, :] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p.loc['ItemA', :, :] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p['ItemA'] = df
- assert_panel_equal(p, p_orig)
+ p = p_orig.copy()
+ p['ItemA'] = df
+ assert_panel_equal(p, p_orig)
- p = p_orig.copy()
- p.iloc[0, [0, 1, 3, 5], -2:] = df
- out = p.iloc[0, [0, 1, 3, 5], -2:]
- assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
+ p = p_orig.copy()
+ p.iloc[0, [0, 1, 3, 5], -2:] = df
+ out = p.iloc[0, [0, 1, 3, 5], -2:]
+ assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
- # GH3830, panel assignent by values/frame
- for dtype in ['float64', 'int64']:
+ # GH3830, panel assignent by values/frame
+ for dtype in ['float64', 'int64']:
- panel = Panel(np.arange(40).reshape((2, 4, 5)),
- items=['a1', 'a2'], dtype=dtype)
- df1 = panel.iloc[0]
- df2 = panel.iloc[1]
+ panel = Panel(np.arange(40).reshape((2, 4, 5)),
+ items=['a1', 'a2'], dtype=dtype)
+ df1 = panel.iloc[0]
+ df2 = panel.iloc[1]
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
+ tm.assert_frame_equal(panel.loc['a1'], df1)
+ tm.assert_frame_equal(panel.loc['a2'], df2)
- # Assignment by Value Passes for 'a2'
- panel.loc['a2'] = df1.values
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df1)
+ # Assignment by Value Passes for 'a2'
+ panel.loc['a2'] = df1.values
+ tm.assert_frame_equal(panel.loc['a1'], df1)
+ tm.assert_frame_equal(panel.loc['a2'], df1)
- # Assignment by DataFrame Ok w/o loc 'a2'
- panel['a2'] = df2
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
+ # Assignment by DataFrame Ok w/o loc 'a2'
+ panel['a2'] = df2
+ tm.assert_frame_equal(panel.loc['a1'], df1)
+ tm.assert_frame_equal(panel.loc['a2'], df2)
- # Assignment by DataFrame Fails for 'a2'
- panel.loc['a2'] = df2
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
+ # Assignment by DataFrame Fails for 'a2'
+ panel.loc['a2'] = df2
+ tm.assert_frame_equal(panel.loc['a1'], df1)
+ tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
@@ -776,57 +811,60 @@ def _check_view(self, indexer, comp):
comp(cp.loc[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
- d = Panel({'ItemA': {'a': [np.nan, False]},
- 'ItemB': {'a': [True, True]}})
+ with catch_warnings(record=True):
+ d = Panel({'ItemA': {'a': [np.nan, False]},
+ 'ItemB': {'a': [True, True]}})
- result = d['ItemA'] | d['ItemB']
- expected = DataFrame({'a': [np.nan, True]})
- assert_frame_equal(result, expected)
+ result = d['ItemA'] | d['ItemB']
+ expected = DataFrame({'a': [np.nan, True]})
+ assert_frame_equal(result, expected)
- # this is autodowncasted here
- result = d['ItemA'].fillna(False) | d['ItemB']
- expected = DataFrame({'a': [True, True]})
- assert_frame_equal(result, expected)
+ # this is autodowncasted here
+ result = d['ItemA'].fillna(False) | d['ItemB']
+ expected = DataFrame({'a': [True, True]})
+ assert_frame_equal(result, expected)
def test_neg(self):
- # what to do?
- assert_panel_equal(-self.panel, -1 * self.panel)
+ with catch_warnings(record=True):
+ assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
- assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
+ with catch_warnings(record=True):
+ assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
- p1 = tm.makePanel()
- p2 = tm.makePanel()
+ with catch_warnings(record=True):
+ p1 = tm.makePanel()
+ p2 = tm.makePanel()
- tp = p1.reindex(items=p1.items + ['foo'])
- df = p1[p1.items[0]]
+ tp = p1.reindex(items=p1.items + ['foo'])
+ df = p1[p1.items[0]]
- def test_comp(func):
+ def test_comp(func):
- # versus same index
- result = func(p1, p2)
- self.assert_numpy_array_equal(result.values,
- func(p1.values, p2.values))
+ # versus same index
+ result = func(p1, p2)
+ self.assert_numpy_array_equal(result.values,
+ func(p1.values, p2.values))
- # versus non-indexed same objs
- self.assertRaises(Exception, func, p1, tp)
+ # versus non-indexed same objs
+ self.assertRaises(Exception, func, p1, tp)
- # versus different objs
- self.assertRaises(Exception, func, p1, df)
+ # versus different objs
+ self.assertRaises(Exception, func, p1, df)
- # versus scalar
- result3 = func(self.panel, 0)
- self.assert_numpy_array_equal(result3.values,
- func(self.panel.values, 0))
+ # versus scalar
+ result3 = func(self.panel, 0)
+ self.assert_numpy_array_equal(result3.values,
+ func(self.panel.values, 0))
- with np.errstate(invalid='ignore'):
- test_comp(operator.eq)
- test_comp(operator.ne)
- test_comp(operator.lt)
- test_comp(operator.gt)
- test_comp(operator.ge)
- test_comp(operator.le)
+ with np.errstate(invalid='ignore'):
+ test_comp(operator.eq)
+ test_comp(operator.ne)
+ test_comp(operator.lt)
+ test_comp(operator.gt)
+ test_comp(operator.ge)
+ test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
@@ -840,28 +878,26 @@ def test_get_value(self):
self.panel.get_value('a')
def test_set_value(self):
- for item in self.panel.items:
- for mjr in self.panel.major_axis[::2]:
- for mnr in self.panel.minor_axis:
- self.panel.set_value(item, mjr, mnr, 1.)
- assert_almost_equal(self.panel[item][mnr][mjr], 1.)
-
- # resize
- res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
- tm.assertIsInstance(res, Panel)
- self.assertIsNot(res, self.panel)
- self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
-
- res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
- self.assertTrue(is_float_dtype(res3['ItemE'].values))
- with tm.assertRaisesRegexp(TypeError,
- "There must be an argument for each axis"
- " plus the value provided"):
- self.panel.set_value('a')
-
-
-_panel = tm.makePanel()
-tm.add_nans(_panel)
+ with catch_warnings(record=True):
+ for item in self.panel.items:
+ for mjr in self.panel.major_axis[::2]:
+ for mnr in self.panel.minor_axis:
+ self.panel.set_value(item, mjr, mnr, 1.)
+ assert_almost_equal(self.panel[item][mnr][mjr], 1.)
+
+ # resize
+ res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
+ tm.assertIsInstance(res, Panel)
+ self.assertIsNot(res, self.panel)
+ self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
+
+ res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
+ self.assertTrue(is_float_dtype(res3['ItemE'].values))
+ with tm.assertRaisesRegexp(TypeError,
+ "There must be an argument "
+ "for each axis"
+ " plus the value provided"):
+ self.panel.set_value('a')
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
@@ -872,292 +908,315 @@ def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
- self.panel = _panel.copy()
+ self.panel = make_test_panel()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_constructor(self):
- # with BlockManager
- wp = Panel(self.panel._data)
- self.assertIs(wp._data, self.panel._data)
-
- wp = Panel(self.panel._data, copy=True)
- self.assertIsNot(wp._data, self.panel._data)
- assert_panel_equal(wp, self.panel)
-
- # strings handled prop
- wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
- self.assertEqual(wp.values.dtype, np.object_)
-
- vals = self.panel.values
-
- # no copy
- wp = Panel(vals)
- self.assertIs(wp.values, vals)
-
- # copy
- wp = Panel(vals, copy=True)
- self.assertIsNot(wp.values, vals)
-
- # GH #8285, test when scalar data is used to construct a Panel
- # if dtype is not passed, it should be inferred
- value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
- ('foo', np.object_)]
- for (val, dtype) in value_and_dtype:
- wp = Panel(val, items=range(2), major_axis=range(3),
- minor_axis=range(4))
- vals = np.empty((2, 3, 4), dtype=dtype)
- vals.fill(val)
- assert_panel_equal(wp, Panel(vals, dtype=dtype))
-
- # test the case when dtype is passed
- wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
- dtype='float32')
- vals = np.empty((2, 3, 4), dtype='float32')
- vals.fill(1)
- assert_panel_equal(wp, Panel(vals, dtype='float32'))
+ with catch_warnings(record=True):
+ # with BlockManager
+ wp = Panel(self.panel._data)
+ self.assertIs(wp._data, self.panel._data)
+
+ wp = Panel(self.panel._data, copy=True)
+ self.assertIsNot(wp._data, self.panel._data)
+ assert_panel_equal(wp, self.panel)
+
+ # strings handled prop
+ wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
+ self.assertEqual(wp.values.dtype, np.object_)
+
+ vals = self.panel.values
+
+ # no copy
+ wp = Panel(vals)
+ self.assertIs(wp.values, vals)
+
+ # copy
+ wp = Panel(vals, copy=True)
+ self.assertIsNot(wp.values, vals)
+
+ # GH #8285, test when scalar data is used to construct a Panel
+ # if dtype is not passed, it should be inferred
+ value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
+ ('foo', np.object_)]
+ for (val, dtype) in value_and_dtype:
+ wp = Panel(val, items=range(2), major_axis=range(3),
+ minor_axis=range(4))
+ vals = np.empty((2, 3, 4), dtype=dtype)
+ vals.fill(val)
+ assert_panel_equal(wp, Panel(vals, dtype=dtype))
+
+ # test the case when dtype is passed
+ wp = Panel(1, items=range(2), major_axis=range(3),
+ minor_axis=range(4),
+ dtype='float32')
+ vals = np.empty((2, 3, 4), dtype='float32')
+ vals.fill(1)
+ assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
- zero_filled = self.panel.fillna(0)
+ with catch_warnings(record=True):
+ zero_filled = self.panel.fillna(0)
- casted = Panel(zero_filled._data, dtype=int)
- casted2 = Panel(zero_filled.values, dtype=int)
+ casted = Panel(zero_filled._data, dtype=int)
+ casted2 = Panel(zero_filled.values, dtype=int)
- exp_values = zero_filled.values.astype(int)
- assert_almost_equal(casted.values, exp_values)
- assert_almost_equal(casted2.values, exp_values)
+ exp_values = zero_filled.values.astype(int)
+ assert_almost_equal(casted.values, exp_values)
+ assert_almost_equal(casted2.values, exp_values)
- casted = Panel(zero_filled._data, dtype=np.int32)
- casted2 = Panel(zero_filled.values, dtype=np.int32)
+ casted = Panel(zero_filled._data, dtype=np.int32)
+ casted2 = Panel(zero_filled.values, dtype=np.int32)
- exp_values = zero_filled.values.astype(np.int32)
- assert_almost_equal(casted.values, exp_values)
- assert_almost_equal(casted2.values, exp_values)
+ exp_values = zero_filled.values.astype(np.int32)
+ assert_almost_equal(casted.values, exp_values)
+ assert_almost_equal(casted2.values, exp_values)
- # can't cast
- data = [[['foo', 'bar', 'baz']]]
- self.assertRaises(ValueError, Panel, data, dtype=float)
+ # can't cast
+ data = [[['foo', 'bar', 'baz']]]
+ self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
- empty = Panel()
- self.assertEqual(len(empty.items), 0)
- self.assertEqual(len(empty.major_axis), 0)
- self.assertEqual(len(empty.minor_axis), 0)
+ with catch_warnings(record=True):
+ empty = Panel()
+ self.assertEqual(len(empty.items), 0)
+ self.assertEqual(len(empty.major_axis), 0)
+ self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
- # GH #411
- panel = Panel(items=lrange(3), major_axis=lrange(3),
- minor_axis=lrange(3), dtype='O')
- self.assertEqual(panel.values.dtype, np.object_)
+ with catch_warnings(record=True):
+ # GH #411
+ panel = Panel(items=lrange(3), major_axis=lrange(3),
+ minor_axis=lrange(3), dtype='O')
+ self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
- # GH #797
-
- def _check_dtype(panel, dtype):
- for i in panel.items:
- self.assertEqual(panel[i].values.dtype.name, dtype)
-
- # only nan holding types allowed here
- for dtype in ['float64', 'float32', 'object']:
- panel = Panel(items=lrange(2), major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
- items=lrange(2),
- major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
- items=lrange(2),
- major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(np.random.randn(2, 10, 5), items=lrange(
- 2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- df1 = DataFrame(np.random.randn(2, 5),
- index=lrange(2), columns=lrange(5))
- df2 = DataFrame(np.random.randn(2, 5),
- index=lrange(2), columns=lrange(5))
- panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
- _check_dtype(panel, dtype)
+ with catch_warnings(record=True):
+ # GH #797
+
+ def _check_dtype(panel, dtype):
+ for i in panel.items:
+ self.assertEqual(panel[i].values.dtype.name, dtype)
+
+ # only nan holding types allowed here
+ for dtype in ['float64', 'float32', 'object']:
+ panel = Panel(items=lrange(2), major_axis=lrange(10),
+ minor_axis=lrange(5), dtype=dtype)
+ _check_dtype(panel, dtype)
+
+ for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
+ panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
+ items=lrange(2),
+ major_axis=lrange(10),
+ minor_axis=lrange(5), dtype=dtype)
+ _check_dtype(panel, dtype)
+
+ for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
+ panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
+ items=lrange(2),
+ major_axis=lrange(10),
+ minor_axis=lrange(5), dtype=dtype)
+ _check_dtype(panel, dtype)
+
+ for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
+ panel = Panel(
+ np.random.randn(2, 10, 5),
+ items=lrange(2), major_axis=lrange(10),
+ minor_axis=lrange(5),
+ dtype=dtype)
+ _check_dtype(panel, dtype)
+
+ for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
+ df1 = DataFrame(np.random.randn(2, 5),
+ index=lrange(2), columns=lrange(5))
+ df2 = DataFrame(np.random.randn(2, 5),
+ index=lrange(2), columns=lrange(5))
+ panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
+ _check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
- with tm.assertRaisesRegexp(ValueError,
- "The number of dimensions required is 3"):
- Panel(np.random.randn(10, 2))
+ with catch_warnings(record=True):
+ with tm.assertRaisesRegexp(ValueError, "The number of dimensions required is 3"): # noqa
+ Panel(np.random.randn(10, 2))
def test_consolidate(self):
- self.assertTrue(self.panel._data.is_consolidated())
+ with catch_warnings(record=True):
+ self.assertTrue(self.panel._data.is_consolidated())
- self.panel['foo'] = 1.
- self.assertFalse(self.panel._data.is_consolidated())
+ self.panel['foo'] = 1.
+ self.assertFalse(self.panel._data.is_consolidated())
- panel = self.panel._consolidate()
- self.assertTrue(panel._data.is_consolidated())
+ panel = self.panel._consolidate()
+ self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
- itema = self.panel['ItemA']
- itemb = self.panel['ItemB']
+ with catch_warnings(record=True):
+ itema = self.panel['ItemA']
+ itemb = self.panel['ItemB']
- d = {'A': itema, 'B': itemb[5:]}
- d2 = {'A': itema._series, 'B': itemb[5:]._series}
- d3 = {'A': None,
- 'B': DataFrame(itemb[5:]._series),
- 'C': DataFrame(itema._series)}
+ d = {'A': itema, 'B': itemb[5:]}
+ d2 = {'A': itema._series, 'B': itemb[5:]._series}
+ d3 = {'A': None,
+ 'B': DataFrame(itemb[5:]._series),
+ 'C': DataFrame(itema._series)}
- wp = Panel.from_dict(d)
- wp2 = Panel.from_dict(d2) # nested Dict
+ wp = Panel.from_dict(d)
+ wp2 = Panel.from_dict(d2) # nested Dict
- # TODO: unused?
- wp3 = Panel.from_dict(d3) # noqa
+ # TODO: unused?
+ wp3 = Panel.from_dict(d3) # noqa
- self.assert_index_equal(wp.major_axis, self.panel.major_axis)
- assert_panel_equal(wp, wp2)
+ self.assert_index_equal(wp.major_axis, self.panel.major_axis)
+ assert_panel_equal(wp, wp2)
- # intersect
- wp = Panel.from_dict(d, intersect=True)
- self.assert_index_equal(wp.major_axis, itemb.index[5:])
+ # intersect
+ wp = Panel.from_dict(d, intersect=True)
+ self.assert_index_equal(wp.major_axis, itemb.index[5:])
- # use constructor
- assert_panel_equal(Panel(d), Panel.from_dict(d))
- assert_panel_equal(Panel(d2), Panel.from_dict(d2))
- assert_panel_equal(Panel(d3), Panel.from_dict(d3))
+ # use constructor
+ assert_panel_equal(Panel(d), Panel.from_dict(d))
+ assert_panel_equal(Panel(d2), Panel.from_dict(d2))
+ assert_panel_equal(Panel(d3), Panel.from_dict(d3))
- # a pathological case
- d4 = {'A': None, 'B': None}
+ # a pathological case
+ d4 = {'A': None, 'B': None}
- # TODO: unused?
- wp4 = Panel.from_dict(d4) # noqa
+ # TODO: unused?
+ wp4 = Panel.from_dict(d4) # noqa
- assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
+ assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
- # cast
- dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
- for k, v in compat.iteritems(d))
- result = Panel(dcasted, dtype=int)
- expected = Panel(dict((k, v.astype(int))
- for k, v in compat.iteritems(dcasted)))
- assert_panel_equal(result, expected)
+ # cast
+ dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
+ for k, v in compat.iteritems(d))
+ result = Panel(dcasted, dtype=int)
+ expected = Panel(dict((k, v.astype(int))
+ for k, v in compat.iteritems(dcasted)))
+ assert_panel_equal(result, expected)
- result = Panel(dcasted, dtype=np.int32)
- expected = Panel(dict((k, v.astype(np.int32))
- for k, v in compat.iteritems(dcasted)))
- assert_panel_equal(result, expected)
+ result = Panel(dcasted, dtype=np.int32)
+ expected = Panel(dict((k, v.astype(np.int32))
+ for k, v in compat.iteritems(dcasted)))
+ assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
- data = dict((k, v.values) for k, v in self.panel.iteritems())
- result = Panel(data)
- exp_major = Index(np.arange(len(self.panel.major_axis)))
- self.assert_index_equal(result.major_axis, exp_major)
+ with catch_warnings(record=True):
+ data = dict((k, v.values) for k, v in self.panel.iteritems())
+ result = Panel(data)
+ exp_major = Index(np.arange(len(self.panel.major_axis)))
+ self.assert_index_equal(result.major_axis, exp_major)
- result = Panel(data, items=self.panel.items,
- major_axis=self.panel.major_axis,
- minor_axis=self.panel.minor_axis)
- assert_panel_equal(result, self.panel)
+ result = Panel(data, items=self.panel.items,
+ major_axis=self.panel.major_axis,
+ minor_axis=self.panel.minor_axis)
+ assert_panel_equal(result, self.panel)
- data['ItemC'] = self.panel['ItemC']
- result = Panel(data)
- assert_panel_equal(result, self.panel)
+ data['ItemC'] = self.panel['ItemC']
+ result = Panel(data)
+ assert_panel_equal(result, self.panel)
- # corner, blow up
- data['ItemB'] = data['ItemB'][:-1]
- self.assertRaises(Exception, Panel, data)
+ # corner, blow up
+ data['ItemB'] = data['ItemB'][:-1]
+ self.assertRaises(Exception, Panel, data)
- data['ItemB'] = self.panel['ItemB'].values[:, :-1]
- self.assertRaises(Exception, Panel, data)
+ data['ItemB'] = self.panel['ItemB'].values[:, :-1]
+ self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
- keys = list(set(np.random.randint(0, 5000, 100)))[
- :50] # unique random int keys
- d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
- p = Panel(d)
- self.assertTrue(list(p.items) == keys)
+ with catch_warnings(record=True):
+ keys = list(set(np.random.randint(0, 5000, 100)))[
+ :50] # unique random int keys
+ d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
+ p = Panel(d)
+ self.assertTrue(list(p.items) == keys)
- p = Panel.from_dict(d)
- self.assertTrue(list(p.items) == keys)
+ p = Panel.from_dict(d)
+ self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
- data = self.panel._data
- items = self.panel.items[:-1]
- major = self.panel.major_axis[:-1]
- minor = self.panel.minor_axis[:-1]
-
- result = Panel(data, items=items, major_axis=major, minor_axis=minor)
- expected = self.panel.reindex(items=items, major=major, minor=minor)
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ data = self.panel._data
+ items = self.panel.items[:-1]
+ major = self.panel.major_axis[:-1]
+ minor = self.panel.minor_axis[:-1]
+
+ result = Panel(data, items=items,
+ major_axis=major, minor_axis=minor)
+ expected = self.panel.reindex(
+ items=items, major=major, minor=minor)
+ assert_panel_equal(result, expected)
- result = Panel(data, items=items, major_axis=major)
- expected = self.panel.reindex(items=items, major=major)
- assert_panel_equal(result, expected)
+ result = Panel(data, items=items, major_axis=major)
+ expected = self.panel.reindex(items=items, major=major)
+ assert_panel_equal(result, expected)
- result = Panel(data, items=items)
- expected = self.panel.reindex(items=items)
- assert_panel_equal(result, expected)
+ result = Panel(data, items=items)
+ expected = self.panel.reindex(items=items)
+ assert_panel_equal(result, expected)
- result = Panel(data, minor_axis=minor)
- expected = self.panel.reindex(minor=minor)
- assert_panel_equal(result, expected)
+ result = Panel(data, minor_axis=minor)
+ expected = self.panel.reindex(minor=minor)
+ assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
- df = tm.makeDataFrame()
- df['foo'] = 'bar'
+ with catch_warnings(record=True):
+ df = tm.makeDataFrame()
+ df['foo'] = 'bar'
- data = {'k1': df, 'k2': df}
+ data = {'k1': df, 'k2': df}
- panel = Panel.from_dict(data, orient='minor')
+ panel = Panel.from_dict(data, orient='minor')
- self.assertEqual(panel['foo'].values.dtype, np.object_)
- self.assertEqual(panel['A'].values.dtype, np.float64)
+ self.assertEqual(panel['foo'].values.dtype, np.object_)
+ self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
- def testit():
- Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
+ with catch_warnings(record=True):
+ def testit():
+ Panel(np.random.randn(3, 4, 5),
+ lrange(4), lrange(5), lrange(5))
- assertRaisesRegexp(ValueError,
- r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(4, 5, 5\)",
- testit)
+ assertRaisesRegexp(ValueError,
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(4, 5, 5\)",
+ testit)
- def testit():
- Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
+ def testit():
+ Panel(np.random.randn(3, 4, 5),
+ lrange(5), lrange(4), lrange(5))
- assertRaisesRegexp(ValueError,
- r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(5, 4, 5\)",
- testit)
+ assertRaisesRegexp(ValueError,
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(5, 4, 5\)",
+ testit)
- def testit():
- Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
+ def testit():
+ Panel(np.random.randn(3, 4, 5),
+ lrange(5), lrange(5), lrange(4))
- assertRaisesRegexp(ValueError,
- r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(5, 5, 4\)",
- testit)
+ assertRaisesRegexp(ValueError,
+ r"Shape of passed values is \(3, 4, 5\), "
+ r"indices imply \(5, 5, 4\)",
+ testit)
def test_conform(self):
- df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
- conformed = self.panel.conform(df)
+ with catch_warnings(record=True):
+ df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
+ conformed = self.panel.conform(df)
- tm.assert_index_equal(conformed.index, self.panel.major_axis)
- tm.assert_index_equal(conformed.columns, self.panel.minor_axis)
+ tm.assert_index_equal(conformed.index, self.panel.major_axis)
+ tm.assert_index_equal(conformed.columns, self.panel.minor_axis)
def test_convert_objects(self):
+ with catch_warnings(record=True):
- # GH 4937
- p = Panel(dict(A=dict(a=['1', '1.0'])))
- expected = Panel(dict(A=dict(a=[1, 1.0])))
- result = p._convert(numeric=True, coerce=True)
- assert_panel_equal(result, expected)
+ # GH 4937
+ p = Panel(dict(A=dict(a=['1', '1.0'])))
+ expected = Panel(dict(A=dict(a=[1, 1.0])))
+ result = p._convert(numeric=True, coerce=True)
+ assert_panel_equal(result, expected)
def test_dtypes(self):
@@ -1166,875 +1225,940 @@ def test_dtypes(self):
assert_series_equal(result, expected)
def test_astype(self):
- # GH7271
- data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
- panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
+ with catch_warnings(record=True):
+ # GH7271
+ data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+ panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
- str_data = np.array([[['1', '2'], ['3', '4']],
- [['5', '6'], ['7', '8']]])
- expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
- assert_panel_equal(panel.astype(str), expected)
+ str_data = np.array([[['1', '2'], ['3', '4']],
+ [['5', '6'], ['7', '8']]])
+ expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
+ assert_panel_equal(panel.astype(str), expected)
- self.assertRaises(NotImplementedError, panel.astype, {0: str})
+ self.assertRaises(NotImplementedError, panel.astype, {0: str})
def test_apply(self):
- # GH1148
-
- # ufunc
- applied = self.panel.apply(np.sqrt)
- with np.errstate(invalid='ignore'):
- expected = np.sqrt(self.panel.values)
- assert_almost_equal(applied.values, expected)
-
- # ufunc same shape
- result = self.panel.apply(lambda x: x * 2, axis='items')
- expected = self.panel * 2
- assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x * 2, axis='major_axis')
- expected = self.panel * 2
- assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
- expected = self.panel * 2
- assert_panel_equal(result, expected)
-
- # reduction to DataFrame
- result = self.panel.apply(lambda x: x.dtype, axis='items')
- expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
- columns=self.panel.minor_axis)
- assert_frame_equal(result, expected)
- result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
- expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
- columns=self.panel.items)
- assert_frame_equal(result, expected)
- result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
- expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
- columns=self.panel.items)
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ # GH1148
- # reductions via other dims
- expected = self.panel.sum(0)
- result = self.panel.apply(lambda x: x.sum(), axis='items')
- assert_frame_equal(result, expected)
- expected = self.panel.sum(1)
- result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
- assert_frame_equal(result, expected)
- expected = self.panel.sum(2)
- result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
- assert_frame_equal(result, expected)
+ # ufunc
+ applied = self.panel.apply(np.sqrt)
+ with np.errstate(invalid='ignore'):
+ expected = np.sqrt(self.panel.values)
+ assert_almost_equal(applied.values, expected)
- # pass kwargs
- result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
- expected = self.panel.sum(0) + 5
- assert_frame_equal(result, expected)
+ # ufunc same shape
+ result = self.panel.apply(lambda x: x * 2, axis='items')
+ expected = self.panel * 2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2, axis='major_axis')
+ expected = self.panel * 2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
+ expected = self.panel * 2
+ assert_panel_equal(result, expected)
+
+ # reduction to DataFrame
+ result = self.panel.apply(lambda x: x.dtype, axis='items')
+ expected = DataFrame(np.dtype('float64'),
+ index=self.panel.major_axis,
+ columns=self.panel.minor_axis)
+ assert_frame_equal(result, expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
+ expected = DataFrame(np.dtype('float64'),
+ index=self.panel.minor_axis,
+ columns=self.panel.items)
+ assert_frame_equal(result, expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
+ expected = DataFrame(np.dtype('float64'),
+ index=self.panel.major_axis,
+ columns=self.panel.items)
+ assert_frame_equal(result, expected)
+
+ # reductions via other dims
+ expected = self.panel.sum(0)
+ result = self.panel.apply(lambda x: x.sum(), axis='items')
+ assert_frame_equal(result, expected)
+ expected = self.panel.sum(1)
+ result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
+ assert_frame_equal(result, expected)
+ expected = self.panel.sum(2)
+ result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
+ assert_frame_equal(result, expected)
+
+ # pass kwargs
+ result = self.panel.apply(
+ lambda x, y: x.sum() + y, axis='items', y=5)
+ expected = self.panel.sum(0) + 5
+ assert_frame_equal(result, expected)
def test_apply_slabs(self):
+ with catch_warnings(record=True):
- # same shape as original
- result = self.panel.apply(lambda x: x * 2,
- axis=['items', 'major_axis'])
- expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
- 'items')
- assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x * 2,
- axis=['major_axis', 'items'])
- assert_panel_equal(result, expected)
-
- result = self.panel.apply(lambda x: x * 2,
- axis=['items', 'minor_axis'])
- expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
- 'items')
- assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x * 2,
- axis=['minor_axis', 'items'])
- assert_panel_equal(result, expected)
-
- result = self.panel.apply(lambda x: x * 2,
- axis=['major_axis', 'minor_axis'])
- expected = self.panel * 2
- assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x * 2,
- axis=['minor_axis', 'major_axis'])
- assert_panel_equal(result, expected)
-
- # reductions
- result = self.panel.apply(lambda x: x.sum(0), axis=[
- 'items', 'major_axis'
- ])
- expected = self.panel.sum(1).T
- assert_frame_equal(result, expected)
+ # same shape as original
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['items', 'major_axis'])
+ expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
+ 'items')
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['major_axis', 'items'])
+ assert_panel_equal(result, expected)
- result = self.panel.apply(lambda x: x.sum(1), axis=[
- 'items', 'major_axis'
- ])
- expected = self.panel.sum(0)
- assert_frame_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['items', 'minor_axis'])
+ expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
+ 'items')
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['minor_axis', 'items'])
+ assert_panel_equal(result, expected)
+
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['major_axis', 'minor_axis'])
+ expected = self.panel * 2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x * 2,
+ axis=['minor_axis', 'major_axis'])
+ assert_panel_equal(result, expected)
- # transforms
- f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
+ # reductions
+ result = self.panel.apply(lambda x: x.sum(0), axis=[
+ 'items', 'major_axis'
+ ])
+ expected = self.panel.sum(1).T
+ assert_frame_equal(result, expected)
# make sure that we don't trigger any warnings
with catch_warnings(record=True):
+ result = self.panel.apply(lambda x: x.sum(1), axis=[
+ 'items', 'major_axis'
+ ])
+ expected = self.panel.sum(0)
+ assert_frame_equal(result, expected)
+
+ # transforms
+ f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
+
+ # make sure that we don't trigger any warnings
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
- result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
- expected = Panel(dict([(ax, f(self.panel.loc[ax]))
- for ax in self.panel.items]))
- assert_panel_equal(result, expected)
-
- result = self.panel.apply(f, axis=['minor_axis', 'items'])
- expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
- for ax in self.panel.major_axis]))
- assert_panel_equal(result, expected)
-
- # with multi-indexes
- # GH7469
- index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
- 'two', 'a'), ('two', 'b')])
- dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
- 4, 3), columns=list("ABC"), index=index)
- dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
- 4, 3), columns=list("ABC"), index=index)
- p = Panel({'f': dfa, 'g': dfb})
- result = p.apply(lambda x: x.sum(), axis=0)
-
- # on windows this will be in32
- result = result.astype('int64')
- expected = p.sum(0)
- assert_frame_equal(result, expected)
+ result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
+ expected = Panel(dict([(ax, f(self.panel.loc[ax]))
+ for ax in self.panel.items]))
+ assert_panel_equal(result, expected)
+
+ result = self.panel.apply(f, axis=['minor_axis', 'items'])
+ expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
+ for ax in self.panel.major_axis]))
+ assert_panel_equal(result, expected)
+
+ # with multi-indexes
+ # GH7469
+ index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
+ 'two', 'a'), ('two', 'b')])
+ dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
+ 4, 3), columns=list("ABC"), index=index)
+ dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
+ 4, 3), columns=list("ABC"), index=index)
+ p = Panel({'f': dfa, 'g': dfb})
+ result = p.apply(lambda x: x.sum(), axis=0)
+
+ # on windows this will be in32
+ result = result.astype('int64')
+ expected = p.sum(0)
+ assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
- # GH10332
- self.panel = Panel(np.random.rand(5, 5, 5))
+ with catch_warnings(record=True):
+ # GH10332
+ self.panel = Panel(np.random.rand(5, 5, 5))
- result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
- result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
- result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
- result_float64 = self.panel.apply(lambda df: np.float64(0.0),
- axis=[1, 2])
+ result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
+ result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
+ result_int64 = self.panel.apply(
+ lambda df: np.int64(0), axis=[1, 2])
+ result_float64 = self.panel.apply(lambda df: np.float64(0.0),
+ axis=[1, 2])
- expected_int = expected_int64 = Series([0] * 5)
- expected_float = expected_float64 = Series([0.0] * 5)
+ expected_int = expected_int64 = Series([0] * 5)
+ expected_float = expected_float64 = Series([0.0] * 5)
- assert_series_equal(result_int, expected_int)
- assert_series_equal(result_int64, expected_int64)
- assert_series_equal(result_float, expected_float)
- assert_series_equal(result_float64, expected_float64)
+ assert_series_equal(result_int, expected_int)
+ assert_series_equal(result_int64, expected_int64)
+ assert_series_equal(result_float, expected_float)
+ assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
- ref = self.panel['ItemB']
+ with catch_warnings(record=True):
+ ref = self.panel['ItemB']
- # items
- result = self.panel.reindex(items=['ItemA', 'ItemB'])
- assert_frame_equal(result['ItemB'], ref)
+ # items
+ result = self.panel.reindex(items=['ItemA', 'ItemB'])
+ assert_frame_equal(result['ItemB'], ref)
- # major
- new_major = list(self.panel.major_axis[:10])
- result = self.panel.reindex(major=new_major)
- assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
+ # major
+ new_major = list(self.panel.major_axis[:10])
+ result = self.panel.reindex(major=new_major)
+ assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
- # raise exception put both major and major_axis
- self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
- major=new_major)
+ # raise exception put both major and major_axis
+ self.assertRaises(Exception, self.panel.reindex,
+ major_axis=new_major,
+ major=new_major)
- # minor
- new_minor = list(self.panel.minor_axis[:2])
- result = self.panel.reindex(minor=new_minor)
- assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
+ # minor
+ new_minor = list(self.panel.minor_axis[:2])
+ result = self.panel.reindex(minor=new_minor)
+ assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
- # this ok
- result = self.panel.reindex()
- assert_panel_equal(result, self.panel)
- self.assertFalse(result is self.panel)
+ # this ok
+ result = self.panel.reindex()
+ assert_panel_equal(result, self.panel)
+ self.assertFalse(result is self.panel)
- # with filling
- smaller_major = self.panel.major_axis[::5]
- smaller = self.panel.reindex(major=smaller_major)
+ # with filling
+ smaller_major = self.panel.major_axis[::5]
+ smaller = self.panel.reindex(major=smaller_major)
- larger = smaller.reindex(major=self.panel.major_axis, method='pad')
+ larger = smaller.reindex(major=self.panel.major_axis, method='pad')
- assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
- smaller.major_xs(smaller_major[0]))
+ assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
+ smaller.major_xs(smaller_major[0]))
- # don't necessarily copy
- result = self.panel.reindex(major=self.panel.major_axis, copy=False)
- assert_panel_equal(result, self.panel)
- self.assertTrue(result is self.panel)
+ # don't necessarily copy
+ result = self.panel.reindex(
+ major=self.panel.major_axis, copy=False)
+ assert_panel_equal(result, self.panel)
+ self.assertTrue(result is self.panel)
def test_reindex_multi(self):
+ with catch_warnings(record=True):
- # with and without copy full reindexing
- result = self.panel.reindex(items=self.panel.items,
- major=self.panel.major_axis,
- minor=self.panel.minor_axis, copy=False)
-
- self.assertIs(result.items, self.panel.items)
- self.assertIs(result.major_axis, self.panel.major_axis)
- self.assertIs(result.minor_axis, self.panel.minor_axis)
-
- result = self.panel.reindex(items=self.panel.items,
- major=self.panel.major_axis,
- minor=self.panel.minor_axis, copy=False)
- assert_panel_equal(result, self.panel)
-
- # multi-axis indexing consistency
- # GH 5900
- df = DataFrame(np.random.randn(4, 3))
- p = Panel({'Item1': df})
- expected = Panel({'Item1': df})
- expected['Item2'] = np.nan
-
- items = ['Item1', 'Item2']
- major_axis = np.arange(4)
- minor_axis = np.arange(3)
-
- results = []
- results.append(p.reindex(items=items, major_axis=major_axis,
- copy=True))
- results.append(p.reindex(items=items, major_axis=major_axis,
- copy=False))
- results.append(p.reindex(items=items, minor_axis=minor_axis,
- copy=True))
- results.append(p.reindex(items=items, minor_axis=minor_axis,
- copy=False))
- results.append(p.reindex(items=items, major_axis=major_axis,
- minor_axis=minor_axis, copy=True))
- results.append(p.reindex(items=items, major_axis=major_axis,
- minor_axis=minor_axis, copy=False))
-
- for i, r in enumerate(results):
- assert_panel_equal(expected, r)
+ # with and without copy full reindexing
+ result = self.panel.reindex(
+ items=self.panel.items,
+ major=self.panel.major_axis,
+ minor=self.panel.minor_axis, copy=False)
+
+ self.assertIs(result.items, self.panel.items)
+ self.assertIs(result.major_axis, self.panel.major_axis)
+ self.assertIs(result.minor_axis, self.panel.minor_axis)
+
+ result = self.panel.reindex(
+ items=self.panel.items,
+ major=self.panel.major_axis,
+ minor=self.panel.minor_axis, copy=False)
+ assert_panel_equal(result, self.panel)
+
+ # multi-axis indexing consistency
+ # GH 5900
+ df = DataFrame(np.random.randn(4, 3))
+ p = Panel({'Item1': df})
+ expected = Panel({'Item1': df})
+ expected['Item2'] = np.nan
+
+ items = ['Item1', 'Item2']
+ major_axis = np.arange(4)
+ minor_axis = np.arange(3)
+
+ results = []
+ results.append(p.reindex(items=items, major_axis=major_axis,
+ copy=True))
+ results.append(p.reindex(items=items, major_axis=major_axis,
+ copy=False))
+ results.append(p.reindex(items=items, minor_axis=minor_axis,
+ copy=True))
+ results.append(p.reindex(items=items, minor_axis=minor_axis,
+ copy=False))
+ results.append(p.reindex(items=items, major_axis=major_axis,
+ minor_axis=minor_axis, copy=True))
+ results.append(p.reindex(items=items, major_axis=major_axis,
+ minor_axis=minor_axis, copy=False))
+
+ for i, r in enumerate(results):
+ assert_panel_equal(expected, r)
def test_reindex_like(self):
- # reindex_like
- smaller = self.panel.reindex(items=self.panel.items[:-1],
- major=self.panel.major_axis[:-1],
- minor=self.panel.minor_axis[:-1])
- smaller_like = self.panel.reindex_like(smaller)
- assert_panel_equal(smaller, smaller_like)
+ with catch_warnings(record=True):
+ # reindex_like
+ smaller = self.panel.reindex(items=self.panel.items[:-1],
+ major=self.panel.major_axis[:-1],
+ minor=self.panel.minor_axis[:-1])
+ smaller_like = self.panel.reindex_like(smaller)
+ assert_panel_equal(smaller, smaller_like)
def test_take(self):
- # axis == 0
- result = self.panel.take([2, 0, 1], axis=0)
- expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ # axis == 0
+ result = self.panel.take([2, 0, 1], axis=0)
+ expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
+ assert_panel_equal(result, expected)
- # axis >= 1
- result = self.panel.take([3, 0, 1, 2], axis=2)
- expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
- assert_panel_equal(result, expected)
+ # axis >= 1
+ result = self.panel.take([3, 0, 1, 2], axis=2)
+ expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
+ assert_panel_equal(result, expected)
- # neg indicies ok
- expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
- result = self.panel.take([3, -1, 1, 2], axis=2)
- assert_panel_equal(result, expected)
+ # neg indicies ok
+ expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
+ result = self.panel.take([3, -1, 1, 2], axis=2)
+ assert_panel_equal(result, expected)
- self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
+ self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
- import random
-
- ritems = list(self.panel.items)
- rmajor = list(self.panel.major_axis)
- rminor = list(self.panel.minor_axis)
- random.shuffle(ritems)
- random.shuffle(rmajor)
- random.shuffle(rminor)
-
- random_order = self.panel.reindex(items=ritems)
- sorted_panel = random_order.sort_index(axis=0)
- assert_panel_equal(sorted_panel, self.panel)
-
- # descending
- random_order = self.panel.reindex(items=ritems)
- sorted_panel = random_order.sort_index(axis=0, ascending=False)
- assert_panel_equal(sorted_panel,
- self.panel.reindex(items=self.panel.items[::-1]))
-
- random_order = self.panel.reindex(major=rmajor)
- sorted_panel = random_order.sort_index(axis=1)
- assert_panel_equal(sorted_panel, self.panel)
-
- random_order = self.panel.reindex(minor=rminor)
- sorted_panel = random_order.sort_index(axis=2)
- assert_panel_equal(sorted_panel, self.panel)
+ with catch_warnings(record=True):
+ import random
+
+ ritems = list(self.panel.items)
+ rmajor = list(self.panel.major_axis)
+ rminor = list(self.panel.minor_axis)
+ random.shuffle(ritems)
+ random.shuffle(rmajor)
+ random.shuffle(rminor)
+
+ random_order = self.panel.reindex(items=ritems)
+ sorted_panel = random_order.sort_index(axis=0)
+ assert_panel_equal(sorted_panel, self.panel)
+
+ # descending
+ random_order = self.panel.reindex(items=ritems)
+ sorted_panel = random_order.sort_index(axis=0, ascending=False)
+ assert_panel_equal(
+ sorted_panel,
+ self.panel.reindex(items=self.panel.items[::-1]))
+
+ random_order = self.panel.reindex(major=rmajor)
+ sorted_panel = random_order.sort_index(axis=1)
+ assert_panel_equal(sorted_panel, self.panel)
+
+ random_order = self.panel.reindex(minor=rminor)
+ sorted_panel = random_order.sort_index(axis=2)
+ assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
- filled = self.panel.fillna(0)
- self.assertTrue(np.isfinite(filled.values).all())
-
- filled = self.panel.fillna(method='backfill')
- assert_frame_equal(filled['ItemA'],
- self.panel['ItemA'].fillna(method='backfill'))
-
- panel = self.panel.copy()
- panel['str'] = 'foo'
-
- filled = panel.fillna(method='backfill')
- assert_frame_equal(filled['ItemA'],
- panel['ItemA'].fillna(method='backfill'))
-
- empty = self.panel.reindex(items=[])
- filled = empty.fillna(0)
- assert_panel_equal(filled, empty)
-
- self.assertRaises(ValueError, self.panel.fillna)
- self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
-
- self.assertRaises(TypeError, self.panel.fillna, [1, 2])
- self.assertRaises(TypeError, self.panel.fillna, (1, 2))
-
- # limit not implemented when only value is specified
- p = Panel(np.random.randn(3, 4, 5))
- p.iloc[0:2, 0:2, 0:2] = np.nan
- self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
-
- # Test in place fillNA
- # Expected result
- expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]],
- items=['a', 'b'], minor_axis=['x', 'y'],
- dtype=np.float64)
- # method='ffill'
- p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]],
- items=['a', 'b'], minor_axis=['x', 'y'],
- dtype=np.float64)
- p1.fillna(method='ffill', inplace=True)
- assert_panel_equal(p1, expected)
-
- # method='bfill'
- p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]],
- items=['a', 'b'], minor_axis=['x', 'y'], dtype=np.float64)
- p2.fillna(method='bfill', inplace=True)
- assert_panel_equal(p2, expected)
+ with catch_warnings(record=True):
+ filled = self.panel.fillna(0)
+ self.assertTrue(np.isfinite(filled.values).all())
+
+ filled = self.panel.fillna(method='backfill')
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill'))
+
+ panel = self.panel.copy()
+ panel['str'] = 'foo'
+
+ filled = panel.fillna(method='backfill')
+ assert_frame_equal(filled['ItemA'],
+ panel['ItemA'].fillna(method='backfill'))
+
+ empty = self.panel.reindex(items=[])
+ filled = empty.fillna(0)
+ assert_panel_equal(filled, empty)
+
+ self.assertRaises(ValueError, self.panel.fillna)
+ self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
+
+ self.assertRaises(TypeError, self.panel.fillna, [1, 2])
+ self.assertRaises(TypeError, self.panel.fillna, (1, 2))
+
+ # limit not implemented when only value is specified
+ p = Panel(np.random.randn(3, 4, 5))
+ p.iloc[0:2, 0:2, 0:2] = np.nan
+ self.assertRaises(NotImplementedError,
+ lambda: p.fillna(999, limit=1))
+
+ # Test in place fillNA
+ # Expected result
+ expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]],
+ items=['a', 'b'], minor_axis=['x', 'y'],
+ dtype=np.float64)
+ # method='ffill'
+ p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]],
+ items=['a', 'b'], minor_axis=['x', 'y'],
+ dtype=np.float64)
+ p1.fillna(method='ffill', inplace=True)
+ assert_panel_equal(p1, expected)
+
+ # method='bfill'
+ p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]],
+ items=['a', 'b'], minor_axis=['x', 'y'],
+ dtype=np.float64)
+ p2.fillna(method='bfill', inplace=True)
+ assert_panel_equal(p2, expected)
def test_ffill_bfill(self):
- assert_panel_equal(self.panel.ffill(),
- self.panel.fillna(method='ffill'))
- assert_panel_equal(self.panel.bfill(),
- self.panel.fillna(method='bfill'))
+ with catch_warnings(record=True):
+ assert_panel_equal(self.panel.ffill(),
+ self.panel.fillna(method='ffill'))
+ assert_panel_equal(self.panel.bfill(),
+ self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
- # #1823
- result = self.panel.truncate(before=None, after=None, axis='items')
+ with catch_warnings(record=True):
+ # #1823
+ result = self.panel.truncate(before=None, after=None, axis='items')
- # it works!
- result.fillna(value=0.0)
+ # it works!
+ result.fillna(value=0.0)
def test_swapaxes(self):
- result = self.panel.swapaxes('items', 'minor')
- self.assertIs(result.items, self.panel.minor_axis)
+ with catch_warnings(record=True):
+ result = self.panel.swapaxes('items', 'minor')
+ self.assertIs(result.items, self.panel.minor_axis)
- result = self.panel.swapaxes('items', 'major')
- self.assertIs(result.items, self.panel.major_axis)
+ result = self.panel.swapaxes('items', 'major')
+ self.assertIs(result.items, self.panel.major_axis)
- result = self.panel.swapaxes('major', 'minor')
- self.assertIs(result.major_axis, self.panel.minor_axis)
+ result = self.panel.swapaxes('major', 'minor')
+ self.assertIs(result.major_axis, self.panel.minor_axis)
- panel = self.panel.copy()
- result = panel.swapaxes('major', 'minor')
- panel.values[0, 0, 1] = np.nan
- expected = panel.swapaxes('major', 'minor')
- assert_panel_equal(result, expected)
+ panel = self.panel.copy()
+ result = panel.swapaxes('major', 'minor')
+ panel.values[0, 0, 1] = np.nan
+ expected = panel.swapaxes('major', 'minor')
+ assert_panel_equal(result, expected)
- # this should also work
- result = self.panel.swapaxes(0, 1)
- self.assertIs(result.items, self.panel.major_axis)
+ # this should also work
+ result = self.panel.swapaxes(0, 1)
+ self.assertIs(result.items, self.panel.major_axis)
- # this works, but return a copy
- result = self.panel.swapaxes('items', 'items')
- assert_panel_equal(self.panel, result)
- self.assertNotEqual(id(self.panel), id(result))
+ # this works, but return a copy
+ result = self.panel.swapaxes('items', 'items')
+ assert_panel_equal(self.panel, result)
+ self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
- result = self.panel.transpose('minor', 'major', 'items')
- expected = self.panel.swapaxes('items', 'minor')
- assert_panel_equal(result, expected)
-
- # test kwargs
- result = self.panel.transpose(items='minor', major='major',
- minor='items')
- expected = self.panel.swapaxes('items', 'minor')
- assert_panel_equal(result, expected)
-
- # text mixture of args
- result = self.panel.transpose('minor', major='major', minor='items')
- expected = self.panel.swapaxes('items', 'minor')
- assert_panel_equal(result, expected)
-
- result = self.panel.transpose('minor', 'major', minor='items')
- expected = self.panel.swapaxes('items', 'minor')
- assert_panel_equal(result, expected)
-
- # duplicate axes
- with tm.assertRaisesRegexp(TypeError,
- 'not enough/duplicate arguments'):
- self.panel.transpose('minor', maj='major', minor='items')
+ with catch_warnings(record=True):
+ result = self.panel.transpose('minor', 'major', 'items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
- with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
- self.panel.transpose('minor', 'major', major='minor',
- minor='items')
+ # test kwargs
+ result = self.panel.transpose(items='minor', major='major',
+ minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
- result = self.panel.transpose(2, 1, 0)
- assert_panel_equal(result, expected)
+ # text mixture of args
+ result = self.panel.transpose(
+ 'minor', major='major', minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
- result = self.panel.transpose('minor', 'items', 'major')
- expected = self.panel.swapaxes('items', 'minor')
- expected = expected.swapaxes('major', 'minor')
- assert_panel_equal(result, expected)
+ result = self.panel.transpose('minor',
+ 'major',
+ minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
- result = self.panel.transpose(2, 0, 1)
- assert_panel_equal(result, expected)
+ # duplicate axes
+ with tm.assertRaisesRegexp(TypeError,
+ 'not enough/duplicate arguments'):
+ self.panel.transpose('minor', maj='major', minor='items')
- self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
+ with tm.assertRaisesRegexp(ValueError,
+ 'repeated axis in transpose'):
+ self.panel.transpose('minor', 'major', major='minor',
+ minor='items')
+
+ result = self.panel.transpose(2, 1, 0)
+ assert_panel_equal(result, expected)
+
+ result = self.panel.transpose('minor', 'items', 'major')
+ expected = self.panel.swapaxes('items', 'minor')
+ expected = expected.swapaxes('major', 'minor')
+ assert_panel_equal(result, expected)
+
+ result = self.panel.transpose(2, 0, 1)
+ assert_panel_equal(result, expected)
+
+ self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
- panel = self.panel.copy()
- result = panel.transpose(2, 0, 1, copy=True)
- expected = panel.swapaxes('items', 'minor')
- expected = expected.swapaxes('major', 'minor')
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ panel = self.panel.copy()
+ result = panel.transpose(2, 0, 1, copy=True)
+ expected = panel.swapaxes('items', 'minor')
+ expected = expected.swapaxes('major', 'minor')
+ assert_panel_equal(result, expected)
- panel.values[0, 1, 1] = np.nan
- self.assertTrue(notnull(result.values[1, 0, 1]))
+ panel.values[0, 1, 1] = np.nan
+ self.assertTrue(notnull(result.values[1, 0, 1]))
def test_to_frame(self):
- # filtered
- filtered = self.panel.to_frame()
- expected = self.panel.to_frame().dropna(how='any')
- assert_frame_equal(filtered, expected)
-
- # unfiltered
- unfiltered = self.panel.to_frame(filter_observations=False)
- assert_panel_equal(unfiltered.to_panel(), self.panel)
-
- # names
- self.assertEqual(unfiltered.index.names, ('major', 'minor'))
-
- # unsorted, round trip
- df = self.panel.to_frame(filter_observations=False)
- unsorted = df.take(np.random.permutation(len(df)))
- pan = unsorted.to_panel()
- assert_panel_equal(pan, self.panel)
-
- # preserve original index names
- df = DataFrame(np.random.randn(6, 2),
- index=[['a', 'a', 'b', 'b', 'c', 'c'],
- [0, 1, 0, 1, 0, 1]],
- columns=['one', 'two'])
- df.index.names = ['foo', 'bar']
- df.columns.name = 'baz'
-
- rdf = df.to_panel().to_frame()
- self.assertEqual(rdf.index.names, df.index.names)
- self.assertEqual(rdf.columns.names, df.columns.names)
+ with catch_warnings(record=True):
+ # filtered
+ filtered = self.panel.to_frame()
+ expected = self.panel.to_frame().dropna(how='any')
+ assert_frame_equal(filtered, expected)
+
+ # unfiltered
+ unfiltered = self.panel.to_frame(filter_observations=False)
+ assert_panel_equal(unfiltered.to_panel(), self.panel)
+
+ # names
+ self.assertEqual(unfiltered.index.names, ('major', 'minor'))
+
+ # unsorted, round trip
+ df = self.panel.to_frame(filter_observations=False)
+ unsorted = df.take(np.random.permutation(len(df)))
+ pan = unsorted.to_panel()
+ assert_panel_equal(pan, self.panel)
+
+ # preserve original index names
+ df = DataFrame(np.random.randn(6, 2),
+ index=[['a', 'a', 'b', 'b', 'c', 'c'],
+ [0, 1, 0, 1, 0, 1]],
+ columns=['one', 'two'])
+ df.index.names = ['foo', 'bar']
+ df.columns.name = 'baz'
+
+ rdf = df.to_panel().to_frame()
+ self.assertEqual(rdf.index.names, df.index.names)
+ self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
- panel = self.panel.fillna(0)
- panel['str'] = 'foo'
- panel['bool'] = panel['ItemA'] > 0
-
- lp = panel.to_frame()
- wp = lp.to_panel()
- self.assertEqual(wp['bool'].values.dtype, np.bool_)
- # Previously, this was mutating the underlying index and changing its
- # name
- assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
-
- # GH 8704
- # with categorical
- df = panel.to_frame()
- df['category'] = df['str'].astype('category')
-
- # to_panel
- # TODO: this converts back to object
- p = df.to_panel()
- expected = panel.copy()
- expected['category'] = 'foo'
- assert_panel_equal(p, expected)
+ with catch_warnings(record=True):
+ panel = self.panel.fillna(0)
+ panel['str'] = 'foo'
+ panel['bool'] = panel['ItemA'] > 0
+
+ lp = panel.to_frame()
+ wp = lp.to_panel()
+ self.assertEqual(wp['bool'].values.dtype, np.bool_)
+ # Previously, this was mutating the underlying
+ # index and changing its name
+ assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
+
+ # GH 8704
+ # with categorical
+ df = panel.to_frame()
+ df['category'] = df['str'].astype('category')
+
+ # to_panel
+ # TODO: this converts back to object
+ p = df.to_panel()
+ expected = panel.copy()
+ expected['category'] = 'foo'
+ assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
- idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
- 2, 'two')])
- df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
- columns=['A', 'B', 'C'], index=idx)
- wp = Panel({'i1': df, 'i2': df})
- expected_idx = MultiIndex.from_tuples(
- [
- (1, 'one', 'A'), (1, 'one', 'B'),
- (1, 'one', 'C'), (1, 'two', 'A'),
- (1, 'two', 'B'), (1, 'two', 'C'),
- (2, 'one', 'A'), (2, 'one', 'B'),
- (2, 'one', 'C'), (2, 'two', 'A'),
- (2, 'two', 'B'), (2, 'two', 'C')
- ],
- names=[None, None, 'minor'])
- expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
- 'c', 1, 4, 'd', 1],
- 'i2': [1, 'a', 1, 2, 'b',
- 1, 3, 'c', 1, 4, 'd', 1]},
- index=expected_idx)
- result = wp.to_frame()
- assert_frame_equal(result, expected)
-
- wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
- result = wp.to_frame()
- assert_frame_equal(result, expected[1:])
-
- idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
- np.nan, 'two')])
- df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
- columns=['A', 'B', 'C'], index=idx)
- wp = Panel({'i1': df, 'i2': df})
- ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
- (1, 'two', 'C'),
- (1, 'one', 'A'),
- (1, 'one', 'B'),
- (1, 'one', 'C'),
- (2, 'one', 'A'),
- (2, 'one', 'B'),
- (2, 'one', 'C'),
- (np.nan, 'two', 'A'),
- (np.nan, 'two', 'B'),
- (np.nan, 'two', 'C')],
- names=[None, None, 'minor'])
- expected.index = ex_idx
- result = wp.to_frame()
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ idx = MultiIndex.from_tuples(
+ [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
+ df = DataFrame([[1, 'a', 1], [2, 'b', 1],
+ [3, 'c', 1], [4, 'd', 1]],
+ columns=['A', 'B', 'C'], index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ expected_idx = MultiIndex.from_tuples(
+ [
+ (1, 'one', 'A'), (1, 'one', 'B'),
+ (1, 'one', 'C'), (1, 'two', 'A'),
+ (1, 'two', 'B'), (1, 'two', 'C'),
+ (2, 'one', 'A'), (2, 'one', 'B'),
+ (2, 'one', 'C'), (2, 'two', 'A'),
+ (2, 'two', 'B'), (2, 'two', 'C')
+ ],
+ names=[None, None, 'minor'])
+ expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
+ 'c', 1, 4, 'd', 1],
+ 'i2': [1, 'a', 1, 2, 'b',
+ 1, 3, 'c', 1, 4, 'd', 1]},
+ index=expected_idx)
+ result = wp.to_frame()
+ assert_frame_equal(result, expected)
+
+ wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
+ result = wp.to_frame()
+ assert_frame_equal(result, expected[1:])
+
+ idx = MultiIndex.from_tuples(
+ [(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
+ df = DataFrame([[1, 'a', 1], [2, 'b', 1],
+ [3, 'c', 1], [4, 'd', 1]],
+ columns=['A', 'B', 'C'], index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
+ (1, 'two', 'C'),
+ (1, 'one', 'A'),
+ (1, 'one', 'B'),
+ (1, 'one', 'C'),
+ (2, 'one', 'A'),
+ (2, 'one', 'B'),
+ (2, 'one', 'C'),
+ (np.nan, 'two', 'A'),
+ (np.nan, 'two', 'B'),
+ (np.nan, 'two', 'C')],
+ names=[None, None, 'minor'])
+ expected.index = ex_idx
+ result = wp.to_frame()
+ assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
- cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
- labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
- idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
- 2, 'two'), (3, 'three'), (4, 'four')])
- df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
- ['a', 'b', 'w', 'x'],
- ['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
- [-5, -6, -7, -8]], columns=cols, index=idx)
- wp = Panel({'i1': df, 'i2': df})
-
- exp_idx = MultiIndex.from_tuples(
- [(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
- (1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
- (1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
- (1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
- (2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
- (2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
- (2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
- (2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
- (3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
- (3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
- (4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
- (4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
- names=[None, None, None, None])
- exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
- [13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
- ['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
- [-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
- [-7, -7], [-8, -8]]
- result = wp.to_frame()
- expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
+ 2, 'two'), (3, 'three'), (4, 'four')])
+ df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
+ ['a', 'b', 'w', 'x'],
+ ['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
+ [-5, -6, -7, -8]], columns=cols, index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+
+ exp_idx = MultiIndex.from_tuples(
+ [(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
+ (1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
+ (1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
+ (1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
+ (2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
+ (2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
+ (2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
+ (2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
+ (3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
+ (3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
+ (4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
+ (4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
+ names=[None, None, None, None])
+ exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
+ [3, 3], [4, 4],
+ [13, 13], [14, 14], ['a', 'a'],
+ ['b', 'b'], ['w', 'w'],
+ ['x', 'x'], ['c', 'c'], ['d', 'd'], [
+ 'y', 'y'], ['z', 'z'],
+ [-1, -1], [-2, -2], [-3, -3], [-4, -4],
+ [-5, -5], [-6, -6],
+ [-7, -7], [-8, -8]]
+ result = wp.to_frame()
+ expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
+ assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
- idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
- df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
- wp = Panel({'i1': df, 'i2': df})
- result = wp.to_frame()
- exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
- names=[None, None, 'minor'])
- expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
+ df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ result = wp.to_frame()
+ exp_idx = MultiIndex.from_tuples(
+ [(2, 'one', 'A'), (2, 'two', 'A')],
+ names=[None, None, 'minor'])
+ expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
+ assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
- df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
- index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
- [0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
+ with catch_warnings(record=True):
+ df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
+ index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
- panel = df.to_panel()
- self.assertTrue(isnull(panel[0].loc[1, [0, 1]]).all())
+ panel = df.to_panel()
+ self.assertTrue(isnull(panel[0].loc[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
- df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
- idf = df.set_index(['a', 'b'])
- assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
+ with catch_warnings(record=True):
+ df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
+ idf = df.set_index(['a', 'b'])
+ assertRaisesRegexp(
+ ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
+ with catch_warnings(record=True):
- # GH 4960
- # duplicates in an index
+ # GH 4960
+ # duplicates in an index
- # items
- data = np.random.randn(5, 100, 5)
- no_dup_panel = Panel(data, items=list("ABCDE"))
- panel = Panel(data, items=list("AACDE"))
+ # items
+ data = np.random.randn(5, 100, 5)
+ no_dup_panel = Panel(data, items=list("ABCDE"))
+ panel = Panel(data, items=list("AACDE"))
- expected = no_dup_panel['A']
- result = panel.iloc[0]
- assert_frame_equal(result, expected)
+ expected = no_dup_panel['A']
+ result = panel.iloc[0]
+ assert_frame_equal(result, expected)
- expected = no_dup_panel['E']
- result = panel.loc['E']
- assert_frame_equal(result, expected)
+ expected = no_dup_panel['E']
+ result = panel.loc['E']
+ assert_frame_equal(result, expected)
- expected = no_dup_panel.loc[['A', 'B']]
- expected.items = ['A', 'A']
- result = panel.loc['A']
- assert_panel_equal(result, expected)
+ expected = no_dup_panel.loc[['A', 'B']]
+ expected.items = ['A', 'A']
+ result = panel.loc['A']
+ assert_panel_equal(result, expected)
- # major
- data = np.random.randn(5, 5, 5)
- no_dup_panel = Panel(data, major_axis=list("ABCDE"))
- panel = Panel(data, major_axis=list("AACDE"))
+ # major
+ data = np.random.randn(5, 5, 5)
+ no_dup_panel = Panel(data, major_axis=list("ABCDE"))
+ panel = Panel(data, major_axis=list("AACDE"))
- expected = no_dup_panel.loc[:, 'A']
- result = panel.iloc[:, 0]
- assert_frame_equal(result, expected)
+ expected = no_dup_panel.loc[:, 'A']
+ result = panel.iloc[:, 0]
+ assert_frame_equal(result, expected)
- expected = no_dup_panel.loc[:, 'E']
- result = panel.loc[:, 'E']
- assert_frame_equal(result, expected)
+ expected = no_dup_panel.loc[:, 'E']
+ result = panel.loc[:, 'E']
+ assert_frame_equal(result, expected)
- expected = no_dup_panel.loc[:, ['A', 'B']]
- expected.major_axis = ['A', 'A']
- result = panel.loc[:, 'A']
- assert_panel_equal(result, expected)
+ expected = no_dup_panel.loc[:, ['A', 'B']]
+ expected.major_axis = ['A', 'A']
+ result = panel.loc[:, 'A']
+ assert_panel_equal(result, expected)
- # minor
- data = np.random.randn(5, 100, 5)
- no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
- panel = Panel(data, minor_axis=list("AACDE"))
+ # minor
+ data = np.random.randn(5, 100, 5)
+ no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
+ panel = Panel(data, minor_axis=list("AACDE"))
- expected = no_dup_panel.loc[:, :, 'A']
- result = panel.iloc[:, :, 0]
- assert_frame_equal(result, expected)
+ expected = no_dup_panel.loc[:, :, 'A']
+ result = panel.iloc[:, :, 0]
+ assert_frame_equal(result, expected)
- expected = no_dup_panel.loc[:, :, 'E']
- result = panel.loc[:, :, 'E']
- assert_frame_equal(result, expected)
+ expected = no_dup_panel.loc[:, :, 'E']
+ result = panel.loc[:, :, 'E']
+ assert_frame_equal(result, expected)
- expected = no_dup_panel.loc[:, :, ['A', 'B']]
- expected.minor_axis = ['A', 'A']
- result = panel.loc[:, :, 'A']
- assert_panel_equal(result, expected)
+ expected = no_dup_panel.loc[:, :, ['A', 'B']]
+ expected.minor_axis = ['A', 'A']
+ result = panel.loc[:, :, 'A']
+ assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
- compounded = self.panel.compound()
+ with catch_warnings(record=True):
+ compounded = self.panel.compound()
- assert_series_equal(compounded['ItemA'],
- (1 + self.panel['ItemA']).product(0) - 1,
- check_names=False)
+ assert_series_equal(compounded['ItemA'],
+ (1 + self.panel['ItemA']).product(0) - 1,
+ check_names=False)
def test_shift(self):
- # major
- idx = self.panel.major_axis[0]
- idx_lag = self.panel.major_axis[1]
- shifted = self.panel.shift(1)
- assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
-
- # minor
- idx = self.panel.minor_axis[0]
- idx_lag = self.panel.minor_axis[1]
- shifted = self.panel.shift(1, axis='minor')
- assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
-
- # items
- idx = self.panel.items[0]
- idx_lag = self.panel.items[1]
- shifted = self.panel.shift(1, axis='items')
- assert_frame_equal(self.panel[idx], shifted[idx_lag])
-
- # negative numbers, #2164
- result = self.panel.shift(-1)
- expected = Panel(dict((i, f.shift(-1)[:-1])
- for i, f in self.panel.iteritems()))
- assert_panel_equal(result, expected)
-
- # mixed dtypes #6959
- data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
- data = dict(data)
- mixed_panel = Panel.from_dict(data, orient='minor')
- shifted = mixed_panel.shift(1)
- assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
+ with catch_warnings(record=True):
+ # major
+ idx = self.panel.major_axis[0]
+ idx_lag = self.panel.major_axis[1]
+ shifted = self.panel.shift(1)
+ assert_frame_equal(self.panel.major_xs(idx),
+ shifted.major_xs(idx_lag))
+
+ # minor
+ idx = self.panel.minor_axis[0]
+ idx_lag = self.panel.minor_axis[1]
+ shifted = self.panel.shift(1, axis='minor')
+ assert_frame_equal(self.panel.minor_xs(idx),
+ shifted.minor_xs(idx_lag))
+
+ # items
+ idx = self.panel.items[0]
+ idx_lag = self.panel.items[1]
+ shifted = self.panel.shift(1, axis='items')
+ assert_frame_equal(self.panel[idx], shifted[idx_lag])
+
+ # negative numbers, #2164
+ result = self.panel.shift(-1)
+ expected = Panel(dict((i, f.shift(-1)[:-1])
+ for i, f in self.panel.iteritems()))
+ assert_panel_equal(result, expected)
+
+ # mixed dtypes #6959
+ data = [('item ' + ch, makeMixedDataFrame())
+ for ch in list('abcde')]
+ data = dict(data)
+ mixed_panel = Panel.from_dict(data, orient='minor')
+ shifted = mixed_panel.shift(1)
+ assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
- ps = tm.makePeriodPanel()
- shifted = ps.tshift(1)
- unshifted = shifted.tshift(-1)
+ with catch_warnings(record=True):
+ ps = tm.makePeriodPanel()
+ shifted = ps.tshift(1)
+ unshifted = shifted.tshift(-1)
- assert_panel_equal(unshifted, ps)
+ assert_panel_equal(unshifted, ps)
- shifted2 = ps.tshift(freq='B')
- assert_panel_equal(shifted, shifted2)
+ shifted2 = ps.tshift(freq='B')
+ assert_panel_equal(shifted, shifted2)
- shifted3 = ps.tshift(freq=BDay())
- assert_panel_equal(shifted, shifted3)
+ shifted3 = ps.tshift(freq=BDay())
+ assert_panel_equal(shifted, shifted3)
- assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
+ assertRaisesRegexp(ValueError, 'does not match',
+ ps.tshift, freq='M')
- # DatetimeIndex
- panel = _panel
- shifted = panel.tshift(1)
- unshifted = shifted.tshift(-1)
+ # DatetimeIndex
+ panel = make_test_panel()
+ shifted = panel.tshift(1)
+ unshifted = shifted.tshift(-1)
- assert_panel_equal(panel, unshifted)
+ assert_panel_equal(panel, unshifted)
- shifted2 = panel.tshift(freq=panel.major_axis.freq)
- assert_panel_equal(shifted, shifted2)
+ shifted2 = panel.tshift(freq=panel.major_axis.freq)
+ assert_panel_equal(shifted, shifted2)
- inferred_ts = Panel(panel.values, items=panel.items,
- major_axis=Index(np.asarray(panel.major_axis)),
- minor_axis=panel.minor_axis)
- shifted = inferred_ts.tshift(1)
- unshifted = shifted.tshift(-1)
- assert_panel_equal(shifted, panel.tshift(1))
- assert_panel_equal(unshifted, inferred_ts)
+ inferred_ts = Panel(panel.values, items=panel.items,
+ major_axis=Index(np.asarray(panel.major_axis)),
+ minor_axis=panel.minor_axis)
+ shifted = inferred_ts.tshift(1)
+ unshifted = shifted.tshift(-1)
+ assert_panel_equal(shifted, panel.tshift(1))
+ assert_panel_equal(unshifted, inferred_ts)
- no_freq = panel.iloc[:, [0, 5, 7], :]
- self.assertRaises(ValueError, no_freq.tshift)
+ no_freq = panel.iloc[:, [0, 5, 7], :]
+ self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
- df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
- df2 = df1 + 1
- df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
- wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
- # major, 1
- result = wp.pct_change() # axis='major'
- expected = Panel({'i1': df1.pct_change(),
- 'i2': df2.pct_change(),
- 'i3': df3.pct_change()})
- assert_panel_equal(result, expected)
- result = wp.pct_change(axis=1)
- assert_panel_equal(result, expected)
- # major, 2
- result = wp.pct_change(periods=2)
- expected = Panel({'i1': df1.pct_change(2),
- 'i2': df2.pct_change(2),
- 'i3': df3.pct_change(2)})
- assert_panel_equal(result, expected)
- # minor, 1
- result = wp.pct_change(axis='minor')
- expected = Panel({'i1': df1.pct_change(axis=1),
- 'i2': df2.pct_change(axis=1),
- 'i3': df3.pct_change(axis=1)})
- assert_panel_equal(result, expected)
- result = wp.pct_change(axis=2)
- assert_panel_equal(result, expected)
- # minor, 2
- result = wp.pct_change(periods=2, axis='minor')
- expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
- 'i2': df2.pct_change(periods=2, axis=1),
- 'i3': df3.pct_change(periods=2, axis=1)})
- assert_panel_equal(result, expected)
- # items, 1
- result = wp.pct_change(axis='items')
- expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
- 'c2': [np.nan, np.nan, np.nan]}),
- 'i2': DataFrame({'c1': [1, 0.5, .2],
- 'c2': [1. / 3, 0.25, 1. / 6]}),
- 'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
- 'c2': [.25, .2, 1. / 7]})})
- assert_panel_equal(result, expected)
- result = wp.pct_change(axis=0)
- assert_panel_equal(result, expected)
- # items, 2
- result = wp.pct_change(periods=2, axis='items')
- expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
- 'c2': [np.nan, np.nan, np.nan]}),
- 'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
- 'c2': [np.nan, np.nan, np.nan]}),
- 'i3': DataFrame({'c1': [2, 1, .4],
- 'c2': [2. / 3, .5, 1. / 3]})})
- assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
+ df2 = df1 + 1
+ df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
+ wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
+ # major, 1
+ result = wp.pct_change() # axis='major'
+ expected = Panel({'i1': df1.pct_change(),
+ 'i2': df2.pct_change(),
+ 'i3': df3.pct_change()})
+ assert_panel_equal(result, expected)
+ result = wp.pct_change(axis=1)
+ assert_panel_equal(result, expected)
+ # major, 2
+ result = wp.pct_change(periods=2)
+ expected = Panel({'i1': df1.pct_change(2),
+ 'i2': df2.pct_change(2),
+ 'i3': df3.pct_change(2)})
+ assert_panel_equal(result, expected)
+ # minor, 1
+ result = wp.pct_change(axis='minor')
+ expected = Panel({'i1': df1.pct_change(axis=1),
+ 'i2': df2.pct_change(axis=1),
+ 'i3': df3.pct_change(axis=1)})
+ assert_panel_equal(result, expected)
+ result = wp.pct_change(axis=2)
+ assert_panel_equal(result, expected)
+ # minor, 2
+ result = wp.pct_change(periods=2, axis='minor')
+ expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
+ 'i2': df2.pct_change(periods=2, axis=1),
+ 'i3': df3.pct_change(periods=2, axis=1)})
+ assert_panel_equal(result, expected)
+ # items, 1
+ result = wp.pct_change(axis='items')
+ expected = Panel(
+ {'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
+ 'c2': [np.nan, np.nan, np.nan]}),
+ 'i2': DataFrame({'c1': [1, 0.5, .2],
+ 'c2': [1. / 3, 0.25, 1. / 6]}),
+ 'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
+ 'c2': [.25, .2, 1. / 7]})})
+ assert_panel_equal(result, expected)
+ result = wp.pct_change(axis=0)
+ assert_panel_equal(result, expected)
+ # items, 2
+ result = wp.pct_change(periods=2, axis='items')
+ expected = Panel(
+ {'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
+ 'c2': [np.nan, np.nan, np.nan]}),
+ 'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
+ 'c2': [np.nan, np.nan, np.nan]}),
+ 'i3': DataFrame({'c1': [2, 1, .4],
+ 'c2': [2. / 3, .5, 1. / 3]})})
+ assert_panel_equal(result, expected)
def test_round(self):
- values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
- [-1566.213, 88.88], [-12, 94.5]],
- [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
- [272.212, -99.99], [23, -76.5]]]
- evalues = [[[float(np.around(i)) for i in j] for j in k]
- for k in values]
- p = Panel(values, items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B'])
- expected = Panel(evalues, items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B'])
- result = p.round()
- self.assert_panel_equal(expected, result)
+ with catch_warnings(record=True):
+ values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
+ [-1566.213, 88.88], [-12, 94.5]],
+ [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
+ [272.212, -99.99], [23, -76.5]]]
+ evalues = [[[float(np.around(i)) for i in j] for j in k]
+ for k in values]
+ p = Panel(values, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B'])
+ expected = Panel(evalues, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B'])
+ result = p.round()
+ assert_panel_equal(expected, result)
def test_numpy_round(self):
- values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
- [-1566.213, 88.88], [-12, 94.5]],
- [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
- [272.212, -99.99], [23, -76.5]]]
- evalues = [[[float(np.around(i)) for i in j] for j in k]
- for k in values]
- p = Panel(values, items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B'])
- expected = Panel(evalues, items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B'])
- result = np.round(p)
- self.assert_panel_equal(expected, result)
-
- msg = "the 'out' parameter is not supported"
- tm.assertRaisesRegexp(ValueError, msg, np.round, p, out=p)
+ with catch_warnings(record=True):
+ values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
+ [-1566.213, 88.88], [-12, 94.5]],
+ [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
+ [272.212, -99.99], [23, -76.5]]]
+ evalues = [[[float(np.around(i)) for i in j] for j in k]
+ for k in values]
+ p = Panel(values, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B'])
+ expected = Panel(evalues, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B'])
+ result = np.round(p)
+ assert_panel_equal(expected, result)
+
+ msg = "the 'out' parameter is not supported"
+ tm.assertRaisesRegexp(ValueError, msg, np.round, p, out=p)
def test_multiindex_get(self):
- ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
- names=['first', 'second'])
- wp = Panel(np.random.random((4, 5, 5)),
- items=ind,
- major_axis=np.arange(5),
- minor_axis=np.arange(5))
- f1 = wp['a']
- f2 = wp.loc['a']
- assert_panel_equal(f1, f2)
-
- self.assertTrue((f1.items == [1, 2]).all())
- self.assertTrue((f2.items == [1, 2]).all())
-
- ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
- names=['first', 'second'])
+ with catch_warnings(record=True):
+ ind = MultiIndex.from_tuples(
+ [('a', 1), ('a', 2), ('b', 1), ('b', 2)],
+ names=['first', 'second'])
+ wp = Panel(np.random.random((4, 5, 5)),
+ items=ind,
+ major_axis=np.arange(5),
+ minor_axis=np.arange(5))
+ f1 = wp['a']
+ f2 = wp.loc['a']
+ assert_panel_equal(f1, f2)
+
+ self.assertTrue((f1.items == [1, 2]).all())
+ self.assertTrue((f2.items == [1, 2]).all())
+
+ ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
+ names=['first', 'second'])
def test_multiindex_blocks(self):
- ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
- names=['first', 'second'])
- wp = Panel(self.panel._data)
- wp.items = ind
- f1 = wp['a']
- self.assertTrue((f1.items == [1, 2]).all())
+ with catch_warnings(record=True):
+ ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
+ names=['first', 'second'])
+ wp = Panel(self.panel._data)
+ wp.items = ind
+ f1 = wp['a']
+ self.assertTrue((f1.items == [1, 2]).all())
- f1 = wp[('b', 1)]
- self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
+ f1 = wp[('b', 1)]
+ self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
- empty = Panel()
- repr(empty)
+ with catch_warnings(record=True):
+ empty = Panel()
+ repr(empty)
def test_rename(self):
- mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
+ with catch_warnings(record=True):
+ mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
- renamed = self.panel.rename_axis(mapper, axis=0)
- exp = Index(['foo', 'bar', 'baz'])
- self.assert_index_equal(renamed.items, exp)
+ renamed = self.panel.rename_axis(mapper, axis=0)
+ exp = Index(['foo', 'bar', 'baz'])
+ self.assert_index_equal(renamed.items, exp)
- renamed = self.panel.rename_axis(str.lower, axis=2)
- exp = Index(['a', 'b', 'c', 'd'])
- self.assert_index_equal(renamed.minor_axis, exp)
+ renamed = self.panel.rename_axis(str.lower, axis=2)
+ exp = Index(['a', 'b', 'c', 'd'])
+ self.assert_index_equal(renamed.minor_axis, exp)
- # don't copy
- renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
- renamed_nocopy['foo'] = 3.
- self.assertTrue((self.panel['ItemA'].values == 3).all())
+ # don't copy
+ renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
+ renamed_nocopy['foo'] = 3.
+ self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
@@ -2046,12 +2170,13 @@ def test_get_attr(self):
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
- tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
- ('MSFT', 1)]
- midx = MultiIndex.from_tuples(tuples)
- df = DataFrame(np.random.rand(5, 4), index=midx)
- p = df.to_panel()
- assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
+ with catch_warnings(record=True):
+ tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
+ ('MSFT', 1)]
+ midx = MultiIndex.from_tuples(tuples)
+ df = DataFrame(np.random.rand(5, 4), index=midx)
+ p = df.to_panel()
+ assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
@@ -2094,162 +2219,191 @@ def test_to_excel_xlsxwriter(self):
assert_frame_equal(df, recdf)
def test_dropna(self):
- p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
- p.loc[:, ['b', 'd'], 0] = np.nan
+ with catch_warnings(record=True):
+ p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
+ p.loc[:, ['b', 'd'], 0] = np.nan
- result = p.dropna(axis=1)
- exp = p.loc[:, ['a', 'c', 'e'], :]
- assert_panel_equal(result, exp)
- inp = p.copy()
- inp.dropna(axis=1, inplace=True)
- assert_panel_equal(inp, exp)
+ result = p.dropna(axis=1)
+ exp = p.loc[:, ['a', 'c', 'e'], :]
+ assert_panel_equal(result, exp)
+ inp = p.copy()
+ inp.dropna(axis=1, inplace=True)
+ assert_panel_equal(inp, exp)
- result = p.dropna(axis=1, how='all')
- assert_panel_equal(result, p)
+ result = p.dropna(axis=1, how='all')
+ assert_panel_equal(result, p)
- p.loc[:, ['b', 'd'], :] = np.nan
- result = p.dropna(axis=1, how='all')
- exp = p.loc[:, ['a', 'c', 'e'], :]
- assert_panel_equal(result, exp)
+ p.loc[:, ['b', 'd'], :] = np.nan
+ result = p.dropna(axis=1, how='all')
+ exp = p.loc[:, ['a', 'c', 'e'], :]
+ assert_panel_equal(result, exp)
- p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
- p.loc[['b'], :, 0] = np.nan
+ p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
+ p.loc[['b'], :, 0] = np.nan
- result = p.dropna()
- exp = p.loc[['a', 'c', 'd']]
- assert_panel_equal(result, exp)
+ result = p.dropna()
+ exp = p.loc[['a', 'c', 'd']]
+ assert_panel_equal(result, exp)
- result = p.dropna(how='all')
- assert_panel_equal(result, p)
+ result = p.dropna(how='all')
+ assert_panel_equal(result, p)
- p.loc['b'] = np.nan
- result = p.dropna(how='all')
- exp = p.loc[['a', 'c', 'd']]
- assert_panel_equal(result, exp)
+ p.loc['b'] = np.nan
+ result = p.dropna(how='all')
+ exp = p.loc[['a', 'c', 'd']]
+ assert_panel_equal(result, exp)
def test_drop(self):
- df = DataFrame({"A": [1, 2], "B": [3, 4]})
- panel = Panel({"One": df, "Two": df})
+ with catch_warnings(record=True):
+ df = DataFrame({"A": [1, 2], "B": [3, 4]})
+ panel = Panel({"One": df, "Two": df})
- def check_drop(drop_val, axis_number, aliases, expected):
- try:
- actual = panel.drop(drop_val, axis=axis_number)
- assert_panel_equal(actual, expected)
- for alias in aliases:
- actual = panel.drop(drop_val, axis=alias)
+ def check_drop(drop_val, axis_number, aliases, expected):
+ try:
+ actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
- except AssertionError:
- pprint_thing("Failed with axis_number %d and aliases: %s" %
- (axis_number, aliases))
- raise
- # Items
- expected = Panel({"One": df})
- check_drop('Two', 0, ['items'], expected)
-
- self.assertRaises(ValueError, panel.drop, 'Three')
-
- # errors = 'ignore'
- dropped = panel.drop('Three', errors='ignore')
- assert_panel_equal(dropped, panel)
- dropped = panel.drop(['Two', 'Three'], errors='ignore')
- expected = Panel({"One": df})
- assert_panel_equal(dropped, expected)
-
- # Major
- exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
- expected = Panel({"One": exp_df, "Two": exp_df})
- check_drop(0, 1, ['major_axis', 'major'], expected)
-
- exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
- expected = Panel({"One": exp_df, "Two": exp_df})
- check_drop([1], 1, ['major_axis', 'major'], expected)
-
- # Minor
- exp_df = df[['B']]
- expected = Panel({"One": exp_df, "Two": exp_df})
- check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
-
- exp_df = df[['A']]
- expected = Panel({"One": exp_df, "Two": exp_df})
- check_drop("B", 2, ['minor_axis', 'minor'], expected)
+ for alias in aliases:
+ actual = panel.drop(drop_val, axis=alias)
+ assert_panel_equal(actual, expected)
+ except AssertionError:
+ pprint_thing("Failed with axis_number %d and aliases: %s" %
+ (axis_number, aliases))
+ raise
+ # Items
+ expected = Panel({"One": df})
+ check_drop('Two', 0, ['items'], expected)
+
+ self.assertRaises(ValueError, panel.drop, 'Three')
+
+ # errors = 'ignore'
+ dropped = panel.drop('Three', errors='ignore')
+ assert_panel_equal(dropped, panel)
+ dropped = panel.drop(['Two', 'Three'], errors='ignore')
+ expected = Panel({"One": df})
+ assert_panel_equal(dropped, expected)
+
+ # Major
+ exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
+ expected = Panel({"One": exp_df, "Two": exp_df})
+ check_drop(0, 1, ['major_axis', 'major'], expected)
+
+ exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
+ expected = Panel({"One": exp_df, "Two": exp_df})
+ check_drop([1], 1, ['major_axis', 'major'], expected)
+
+ # Minor
+ exp_df = df[['B']]
+ expected = Panel({"One": exp_df, "Two": exp_df})
+ check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
+
+ exp_df = df[['A']]
+ expected = Panel({"One": exp_df, "Two": exp_df})
+ check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
- pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ with catch_warnings(record=True):
+ pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]],
+ [[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
+ other = Panel(
+ [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
- pan.update(other)
+ pan.update(other)
- expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
- [[3.6, 2., 3], [1.5, np.nan, 7], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
+ [[3.6, 2., 3], [1.5, np.nan, 7],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- assert_panel_equal(pan, expected)
+ assert_panel_equal(pan, expected)
def test_update_from_dict(self):
- pan = Panel({'one': DataFrame([[1.5, np.nan, 3], [1.5, np.nan, 3],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
- 'two': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
-
- other = {'two': DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
-
- pan.update(other)
-
- expected = Panel(
- {'two': DataFrame([[3.6, 2., 3], [1.5, np.nan, 7],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
- 'one': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
-
- assert_panel_equal(pan, expected)
+ with catch_warnings(record=True):
+ pan = Panel({'one': DataFrame([[1.5, np.nan, 3],
+ [1.5, np.nan, 3],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]),
+ 'two': DataFrame([[1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]])})
+
+ other = {'two': DataFrame(
+ [[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
+
+ pan.update(other)
+
+ expected = Panel(
+ {'two': DataFrame([[3.6, 2., 3],
+ [1.5, np.nan, 7],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]),
+ 'one': DataFrame([[1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]])})
+
+ assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
- pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ with catch_warnings(record=True):
+ pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]],
+ [[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
+ other = Panel(
+ [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
- pan.update(other, overwrite=False)
+ pan.update(other, overwrite=False)
- expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
- [[1.5, 2., 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
+ [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
+ [[1.5, 2., 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- assert_panel_equal(pan, expected)
+ assert_panel_equal(pan, expected)
def test_update_filtered(self):
- pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ with catch_warnings(record=True):
+ pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]],
+ [[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
+ other = Panel(
+ [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
- pan.update(other, filter_func=lambda x: x > 2)
+ pan.update(other, filter_func=lambda x: x > 2)
- expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3], [1.5, np.nan, 7],
- [1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
+ expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.], [1.5, np.nan, 3.]],
+ [[1.5, np.nan, 3], [1.5, np.nan, 7],
+ [1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
- assert_panel_equal(pan, expected)
+ assert_panel_equal(pan, expected)
def test_update_raise(self):
- pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
+ with catch_warnings(record=True):
+ pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]],
+ [[1.5, np.nan, 3.], [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.],
+ [1.5, np.nan, 3.]]])
- self.assertRaises(Exception, pan.update, *(pan, ),
- **{'raise_conflict': True})
+ self.assertRaises(Exception, pan.update, *(pan, ),
+ **{'raise_conflict': True})
def test_all_any(self):
self.assertTrue((self.panel.all(axis=0).values == nanall(
@@ -2276,90 +2430,95 @@ class TestLongPanel(tm.TestCase):
"""
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
-
- panel = tm.makePanel()
- tm.add_nans(panel)
-
+ panel = make_test_panel()
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
- # trying to set non-identically indexed panel
- wp = self.panel.to_panel()
- wp2 = wp.reindex(major=wp.major_axis[:-1])
- lp2 = wp2.to_frame()
+ with catch_warnings(record=True):
+ # trying to set non-identically indexed panel
+ wp = self.panel.to_panel()
+ wp2 = wp.reindex(major=wp.major_axis[:-1])
+ lp2 = wp2.to_frame()
- result = self.panel + lp2
- assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
+ result = self.panel + lp2
+ assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
- # careful, mutation
- self.panel['foo'] = lp2['ItemA']
- assert_series_equal(self.panel['foo'].reindex(lp2.index), lp2['ItemA'],
- check_names=False)
+ # careful, mutation
+ self.panel['foo'] = lp2['ItemA']
+ assert_series_equal(self.panel['foo'].reindex(lp2.index),
+ lp2['ItemA'],
+ check_names=False)
def test_ops_scalar(self):
- result = self.panel.mul(2)
- expected = DataFrame.__mul__(self.panel, 2)
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ result = self.panel.mul(2)
+ expected = DataFrame.__mul__(self.panel, 2)
+ assert_frame_equal(result, expected)
def test_combineFrame(self):
- wp = self.panel.to_panel()
- result = self.panel.add(wp['ItemA'].stack(), axis=0)
- assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
+ with catch_warnings(record=True):
+ wp = self.panel.to_panel()
+ result = self.panel.add(wp['ItemA'].stack(), axis=0)
+ assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
- wp = self.panel.to_panel()
- result = self.panel.add(self.panel)
- wide_result = result.to_panel()
- assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
+ with catch_warnings(record=True):
+ wp = self.panel.to_panel()
+ result = self.panel.add(self.panel)
+ wide_result = result.to_panel()
+ assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
- # one item
- result = self.panel.add(self.panel.filter(['ItemA']))
+ # one item
+ result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
- result = self.panel.mul(2)
- expected = DataFrame(self.panel._data) * 2
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ result = self.panel.mul(2)
+ expected = DataFrame(self.panel._data) * 2
+ assert_frame_equal(result, expected)
def test_combine_series(self):
- s = self.panel['ItemA'][:10]
- result = self.panel.add(s, axis=0)
- expected = DataFrame.add(self.panel, s, axis=0)
- assert_frame_equal(result, expected)
+ with catch_warnings(record=True):
+ s = self.panel['ItemA'][:10]
+ result = self.panel.add(s, axis=0)
+ expected = DataFrame.add(self.panel, s, axis=0)
+ assert_frame_equal(result, expected)
- s = self.panel.iloc[5]
- result = self.panel + s
- expected = DataFrame.add(self.panel, s, axis=1)
- assert_frame_equal(result, expected)
+ s = self.panel.iloc[5]
+ result = self.panel + s
+ expected = DataFrame.add(self.panel, s, axis=1)
+ assert_frame_equal(result, expected)
def test_operators(self):
- wp = self.panel.to_panel()
- result = (self.panel + 1).to_panel()
- assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
+ with catch_warnings(record=True):
+ wp = self.panel.to_panel()
+ result = (self.panel + 1).to_panel()
+ assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
- ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
- if not compat.PY3:
- aliases = {}
- else:
- aliases = {'div': 'truediv'}
- self.panel = self.panel.to_panel()
-
- for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
- for op in ops:
- alias = aliases.get(op, op)
- f = getattr(operator, alias)
- exp = f(self.panel, n)
- result = getattr(self.panel, op)(n)
- assert_panel_equal(result, exp, check_panel_type=True)
-
- # rops
- r_f = lambda x, y: f(y, x)
- exp = r_f(self.panel, n)
- result = getattr(self.panel, 'r' + op)(n)
- assert_panel_equal(result, exp)
+ with catch_warnings(record=True):
+ ops = ['add', 'sub', 'mul', 'div',
+ 'truediv', 'pow', 'floordiv', 'mod']
+ if not compat.PY3:
+ aliases = {}
+ else:
+ aliases = {'div': 'truediv'}
+ self.panel = self.panel.to_panel()
+
+ for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
+ for op in ops:
+ alias = aliases.get(op, op)
+ f = getattr(operator, alias)
+ exp = f(self.panel, n)
+ result = getattr(self.panel, op)(n)
+ assert_panel_equal(result, exp, check_panel_type=True)
+
+ # rops
+ r_f = lambda x, y: f(y, x)
+ exp = r_f(self.panel, n)
+ result = getattr(self.panel, 'r' + op)(n)
+ assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
@@ -2382,43 +2541,44 @@ def test_to_sparse(self):
self.panel.to_sparse)
def test_truncate(self):
- dates = self.panel.index.levels[0]
- start, end = dates[1], dates[5]
+ with catch_warnings(record=True):
+ dates = self.panel.index.levels[0]
+ start, end = dates[1], dates[5]
- trunced = self.panel.truncate(start, end).to_panel()
- expected = self.panel.to_panel()['ItemA'].truncate(start, end)
+ trunced = self.panel.truncate(start, end).to_panel()
+ expected = self.panel.to_panel()['ItemA'].truncate(start, end)
- # TODO trucate drops index.names
- assert_frame_equal(trunced['ItemA'], expected, check_names=False)
+ # TODO trucate drops index.names
+ assert_frame_equal(trunced['ItemA'], expected, check_names=False)
- trunced = self.panel.truncate(before=start).to_panel()
- expected = self.panel.to_panel()['ItemA'].truncate(before=start)
+ trunced = self.panel.truncate(before=start).to_panel()
+ expected = self.panel.to_panel()['ItemA'].truncate(before=start)
- # TODO trucate drops index.names
- assert_frame_equal(trunced['ItemA'], expected, check_names=False)
+ # TODO trucate drops index.names
+ assert_frame_equal(trunced['ItemA'], expected, check_names=False)
- trunced = self.panel.truncate(after=end).to_panel()
- expected = self.panel.to_panel()['ItemA'].truncate(after=end)
+ trunced = self.panel.truncate(after=end).to_panel()
+ expected = self.panel.to_panel()['ItemA'].truncate(after=end)
- # TODO trucate drops index.names
- assert_frame_equal(trunced['ItemA'], expected, check_names=False)
+ # TODO trucate drops index.names
+ assert_frame_equal(trunced['ItemA'], expected, check_names=False)
- # truncate on dates that aren't in there
- wp = self.panel.to_panel()
- new_index = wp.major_axis[::5]
+ # truncate on dates that aren't in there
+ wp = self.panel.to_panel()
+ new_index = wp.major_axis[::5]
- wp2 = wp.reindex(major=new_index)
+ wp2 = wp.reindex(major=new_index)
- lp2 = wp2.to_frame()
- lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
+ lp2 = wp2.to_frame()
+ lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
- wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
+ wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
- assert_panel_equal(wp_trunc, lp_trunc.to_panel())
+ assert_panel_equal(wp_trunc, lp_trunc.to_panel())
- # throw proper exception
- self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
- wp.major_axis[2])
+ # throw proper exception
+ self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
+ wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape import make_axis_dummies
@@ -2449,82 +2609,70 @@ def test_get_dummies(self):
self.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
- means = self.panel.mean(level='minor')
+ with catch_warnings(record=True):
+ means = self.panel.mean(level='minor')
- # test versus Panel version
- wide_means = self.panel.to_panel().mean('major')
- assert_frame_equal(means, wide_means)
+ # test versus Panel version
+ wide_means = self.panel.to_panel().mean('major')
+ assert_frame_equal(means, wide_means)
def test_sum(self):
- sums = self.panel.sum(level='minor')
+ with catch_warnings(record=True):
+ sums = self.panel.sum(level='minor')
- # test versus Panel version
- wide_sums = self.panel.to_panel().sum('major')
- assert_frame_equal(sums, wide_sums)
+ # test versus Panel version
+ wide_sums = self.panel.to_panel().sum('major')
+ assert_frame_equal(sums, wide_sums)
def test_count(self):
- index = self.panel.index
+ with catch_warnings(record=True):
+ index = self.panel.index
- major_count = self.panel.count(level=0)['ItemA']
- labels = index.labels[0]
- for i, idx in enumerate(index.levels[0]):
- self.assertEqual(major_count[i], (labels == i).sum())
+ major_count = self.panel.count(level=0)['ItemA']
+ labels = index.labels[0]
+ for i, idx in enumerate(index.levels[0]):
+ self.assertEqual(major_count[i], (labels == i).sum())
- minor_count = self.panel.count(level=1)['ItemA']
- labels = index.labels[1]
- for i, idx in enumerate(index.levels[1]):
- self.assertEqual(minor_count[i], (labels == i).sum())
+ minor_count = self.panel.count(level=1)['ItemA']
+ labels = index.labels[1]
+ for i, idx in enumerate(index.levels[1]):
+ self.assertEqual(minor_count[i], (labels == i).sum())
def test_join(self):
- lp1 = self.panel.filter(['ItemA', 'ItemB'])
- lp2 = self.panel.filter(['ItemC'])
+ with catch_warnings(record=True):
+ lp1 = self.panel.filter(['ItemA', 'ItemB'])
+ lp2 = self.panel.filter(['ItemC'])
- joined = lp1.join(lp2)
+ joined = lp1.join(lp2)
- self.assertEqual(len(joined.columns), 3)
+ self.assertEqual(len(joined.columns), 3)
- self.assertRaises(Exception, lp1.join,
- self.panel.filter(['ItemB', 'ItemC']))
+ self.assertRaises(Exception, lp1.join,
+ self.panel.filter(['ItemB', 'ItemC']))
def test_pivot(self):
- from pandas.core.reshape import _slow_pivot
-
- one, two, three = (np.array([1, 2, 3, 4, 5]),
- np.array(['a', 'b', 'c', 'd', 'e']),
- np.array([1, 2, 3, 5, 4.]))
- df = pivot(one, two, three)
- self.assertEqual(df['a'][1], 1)
- self.assertEqual(df['b'][2], 2)
- self.assertEqual(df['c'][3], 3)
- self.assertEqual(df['d'][4], 5)
- self.assertEqual(df['e'][5], 4)
- assert_frame_equal(df, _slow_pivot(one, two, three))
-
- # weird overlap, TODO: test?
- a, b, c = (np.array([1, 2, 3, 4, 4]),
- np.array(['a', 'a', 'a', 'a', 'a']),
- np.array([1., 2., 3., 4., 5.]))
- self.assertRaises(Exception, pivot, a, b, c)
-
- # corner case, empty
- df = pivot(np.array([]), np.array([]), np.array([]))
-
-
-def test_monotonic():
- pos = np.array([1, 2, 3, 5])
-
- def _monotonic(arr):
- return not (arr[1:] < arr[:-1]).any()
-
- assert _monotonic(pos)
-
- neg = np.array([1, 2, 3, 4, 3])
-
- assert not _monotonic(neg)
-
- neg2 = np.array([5, 1, 2, 3, 4, 5])
-
- assert not _monotonic(neg2)
+ with catch_warnings(record=True):
+ from pandas.core.reshape import _slow_pivot
+
+ one, two, three = (np.array([1, 2, 3, 4, 5]),
+ np.array(['a', 'b', 'c', 'd', 'e']),
+ np.array([1, 2, 3, 5, 4.]))
+ df = pivot(one, two, three)
+ self.assertEqual(df['a'][1], 1)
+ self.assertEqual(df['b'][2], 2)
+ self.assertEqual(df['c'][3], 3)
+ self.assertEqual(df['d'][4], 5)
+ self.assertEqual(df['e'][5], 4)
+ assert_frame_equal(df, _slow_pivot(one, two, three))
+
+ # weird overlap, TODO: test?
+ a, b, c = (np.array([1, 2, 3, 4, 4]),
+ np.array(['a', 'a', 'a', 'a', 'a']),
+ np.array([1., 2., 3., 4., 5.]))
+ self.assertRaises(Exception, pivot, a, b, c)
+
+ # corner case, empty
+ df = pivot(np.array([]), np.array([]), np.array([]))
def test_panel_index():
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index ceb12c6c03074..5fc31e9321f31 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -10,8 +10,8 @@
from distutils.version import LooseVersion
import pandas as pd
-from pandas import (Series, DataFrame, Panel, bdate_range, isnull,
- notnull, concat, Timestamp)
+from pandas import (Series, DataFrame, bdate_range, isnull,
+ notnull, concat, Timestamp, Index)
import pandas.stats.moments as mom
import pandas.core.window as rwindow
import pandas.tseries.offsets as offsets
@@ -172,7 +172,7 @@ def test_agg_consistency(self):
tm.assert_index_equal(result, expected)
result = r['A'].agg([np.sum, np.mean]).columns
- expected = pd.Index(['sum', 'mean'])
+ expected = Index(['sum', 'mean'])
tm.assert_index_equal(result, expected)
result = r.agg({'A': [np.sum, np.mean]}).columns
@@ -1688,6 +1688,160 @@ def _check_ew_structures(self, func, name):
self.assertEqual(type(frame_result), DataFrame)
+class TestPairwise(object):
+
+ # GH 7738
+ df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
+ columns=['C', 'C']),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
+ DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
+ columns=[1, 0.]),
+ DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
+ columns=[0, 1.]),
+ DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
+ columns=[1., 'X']), ]
+ df2 = DataFrame([[None, 1, 1], [None, 1, 2],
+ [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
+ s = Series([1, 1, 3, 8])
+
+ def compare(self, result, expected):
+
+ # since we have sorted the results
+ # we can only compare non-nans
+ result = result.dropna().values
+ expected = expected.dropna().values
+
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
+ def test_no_flex(self, f):
+
+ # DataFrame methods (which do not call _flex_binary_moment())
+
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.columns)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x: x.expanding().cov(pairwise=True),
+ lambda x: x.expanding().corr(pairwise=True),
+ lambda x: x.rolling(window=3).cov(pairwise=True),
+ lambda x: x.rolling(window=3).corr(pairwise=True),
+ lambda x: x.ewm(com=3).cov(pairwise=True),
+ lambda x: x.ewm(com=3).corr(pairwise=True)])
+ def test_pairwise_with_self(self, f):
+
+ # DataFrame with itself, pairwise=True
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index.levels[0],
+ df.index,
+ check_names=False)
+ tm.assert_index_equal(result.index.levels[1],
+ df.columns,
+ check_names=False)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x: x.expanding().cov(pairwise=False),
+ lambda x: x.expanding().corr(pairwise=False),
+ lambda x: x.rolling(window=3).cov(pairwise=False),
+ lambda x: x.rolling(window=3).corr(pairwise=False),
+ lambda x: x.ewm(com=3).cov(pairwise=False),
+ lambda x: x.ewm(com=3).corr(pairwise=False), ])
+ def test_no_pairwise_with_self(self, f):
+
+ # DataFrame with itself, pairwise=False
+ results = [f(df) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.index)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y, pairwise=True),
+ lambda x, y: x.expanding().corr(y, pairwise=True),
+ lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
+ lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
+ lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
+ lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])
+ def test_pairwise_with_other(self, f):
+
+ # DataFrame with another DataFrame, pairwise=True
+ results = [f(df, self.df2) for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index.levels[0],
+ df.index,
+ check_names=False)
+ tm.assert_index_equal(result.index.levels[1],
+ self.df2.columns,
+ check_names=False)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y, pairwise=False),
+ lambda x, y: x.expanding().corr(y, pairwise=False),
+ lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
+ lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
+ lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
+ lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])
+ def test_no_pairwise_with_other(self, f):
+
+ # DataFrame with another DataFrame, pairwise=False
+ results = [f(df, self.df2) if df.columns.is_unique else None
+ for df in self.df1s]
+ for (df, result) in zip(self.df1s, results):
+ if result is not None:
+ with catch_warnings(record=True):
+ # we can have int and str columns
+ expected_index = df.index.union(self.df2.index)
+ expected_columns = df.columns.union(self.df2.columns)
+ tm.assert_index_equal(result.index, expected_index)
+ tm.assert_index_equal(result.columns, expected_columns)
+ else:
+ tm.assertRaisesRegexp(
+ ValueError, "'arg1' columns are not unique", f, df,
+ self.df2)
+ tm.assertRaisesRegexp(
+ ValueError, "'arg2' columns are not unique", f,
+ self.df2, df)
+
+ @pytest.mark.parametrize(
+ 'f', [lambda x, y: x.expanding().cov(y),
+ lambda x, y: x.expanding().corr(y),
+ lambda x, y: x.rolling(window=3).cov(y),
+ lambda x, y: x.rolling(window=3).corr(y),
+ lambda x, y: x.ewm(com=3).cov(y),
+ lambda x, y: x.ewm(com=3).corr(y), ])
+ def test_pairwise_with_series(self, f):
+
+ # DataFrame with a Series
+ results = ([f(df, self.s) for df in self.df1s] +
+ [f(self.s, df) for df in self.df1s])
+ for (df, result) in zip(self.df1s, results):
+ tm.assert_index_equal(result.index, df.index)
+ tm.assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.compare(result, results[0])
+
+
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
@@ -2083,21 +2237,6 @@ def test_expanding_consistency(self):
assert_equal(expanding_f_result,
expanding_apply_f_result)
- if (name in ['cov', 'corr']) and isinstance(x,
- DataFrame):
- # test pairwise=True
- expanding_f_result = expanding_f(x, pairwise=True)
- expected = Panel(items=x.index,
- major_axis=x.columns,
- minor_axis=x.columns)
- for i, _ in enumerate(x.columns):
- for j, _ in enumerate(x.columns):
- expected.iloc[:, i, j] = getattr(
- x.iloc[:, i].expanding(
- min_periods=min_periods),
- name)(x.iloc[:, j])
- tm.assert_panel_equal(expanding_f_result, expected)
-
@tm.slow
def test_rolling_consistency(self):
@@ -2203,25 +2342,6 @@ def cases():
assert_equal(rolling_f_result,
rolling_apply_f_result)
- if (name in ['cov', 'corr']) and isinstance(
- x, DataFrame):
- # test pairwise=True
- rolling_f_result = rolling_f(x,
- pairwise=True)
- expected = Panel(items=x.index,
- major_axis=x.columns,
- minor_axis=x.columns)
- for i, _ in enumerate(x.columns):
- for j, _ in enumerate(x.columns):
- expected.iloc[:, i, j] = (
- getattr(
- x.iloc[:, i]
- .rolling(window=window,
- min_periods=min_periods,
- center=center),
- name)(x.iloc[:, j]))
- tm.assert_panel_equal(rolling_f_result, expected)
-
# binary moments
def test_rolling_cov(self):
A = self.series
@@ -2257,11 +2377,11 @@ def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
- panel = get_result(self.frame)
- actual = panel.loc[:, 1, 5]
+ result = get_result(self.frame)
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
expected = get_result(self.frame[1], self.frame[5])
- tm.assert_series_equal(actual, expected, check_names=False)
- self.assertEqual(actual.name, 5)
+ tm.assert_series_equal(result, expected, check_names=False)
def test_flex_binary_moment(self):
# GH3155
@@ -2429,17 +2549,14 @@ def test_expanding_cov_pairwise(self):
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
- for i in result.items:
- tm.assert_almost_equal(result[i], rolling_result[i])
+ tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
-
- for i in result.items:
- tm.assert_almost_equal(result[i], rolling_result[i])
+ tm.assert_frame_equal(result, rolling_result)
def test_expanding_cov_diff_index(self):
# GH 7512
@@ -2507,8 +2624,6 @@ def test_rolling_functions_window_non_shrinkage(self):
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
- df_expected_panel = Panel(items=df.index, major_axis=df.columns,
- minor_axis=df.columns)
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=False)),
@@ -2540,13 +2655,24 @@ def test_rolling_functions_window_non_shrinkage(self):
# scipy needed for rolling_window
continue
+ def test_rolling_functions_window_non_shrinkage_binary(self):
+
+ # corr/cov return a MI DataFrame
+ df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],
+ columns=Index(['A', 'B'], name='foo'),
+ index=Index(range(4), name='bar'))
+ df_expected = DataFrame(
+ columns=Index(['A', 'B'], name='foo'),
+ index=pd.MultiIndex.from_product([df.index, df.columns],
+ names=['bar', 'foo']),
+ dtype='float64')
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True))]
for f in functions:
- df_result_panel = f(df)
- tm.assert_panel_equal(df_result_panel, df_expected_panel)
+ df_result = f(df)
+ tm.assert_frame_equal(df_result, df_expected)
def test_moment_functions_zero_length(self):
# GH 8056
@@ -2554,13 +2680,9 @@ def test_moment_functions_zero_length(self):
s_expected = s
df1 = DataFrame()
df1_expected = df1
- df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns,
- minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
- df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns,
- minor_axis=df2.columns)
functions = [lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(
@@ -2613,6 +2735,23 @@ def test_moment_functions_zero_length(self):
# scipy needed for rolling_window
continue
+ def test_moment_functions_zero_length_pairwise(self):
+
+ df1 = DataFrame()
+ df1_expected = df1
+ df2 = DataFrame(columns=Index(['a'], name='foo'),
+ index=Index([], name='bar'))
+ df2['a'] = df2['a'].astype('float64')
+
+ df1_expected = DataFrame(
+ index=pd.MultiIndex.from_product([df1.index, df1.columns]),
+ columns=Index([]))
+ df2_expected = DataFrame(
+ index=pd.MultiIndex.from_product([df2.index, df2.columns],
+ names=['bar', 'foo']),
+ columns=Index(['a'], name='foo'),
+ dtype='float64')
+
functions = [lambda x: (x.expanding(min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5)
@@ -2623,24 +2762,33 @@ def test_moment_functions_zero_length(self):
.corr(x, pairwise=True)),
]
for f in functions:
- df1_result_panel = f(df1)
- tm.assert_panel_equal(df1_result_panel, df1_expected_panel)
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
- df2_result_panel = f(df2)
- tm.assert_panel_equal(df2_result_panel, df2_expected_panel)
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
- df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=['A', 'B'])
- df1a = DataFrame([[1, 5], [3, 9]], index=[0, 2], columns=['A', 'B'])
- df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
- df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
- result1 = df1.expanding().cov(df2a, pairwise=True)[2]
- result2 = df1.expanding().cov(df2a, pairwise=True)[2]
- result3 = df1a.expanding().cov(df2, pairwise=True)[2]
- result4 = df1a.expanding().cov(df2a, pairwise=True)[2]
- expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A', 'B'],
- columns=['X', 'Y'])
+ df1 = DataFrame([[1, 5], [3, 2], [3, 9]],
+ columns=Index(['A', 'B'], name='foo'))
+ df1a = DataFrame([[1, 5], [3, 9]],
+ index=[0, 2],
+ columns=Index(['A', 'B'], name='foo'))
+ df2 = DataFrame([[5, 6], [None, None], [2, 1]],
+ columns=Index(['X', 'Y'], name='foo'))
+ df2a = DataFrame([[5, 6], [2, 1]],
+ index=[0, 2],
+ columns=Index(['X', 'Y'], name='foo'))
+ # TODO: xref gh-15826
+ # .loc is not preserving the names
+ result1 = df1.expanding().cov(df2a, pairwise=True).loc[2]
+ result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
+ expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],
+ columns=Index(['A', 'B'], name='foo'),
+ index=Index(['X', 'Y'], name='foo'))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
@@ -2648,149 +2796,30 @@ def test_expanding_cov_pairwise_diff_length(self):
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
- df1 = DataFrame([[1, 2], [3, 2], [3, 4]], columns=['A', 'B'])
- df1a = DataFrame([[1, 2], [3, 4]], index=[0, 2], columns=['A', 'B'])
- df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
- df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
- result1 = df1.expanding().corr(df2, pairwise=True)[2]
- result2 = df1.expanding().corr(df2a, pairwise=True)[2]
- result3 = df1a.expanding().corr(df2, pairwise=True)[2]
- result4 = df1a.expanding().corr(df2a, pairwise=True)[2]
- expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A', 'B'],
- columns=['X', 'Y'])
+ df1 = DataFrame([[1, 2], [3, 2], [3, 4]],
+ columns=['A', 'B'],
+ index=Index(range(3), name='bar'))
+ df1a = DataFrame([[1, 2], [3, 4]],
+ index=Index([0, 2], name='bar'),
+ columns=['A', 'B'])
+ df2 = DataFrame([[5, 6], [None, None], [2, 1]],
+ columns=['X', 'Y'],
+ index=Index(range(3), name='bar'))
+ df2a = DataFrame([[5, 6], [2, 1]],
+ index=Index([0, 2], name='bar'),
+ columns=['X', 'Y'])
+ result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
+ result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
+ expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
+ columns=['A', 'B'],
+ index=Index(['X', 'Y']))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
- def test_pairwise_stats_column_names_order(self):
- # GH 7738
- df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
- columns=['C', 'C']),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
- DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
- columns=[1, 0.]),
- DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
- columns=[0, 1.]),
- DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
- columns=[1., 'X']), ]
- df2 = DataFrame([[None, 1, 1], [None, 1, 2],
- [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
- s = Series([1, 1, 3, 8])
-
- # suppress warnings about incomparable objects, as we are deliberately
- # testing with such column labels
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore",
- message=".*incomparable objects.*",
- category=RuntimeWarning)
-
- # DataFrame methods (which do not call _flex_binary_moment())
- for f in [lambda x: x.cov(), lambda x: x.corr(), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.columns)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- # compare internal values, as columns can be different
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with itself, pairwise=True
- for f in [lambda x: x.expanding().cov(pairwise=True),
- lambda x: x.expanding().corr(pairwise=True),
- lambda x: x.rolling(window=3).cov(pairwise=True),
- lambda x: x.rolling(window=3).corr(pairwise=True),
- lambda x: x.ewm(com=3).cov(pairwise=True),
- lambda x: x.ewm(com=3).corr(pairwise=True), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.items, df.index)
- tm.assert_index_equal(result.major_axis, df.columns)
- tm.assert_index_equal(result.minor_axis, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with itself, pairwise=False
- for f in [lambda x: x.expanding().cov(pairwise=False),
- lambda x: x.expanding().corr(pairwise=False),
- lambda x: x.rolling(window=3).cov(pairwise=False),
- lambda x: x.rolling(window=3).corr(pairwise=False),
- lambda x: x.ewm(com=3).cov(pairwise=False),
- lambda x: x.ewm(com=3).corr(pairwise=False), ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.index)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with another DataFrame, pairwise=True
- for f in [lambda x, y: x.expanding().cov(y, pairwise=True),
- lambda x, y: x.expanding().corr(y, pairwise=True),
- lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
- lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
- lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
- lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ]:
- results = [f(df, df2) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.items, df.index)
- tm.assert_index_equal(result.major_axis, df.columns)
- tm.assert_index_equal(result.minor_axis, df2.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
- # DataFrame with another DataFrame, pairwise=False
- for f in [lambda x, y: x.expanding().cov(y, pairwise=False),
- lambda x, y: x.expanding().corr(y, pairwise=False),
- lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
- lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
- lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
- lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ]:
- results = [f(df, df2) if df.columns.is_unique else None
- for df in df1s]
- for (df, result) in zip(df1s, results):
- if result is not None:
- expected_index = df.index.union(df2.index)
- expected_columns = df.columns.union(df2.columns)
- tm.assert_index_equal(result.index, expected_index)
- tm.assert_index_equal(result.columns, expected_columns)
- else:
- tm.assertRaisesRegexp(
- ValueError, "'arg1' columns are not unique", f, df,
- df2)
- tm.assertRaisesRegexp(
- ValueError, "'arg2' columns are not unique", f,
- df2, df)
-
- # DataFrame with a Series
- for f in [lambda x, y: x.expanding().cov(y),
- lambda x, y: x.expanding().corr(y),
- lambda x, y: x.rolling(window=3).cov(y),
- lambda x, y: x.rolling(window=3).corr(y),
- lambda x, y: x.ewm(com=3).cov(y),
- lambda x, y: x.ewm(com=3).corr(y), ]:
- results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
- for (df, result) in zip(df1s, results):
- tm.assert_index_equal(result.index, df.index)
- tm.assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equal(result.values,
- results[0].values)
-
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py
index c41924a7987bd..623c5fa02fcb2 100644
--- a/pandas/tests/tools/test_concat.py
+++ b/pandas/tests/tools/test_concat.py
@@ -1,4 +1,5 @@
from warnings import catch_warnings
+
import numpy as np
from numpy.random import randn
@@ -1283,8 +1284,9 @@ def test_concat_mixed_objs(self):
assert_frame_equal(result, expected)
# invalid concatente of mixed dims
- panel = tm.makePanel()
- self.assertRaises(ValueError, lambda: concat([panel, s1], axis=1))
+ with catch_warnings(record=True):
+ panel = tm.makePanel()
+ self.assertRaises(ValueError, lambda: concat([panel, s1], axis=1))
def test_empty_dtype_coerce(self):
@@ -1322,56 +1324,59 @@ def test_dtype_coerceion(self):
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_panel_concat_other_axes(self):
- panel = tm.makePanel()
+ with catch_warnings(record=True):
+ panel = tm.makePanel()
- p1 = panel.iloc[:, :5, :]
- p2 = panel.iloc[:, 5:, :]
+ p1 = panel.iloc[:, :5, :]
+ p2 = panel.iloc[:, 5:, :]
- result = concat([p1, p2], axis=1)
- tm.assert_panel_equal(result, panel)
+ result = concat([p1, p2], axis=1)
+ tm.assert_panel_equal(result, panel)
- p1 = panel.iloc[:, :, :2]
- p2 = panel.iloc[:, :, 2:]
+ p1 = panel.iloc[:, :, :2]
+ p2 = panel.iloc[:, :, 2:]
- result = concat([p1, p2], axis=2)
- tm.assert_panel_equal(result, panel)
+ result = concat([p1, p2], axis=2)
+ tm.assert_panel_equal(result, panel)
- # if things are a bit misbehaved
- p1 = panel.iloc[:2, :, :2]
- p2 = panel.iloc[:, :, 2:]
- p1['ItemC'] = 'baz'
+ # if things are a bit misbehaved
+ p1 = panel.iloc[:2, :, :2]
+ p2 = panel.iloc[:, :, 2:]
+ p1['ItemC'] = 'baz'
- result = concat([p1, p2], axis=2)
+ result = concat([p1, p2], axis=2)
- expected = panel.copy()
- expected['ItemC'] = expected['ItemC'].astype('O')
- expected.loc['ItemC', :, :2] = 'baz'
- tm.assert_panel_equal(result, expected)
+ expected = panel.copy()
+ expected['ItemC'] = expected['ItemC'].astype('O')
+ expected.loc['ItemC', :, :2] = 'baz'
+ tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
- # #2257
- def make_panel():
- index = 5
- cols = 3
-
- def df():
- return DataFrame(np.random.randn(index, cols),
- index=["I%s" % i for i in range(index)],
- columns=["C%s" % i for i in range(cols)])
- return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))
-
- panel1 = make_panel()
- panel2 = make_panel()
-
- panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
- for x in panel2.major_axis]),
- axis=1)
-
- panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
- panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
-
- # it works!
- concat([panel1, panel3], axis=1, verify_integrity=True)
+ with catch_warnings(record=True):
+ # #2257
+ def make_panel():
+ index = 5
+ cols = 3
+
+ def df():
+ return DataFrame(np.random.randn(index, cols),
+ index=["I%s" % i for i in range(index)],
+ columns=["C%s" % i for i in range(cols)])
+ return Panel(dict([("Item%s" % x, df())
+ for x in ['A', 'B', 'C']]))
+
+ panel1 = make_panel()
+ panel2 = make_panel()
+
+ panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
+ for x in panel2.major_axis]),
+ axis=1)
+
+ panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
+ panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
+
+ # it works!
+ concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
with catch_warnings(record=True):
diff --git a/pandas/tests/types/test_missing.py b/pandas/tests/types/test_missing.py
index 2e35f5c1badbb..efd6dda02ab4b 100644
--- a/pandas/tests/types/test_missing.py
+++ b/pandas/tests/types/test_missing.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
@@ -76,14 +77,15 @@ def test_isnull(self):
tm.assert_frame_equal(result, expected)
# panel
- for p in [tm.makePanel(), tm.makePeriodPanel(),
- tm.add_nans(tm.makePanel())]:
- result = isnull(p)
- expected = p.apply(isnull)
- tm.assert_panel_equal(result, expected)
+ with catch_warnings(record=True):
+ for p in [tm.makePanel(), tm.makePeriodPanel(),
+ tm.add_nans(tm.makePanel())]:
+ result = isnull(p)
+ expected = p.apply(isnull)
+ tm.assert_panel_equal(result, expected)
# panel 4d
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9a9f3c6c6b945..9d7b004374318 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1747,8 +1747,10 @@ def makePeriodPanel(nper=None):
def makePanel4D(nper=None):
- return Panel4D(dict(l1=makePanel(nper), l2=makePanel(nper),
- l3=makePanel(nper)))
+ with warnings.catch_warnings(record=True):
+ d = dict(l1=makePanel(nper), l2=makePanel(nper),
+ l3=makePanel(nper))
+ return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
| closes #13563
going to be on top of #15677
Partially addresses https://github.com/pandas-dev/pandas/issues/14565#issuecomment-275746292
Note this is currently a ``FutureWarning`` mainly to fail the tests if I missed anything. Intention is to change to a ``DeprecationWarning``. | https://api.github.com/repos/pandas-dev/pandas/pulls/15601 | 2017-03-07T16:27:43Z | 2017-04-07T19:10:25Z | null | 2018-10-28T08:20:26Z |
Parallelize doc build | diff --git a/doc/make.py b/doc/make.py
index 8a6d4e5df24f0..a2f5be5594e44 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -197,7 +197,7 @@ def html():
print(e)
print("Failed to convert %s" % nb)
- if os.system('sphinx-build -P -b html -d build/doctrees '
+ if os.system('sphinx-build -j 2 -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
@@ -222,7 +222,7 @@ def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
- if os.system('sphinx-build -b latex -d build/doctrees '
+ if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
@@ -245,7 +245,7 @@ def latex_forced():
check_build()
if sys.platform != 'win32':
# LaTeX format.
- if os.system('sphinx-build -b latex -d build/doctrees '
+ if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c7a68a0fe9fbb..fa57d6d692152 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3758,7 +3758,7 @@ be data_columns
# on-disk operations
store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
- store.select('df_dc', [ pd.Term('B>0') ])
+ store.select('df_dc', where='B>0')
# getting creative
store.select('df_dc', 'B > 0 & C > 0 & string == foo')
@@ -4352,6 +4352,9 @@ HDFStore supports ``Panel4D`` storage.
.. ipython:: python
:okwarning:
+ wp = pd.Panel(randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
p4d = pd.Panel4D({ 'l1' : wp })
p4d
store.append('p4d', p4d)
@@ -4368,8 +4371,7 @@ object). This cannot be changed after table creation.
:okwarning:
store.append('p4d2', p4d, axes=['labels', 'major_axis', 'minor_axis'])
- store
- store.select('p4d2', [ pd.Term('labels=l1'), pd.Term('items=Item1'), pd.Term('minor_axis=A_big_strings') ])
+ store.select('p4d2', where='labels=l1 and items=Item1 and minor_axis=A')
.. ipython:: python
:suppress:
| closes #15591
https://travis-ci.org/jreback/pandas/builds/208594369
a couple of minutes faster with -j 2.
fixes some deprecated use of ``pd.Term``.
Note there is another error in HDFStore build, though I can't repro it locally. | https://api.github.com/repos/pandas-dev/pandas/pulls/15600 | 2017-03-07T14:29:18Z | 2017-03-07T14:31:01Z | null | 2017-03-07T14:43:48Z |
DOC: remove Panel4D from the API docs #15579 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 33ac5fde651d4..fbce64df84859 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1237,58 +1237,7 @@ Serialization / IO / Conversion
Panel.to_frame
Panel.to_xarray
Panel.to_clipboard
-
-.. _api.panel4d:
-
-Panel4D
--------
-
-Constructor
-~~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- Panel4D
-
-Serialization / IO / Conversion
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- Panel4D.to_xarray
-
-Attributes and underlying data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-**Axes**
-
- * **labels**: axis 1; each label corresponds to a Panel contained inside
- * **items**: axis 2; each item corresponds to a DataFrame contained inside
- * **major_axis**: axis 3; the index (rows) of each of the DataFrames
- * **minor_axis**: axis 4; the columns of each of the DataFrames
-
-.. autosummary::
- :toctree: generated/
-
- Panel4D.values
- Panel4D.axes
- Panel4D.ndim
- Panel4D.size
- Panel4D.shape
- Panel4D.dtypes
- Panel4D.ftypes
- Panel4D.get_dtype_counts
- Panel4D.get_ftype_counts
-
-Conversion
-~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- Panel4D.astype
- Panel4D.copy
- Panel4D.isnull
- Panel4D.notnull
-
+
.. _api.index:
Index
diff --git a/scripts/api_rst_coverage.py b/scripts/api_rst_coverage.py
index cc456f03c02ec..6bb5383509be6 100644
--- a/scripts/api_rst_coverage.py
+++ b/scripts/api_rst_coverage.py
@@ -4,11 +4,11 @@
def main():
# classes whose members to check
- classes = [pd.Series, pd.DataFrame, pd.Panel, pd.Panel4D]
+ classes = [pd.Series, pd.DataFrame, pd.Panel]
def class_name_sort_key(x):
if x.startswith('Series'):
- # make sure Series precedes DataFrame, Panel, and Panel4D
+ # make sure Series precedes DataFrame, and Panel.
return ' ' + x
else:
return x
| Hi this is my first PR to Pandas
I'm pretty sure this is what @jorisvandenbossche was getting at on ticket <a href='https://github.com/pandas-dev/pandas/issues/15579'>#15579<a/>.
I noticed that <a href='https://github.com/pandas-dev/pandas/blob/be4a63fe791e27c2f8a9ae4f3a419ccc255c1b5b/pandas/tests/test_expressions.py#L206'>tests</a> for panel4D also still exist, is that something that should remain?
Thanks!
| https://api.github.com/repos/pandas-dev/pandas/pulls/15598 | 2017-03-07T07:02:55Z | 2017-03-07T07:41:31Z | 2017-03-07T07:41:31Z | 2017-03-07T07:51:22Z |
DOC: remove wakari.io section | diff --git a/doc/source/install.rst b/doc/source/install.rst
index 8b0fec6a3dac3..fe2a9fa4ba509 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -23,18 +23,6 @@ Officially Python 2.7, 3.4, 3.5, and 3.6
Installing pandas
-----------------
-Trying out pandas, no installation required!
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The easiest way to start experimenting with pandas doesn't involve installing
-pandas at all.
-
-`Wakari <https://wakari.io>`__ is a free service that provides a hosted
-`IPython Notebook <http://ipython.org/notebook.html>`__ service in the cloud.
-
-Simply create an account, and have access to pandas from within your brower via
-an `IPython Notebook <http://ipython.org/notebook.html>`__ in a few minutes.
-
.. _install.anaconda:
Installing pandas with Anaconda
| - [x] closes #15595
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15596 | 2017-03-06T21:46:49Z | 2017-03-06T22:15:10Z | 2017-03-06T22:15:10Z | 2017-03-06T23:56:59Z |
BUG: upcasting on reshaping ops #13247 | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index f6d5e3df814fc..cc13f39a47d5a 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -886,3 +886,5 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+
+- Concating multiple objects will no longer result in automatically upcast to `float64`, and instead try to find the smallest `dtype` that would suffice (:issue:`13247`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index aa954fbee9a60..1c070b3ed34a9 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -21,6 +21,7 @@
is_datetime64tz_dtype,
is_object_dtype,
is_datetimelike_v_numeric,
+ is_float_dtype, is_numeric_dtype,
is_numeric_v_string_like, is_extension_type,
is_list_like,
is_re,
@@ -4522,6 +4523,8 @@ def _interleaved_dtype(blocks):
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
+ elif have_int and have_float and not have_complex:
+ return np.dtype('float64')
elif have_complex:
return np.dtype('c16')
else:
@@ -4891,6 +4894,8 @@ def get_empty_dtype_and_na(join_units):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
+ elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
+ upcast_cls = dtype.name
else:
upcast_cls = 'float'
@@ -4915,8 +4920,6 @@ def get_empty_dtype_and_na(join_units):
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
- elif 'float' in upcast_classes:
- return np.dtype(np.float64), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
@@ -4925,7 +4928,17 @@ def get_empty_dtype_and_na(join_units):
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
- raise AssertionError("invalid dtype determination in get_concat_dtype")
+ g = np.find_common_type(upcast_classes, [])
+ if is_float_dtype(g):
+ return g, g.type(np.nan)
+ elif is_numeric_dtype(g):
+ if has_none_blocks:
+ return np.float64, np.nan
+ else:
+ return g, None
+ else:
+ msg = "invalid dtype determination in get_concat_dtype"
+ raise AssertionError(msg)
def concatenate_join_units(join_units, concat_axis, copy):
@@ -5190,7 +5203,6 @@ def is_null(self):
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
-
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index a00f880ff6591..b92ffbfb6fe59 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -210,7 +210,7 @@ def f():
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
- dtype='float64')
+ dtype='object')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 5ab2bbc4ac6ba..df5e843097514 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -651,7 +651,7 @@ def test_interleave(self):
mgr = create_mgr('a: f8; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f4; b: i8')
- self.assertEqual(mgr.as_matrix().dtype, 'f4')
+ self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f4; b: i8; d: object')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: bool; b: i8')
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index d587e4ea6a1fa..24e26be15a44b 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -250,6 +250,7 @@ def test_basic_types(self):
self.assertEqual(type(r), exp_df_type)
r = get_dummies(s_df, sparse=self.sparse, columns=['a'])
+ exp_blk_type = pd.core.internals.IntBlock
self.assertEqual(type(r[['a_0']]._data.blocks[0]), exp_blk_type)
self.assertEqual(type(r[['a_1']]._data.blocks[0]), exp_blk_type)
self.assertEqual(type(r[['a_2']]._data.blocks[0]), exp_blk_type)
diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py
index a2b5773f551c9..a0b22892e74c5 100644
--- a/pandas/tests/tools/test_concat.py
+++ b/pandas/tests/tools/test_concat.py
@@ -13,6 +13,8 @@
makeCustomDataframe as mkdf,
assert_almost_equal)
+import pytest
+
class ConcatenateBase(tm.TestCase):
@@ -1899,3 +1901,15 @@ def test_concat_multiindex_dfs_with_deepcopy(self):
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=['testname'])
tm.assert_frame_equal(result_no_copy, expected)
+
+
+@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
+@pytest.mark.parametrize('dt', np.sctypes['float'])
+def test_concat_no_unnecessary_upcast(dt, pdt):
+ # GH 13247
+ dims = pdt().ndim
+ dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)),
+ pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
+ pdt(np.array([5], dtype=dt, ndmin=dims))]
+ x = pd.concat(dfs)
+ assert x.values.dtype == dt
| Only rebasing and fixing the merge conflicts
Original work done by: @jennolsen84
Original branch: https://github.com/jennolsen84/pandas/tree/concatnan
Original PR: #13337
- [x] closes #13247
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15594 | 2017-03-06T21:13:02Z | 2017-03-14T13:33:58Z | null | 2017-03-20T21:21:56Z |
Moved freeze_panes validation to io/excel.py (#15160) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b3e43edc3eb55..15179ac321076 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1431,24 +1431,12 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
- freeze_panes = self._validate_freeze_panes(freeze_panes)
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol,
freeze_panes=freeze_panes)
if need_save:
excel_writer.save()
- def _validate_freeze_panes(self, freeze_panes):
- if freeze_panes is not None:
- if (
- len(freeze_panes) == 2 and
- all(isinstance(item, int) for item in freeze_panes)
- ):
- return freeze_panes
-
- raise ValueError("freeze_panes must be of form (row, column)"
- " where row and column are integers")
-
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 37a61b7dc9ab5..00ec8bcf060ef 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -543,6 +543,22 @@ def __exit__(self, exc_type, exc_value, traceback):
self.close()
+def _validate_freeze_panes(freeze_panes):
+ if freeze_panes is not None:
+ if (
+ len(freeze_panes) == 2 and
+ all(isinstance(item, int) for item in freeze_panes)
+ ):
+ return True
+
+ raise ValueError("freeze_panes must be of form (row, column)"
+ " where row and column are integers")
+
+ # freeze_panes wasn't specified, return False so it won't be applied
+ # to output sheet
+ return False
+
+
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
@@ -1330,7 +1346,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
wks.title = sheet_name
self.sheets[sheet_name] = wks
- if freeze_panes is not None:
+ if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
@@ -1418,7 +1434,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
- if freeze_panes is not None:
+ if _validate_freeze_panes(freeze_panes):
wks.set_panes_frozen(True)
wks.set_horz_split_pos(freeze_panes[0])
wks.set_vert_split_pos(freeze_panes[1])
@@ -1550,7 +1566,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
style_dict = {}
- if freeze_panes is not None:
+ if _validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
| - follow up to #15160
- [x] tests added / passed - Existing test suite passed
- [x] passes ``git diff upstream/master | flake8 --diff``
| https://api.github.com/repos/pandas-dev/pandas/pulls/15592 | 2017-03-06T14:51:06Z | 2017-03-07T13:22:10Z | null | 2017-03-07T13:22:10Z |
API: return Index instead of array from DatetimeIndex field accessors (GH15022) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 5ac7624856040..61ec609cd57b2 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -471,6 +471,40 @@ New Behavior:
s.map(lambda x: x.hour)
+
+.. _whatsnew_0200.api_breaking.index_dt_field:
+
+Accessing datetime fields of Index now return Index
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The several datetime-related attributes (see :ref:`here <timeseries.components>`
+for an overview) of ``DatetimeIndex``, ``PeriodIndex`` and ``TimedeltaIndex`` previously
+returned numpy arrays, now they will return a new Index object (:issue:`15022`).
+Only in the case of a boolean field, a the return value is still a boolean array
+instead of an Index (to support boolean indexing).
+
+Previous behaviour:
+
+.. code-block:: ipython
+
+ In [1]: idx = pd.date_range("2015-01-01", periods=5, freq='10H')
+
+ In [2]: idx.hour
+ Out[2]: array([ 0, 10, 20, 6, 16], dtype=int32)
+
+New Behavior:
+
+.. ipython:: python
+
+ idx = pd.date_range("2015-01-01", periods=5, freq='10H')
+ idx.hour
+
+This has the advantage that specific Index methods are still available on the
+result. On the other hand, this might have backward incompatibilities: e.g.
+compared to numpy arrays, Index objects are not mutable (values cannot be set
+by indexing). To get the original result, you can convert to a numpy array
+explicitly using ``np.asarray(idx.hour)``.
+
.. _whatsnew_0200.api_breaking.s3:
S3 File Handling
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index e99f1d46637c2..ef24c493f5090 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -172,6 +172,7 @@ def test_normalize(self):
class TestDatetime64(tm.TestCase):
def test_datetimeindex_accessors(self):
+
dti_naive = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
periods=365)
# GH 13303
@@ -255,6 +256,34 @@ def test_datetimeindex_accessors(self):
self.assertEqual(len(dti.is_year_end), 365)
self.assertEqual(len(dti.weekday_name), 365)
+ dti.name = 'name'
+
+ # non boolean accessors -> return Index
+ for accessor in ['year', 'month', 'day', 'hour', 'minute',
+ 'second', 'microsecond', 'nanosecond',
+ 'dayofweek', 'dayofyear', 'weekofyear',
+ 'quarter', 'weekday_name']:
+ res = getattr(dti, accessor)
+ assert len(res) == 365
+ assert isinstance(res, Index)
+ assert res.name == 'name'
+
+ # boolean accessors -> return array
+ for accessor in ['is_month_start', 'is_month_end',
+ 'is_quarter_start', 'is_quarter_end',
+ 'is_year_start', 'is_year_end']:
+ res = getattr(dti, accessor)
+ assert len(res) == 365
+ assert isinstance(res, np.ndarray)
+
+ # test boolean indexing
+ res = dti[dti.is_quarter_start]
+ exp = dti[[0, 90, 181, 273]]
+ tm.assert_index_equal(res, exp)
+ res = dti[dti.is_leap_year]
+ exp = DatetimeIndex([], freq='D', tz=dti.tz, name='name')
+ tm.assert_index_equal(res, exp)
+
dti = DatetimeIndex(freq='BQ-FEB', start=datetime(1998, 1, 1),
periods=4)
@@ -313,5 +342,5 @@ def test_datetimeindex_accessors(self):
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
- self.assert_numpy_array_equal(dti.nanosecond,
- np.arange(10, dtype=np.int32))
+ self.assert_index_equal(dti.nanosecond,
+ pd.Index(np.arange(10, dtype=np.int64)))
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index f13a84f4f0e92..ab70ad59846e8 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -91,8 +91,8 @@ def test_constructor_arrays_negative_year(self):
pindex = PeriodIndex(year=years, quarter=quarters)
- self.assert_numpy_array_equal(pindex.year, years)
- self.assert_numpy_array_equal(pindex.quarter, quarters)
+ self.assert_index_equal(pindex.year, pd.Index(years))
+ self.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 4fbadfca06ede..6a6c0ab49b15d 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -658,12 +658,12 @@ def test_negative_ordinals(self):
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
- '2012-03', '2012-04'], freq='D')
+ '2012-03', '2012-04'], freq='D', name='name')
- exp = np.array([2011, 2011, -1, 2012, 2012], dtype=np.int64)
- self.assert_numpy_array_equal(idx.year, exp)
- exp = np.array([1, 2, -1, 3, 4], dtype=np.int64)
- self.assert_numpy_array_equal(idx.month, exp)
+ exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
+ self.assert_index_equal(idx.year, exp)
+ exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
+ self.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 4c8571e4f08f9..3abc2d8422fd3 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -424,7 +424,7 @@ def test_total_seconds(self):
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
- tm.assert_almost_equal(rng.total_seconds(), np.array(expt))
+ tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
s = Series(rng)
@@ -486,16 +486,16 @@ def test_append_numpy_bug_1681(self):
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
- self.assert_numpy_array_equal(rng.days, np.array(
- [1, 1], dtype='int64'))
- self.assert_numpy_array_equal(
+ self.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))
+ self.assert_index_equal(
rng.seconds,
- np.array([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
- dtype='int64'))
- self.assert_numpy_array_equal(rng.microseconds, np.array(
- [100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
- self.assert_numpy_array_equal(rng.nanoseconds, np.array(
- [456, 456], dtype='int64'))
+ Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
+ dtype='int64'))
+ self.assert_index_equal(
+ rng.microseconds,
+ Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
+ self.assert_index_equal(rng.nanoseconds,
+ Index([456, 456], dtype='int64'))
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
@@ -509,6 +509,10 @@ def test_fields(self):
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
+ # preserve name (GH15589)
+ rng.name = 'name'
+ assert rng.days.name == 'name'
+
def test_freq_conversion(self):
# doc example
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 082f0fa9c40d5..bbf33c4db5ad7 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -597,9 +597,20 @@ def test_nat_fields(self):
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
+ # non boolean fields
fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'week', 'dayofyear',
- 'days_in_month', 'is_leap_year']
+ 'days_in_month']
+
+ for field in fields:
+ result = getattr(idx, field)
+ expected = [getattr(x, field) for x in idx]
+ self.assert_index_equal(result, pd.Index(expected))
+
+ # boolean fields
+ fields = ['is_leap_year']
+ # other boolean fields like 'is_month_start' and 'is_month_end'
+ # not yet supported by NaT
for field in fields:
result = getattr(idx, field)
diff --git a/pandas/tests/tools/test_pivot.py b/pandas/tests/tools/test_pivot.py
index 62863372dbd02..4502f232c6d9c 100644
--- a/pandas/tests/tools/test_pivot.py
+++ b/pandas/tests/tools/test_pivot.py
@@ -1367,7 +1367,7 @@ def test_daily(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
annual = pivot_annual(ts, 'D')
- doy = ts.index.dayofyear
+ doy = np.asarray(ts.index.dayofyear)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
doy[(~isleapyear(ts.index.year)) & (doy >= 60)] += 1
diff --git a/pandas/tests/tools/test_util.py b/pandas/tests/tools/test_util.py
index 2672db13a959f..ed64e8f42d84b 100644
--- a/pandas/tests/tools/test_util.py
+++ b/pandas/tests/tools/test_util.py
@@ -31,10 +31,10 @@ def test_datetimeindex(self):
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
- expected1 = np.array([1, 1, 2, 2], dtype=np.int32)
- expected2 = np.array([1, 2, 1, 2], dtype=np.int32)
- tm.assert_numpy_array_equal(result1, expected1)
- tm.assert_numpy_array_equal(result2, expected2)
+ expected1 = Index([1, 1, 2, 2])
+ expected2 = Index([1, 2, 1, 2])
+ tm.assert_index_equal(result1, expected1)
+ tm.assert_index_equal(result2, expected2)
def test_empty(self):
# product of empty factors
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 1ccc1652d2719..1fc0e1b73df6b 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -358,8 +358,8 @@ def test_field_access_localize(self):
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz=self.tzstr('America/Atikokan'))
- expected = np.arange(10, dtype=np.int32)
- self.assert_numpy_array_equal(dr.hour, expected)
+ expected = Index(np.arange(10, dtype=np.int64))
+ self.assert_index_equal(dr.hour, expected)
def test_with_tz(self):
tz = self.tz('US/Central')
@@ -947,8 +947,8 @@ def test_tz_convert_hour_overflow_dst(self):
'2009-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
- expected = np.array([13, 14, 13], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([13, 14, 13])
+ self.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
@@ -956,8 +956,8 @@ def test_tz_convert_hour_overflow_dst(self):
'2009-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
- expected = np.array([9, 9, 9], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([9, 9, 9])
+ self.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
@@ -965,8 +965,8 @@ def test_tz_convert_hour_overflow_dst(self):
'2008-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
- expected = np.array([13, 14, 13], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([13, 14, 13])
+ self.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
@@ -974,8 +974,8 @@ def test_tz_convert_hour_overflow_dst(self):
'2008-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
- expected = np.array([9, 9, 9], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([9, 9, 9])
+ self.assert_index_equal(ut.hour, expected)
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
@@ -989,8 +989,8 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
Timestamp('2009-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
- expected = np.array([13, 14, 13], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([13, 14, 13])
+ self.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
@@ -998,8 +998,8 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
Timestamp('2009-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
- expected = np.array([9, 9, 9], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([9, 9, 9])
+ self.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
@@ -1007,8 +1007,8 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
Timestamp('2008-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
- expected = np.array([13, 14, 13], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([13, 14, 13])
+ self.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
@@ -1016,8 +1016,8 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
Timestamp('2008-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
- expected = np.array([9, 9, 9], dtype=np.int32)
- self.assert_numpy_array_equal(ut.hour, expected)
+ expected = Index([9, 9, 9])
+ self.assert_index_equal(ut.hour, expected)
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
@@ -1028,9 +1028,8 @@ def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
idx = idx.tz_localize('UTC')
idx = idx.tz_convert('Europe/Moscow')
- expected = np.repeat(np.array([3, 4, 5], dtype=np.int32),
- np.array([n, n, 1]))
- self.assert_numpy_array_equal(idx.hour, expected)
+ expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
+ self.assert_index_equal(idx.hour, Index(expected))
def test_tslib_tz_convert_dst(self):
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
@@ -1039,62 +1038,57 @@ def test_tslib_tz_convert_dst(self):
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,
- 0, 1, 3, 4, 5], dtype=np.int32),
+ 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
- self.assert_numpy_array_equal(idx.hour, expected)
+ self.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
- expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
- dtype=np.int32),
+ expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
- self.assert_numpy_array_equal(idx.hour, expected)
+ self.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([19, 20, 21, 22, 23,
- 0, 1, 1, 2, 3, 4], dtype=np.int32),
+ 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
- self.assert_numpy_array_equal(idx.hour, expected)
+ self.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10], dtype=np.int32),
+ 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n,
n, n, n, 1]))
- self.assert_numpy_array_equal(idx.hour, expected)
+ self.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
- self.assert_numpy_array_equal(idx.hour,
- np.array([19, 19], dtype=np.int32))
+ self.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
- self.assert_numpy_array_equal(idx.hour,
- np.array([5, 5], dtype=np.int32))
+ self.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
- self.assert_numpy_array_equal(idx.hour,
- np.array([20, 20], dtype=np.int32))
+ self.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range('2014-11-01 00:00', '2014-11-02 000:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
- self.assert_numpy_array_equal(idx.hour,
- np.array([4, 4], dtype=np.int32))
+ self.assert_index_equal(idx.hour, Index([4, 4]))
def test_tzlocal(self):
# GH 13583
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index 82fcdbcd0d367..f9fd27176487c 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -105,6 +105,8 @@ def _delegate_property_get(self, name):
elif not is_list_like(result):
return result
+ result = np.asarray(result)
+
# blow up if we operate on categories
if self.orig is not None:
result = take_1d(result, self.orig.cat.codes)
diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py
index 8aea14a2688d1..bc768a8bc5b58 100644
--- a/pandas/tseries/converter.py
+++ b/pandas/tseries/converter.py
@@ -455,7 +455,7 @@ def period_break(dates, period):
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
- return (current - previous).nonzero()[0]
+ return np.nonzero(current - previous)[0]
def has_level_label(label_flags, vmin):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 983c1a4cd9de9..11d2d29597fc0 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -64,6 +64,7 @@ def f(self):
if self.tz is not utc:
values = self._local_timestamps()
+ # boolean accessors -> return array
if field in ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']:
@@ -73,16 +74,20 @@ def f(self):
result = libts.get_start_end_field(values, field, self.freqstr,
month_kw)
- elif field in ['weekday_name']:
- result = libts.get_date_name_field(values, field)
- return self._maybe_mask_results(result)
+ return self._maybe_mask_results(result, convert='float64')
elif field in ['is_leap_year']:
# no need to mask NaT
return libts.get_date_field(values, field)
+
+ # non-boolean accessors -> return Index
+ elif field in ['weekday_name']:
+ result = libts.get_date_name_field(values, field)
+ result = self._maybe_mask_results(result)
else:
result = libts.get_date_field(values, field)
+ result = self._maybe_mask_results(result, convert='float64')
- return self._maybe_mask_results(result, convert='float64')
+ return Index(result, name=self.name)
f.__name__ = name
f.__doc__ = docstring
@@ -1909,9 +1914,9 @@ def to_julian_date(self):
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
- year = self.year
- month = self.month
- day = self.day
+ year = np.asarray(self.year)
+ month = np.asarray(self.month)
+ day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index f7e9ba9eaa9b1..c279d5a9342e8 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -52,7 +52,8 @@
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
- return get_period_field_arr(alias, self._values, base)
+ result = get_period_field_arr(alias, self._values, base)
+ return Index(result, name=self.name)
f.__name__ = name
f.__doc__ = docstring
return property(f)
@@ -585,7 +586,7 @@ def to_datetime(self, dayfirst=False):
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
- return tslib._isleapyear_arr(self.year)
+ return tslib._isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index 13d844bb6a399..55333890640c1 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -374,7 +374,7 @@ def _get_field(self, m):
else:
result = np.array([getattr(Timedelta(val), m)
for val in values], dtype='int64')
- return result
+ return Index(result, name=self.name)
@property
def days(self):
@@ -437,7 +437,8 @@ def total_seconds(self):
.. versionadded:: 0.17.0
"""
- return self._maybe_mask_results(1e-9 * self.asi8)
+ return Index(self._maybe_mask_results(1e-9 * self.asi8),
+ name=self.name)
def to_pytimedelta(self):
"""
diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py
index dc460dee8415b..da3bb075dd02c 100644
--- a/pandas/tseries/util.py
+++ b/pandas/tseries/util.py
@@ -54,7 +54,7 @@ def pivot_annual(series, freq=None):
if freq == 'D':
width = 366
- offset = index.dayofyear - 1
+ offset = np.asarray(index.dayofyear) - 1
# adjust for leap year
offset[(~isleapyear(year)) & (offset >= 59)] += 1
@@ -63,7 +63,7 @@ def pivot_annual(series, freq=None):
# todo: strings like 1/1, 1/25, etc.?
elif freq in ('M', 'BM'):
width = 12
- offset = index.month - 1
+ offset = np.asarray(index.month) - 1
columns = lrange(1, 13)
elif freq == 'H':
width = 8784
| - [x] closes #15022
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
This changes the datetime field accessors of a DatetimeIndex (and PeriodIndex, etc) to return an Index object instead of a plain array:
So for example:
```
# PR
In [1]: idx = pd.date_range("2015-01-01", periods=5, freq='10H')
In [2]: idx
Out[2]:
DatetimeIndex(['2015-01-01 00:00:00', '2015-01-01 10:00:00',
'2015-01-01 20:00:00', '2015-01-02 06:00:00',
'2015-01-02 16:00:00'],
dtype='datetime64[ns]', freq='10H')
In [3]: idx.hour
Out[3]: Int64Index([0, 10, 20, 6, 16], dtype='int64')
```
instead of
```
# master
In [1]: idx = pd.date_range("2015-01-01", periods=5, freq='10H')
In [2]: idx.hour
Out[2]: array([ 0, 10, 20, 6, 16], dtype=int32)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/15589 | 2017-03-06T09:43:49Z | 2017-03-22T18:47:11Z | null | 2017-03-22T18:47:11Z |
BUG: Timestamp.round precision error for ns (#15578) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 725dc7fc52ed0..f1df8f456159a 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -652,7 +652,7 @@ Bug Fixes
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
-- Bug in ``DatetimeIndex.round()`` and ``Timestamp.round()`` floating point accuracy when rounding by milliseconds (:issue: `14440`)
+- Bug in ``DatetimeIndex.round()`` and ``Timestamp.round()`` floating point accuracy when rounding by milliseconds or less (:issue: `14440`, :issue:`15578`)
- Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`)
- Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`)
- Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 3a6402ae83ae2..312017eef3446 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -175,17 +175,29 @@ def test_round(self):
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
- # GH 14440
+ # GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
+ for freq in ['us', 'ns']:
+ tm.assert_index_equal(index, index.round(freq))
+
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
+ index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
+ result = index.round('10ns')
+ expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
+ tm.assert_index_equal(result, expected)
+
+ with tm.assert_produces_warning():
+ ts = '2016-10-17 12:00:00.001501031'
+ pd.DatetimeIndex([ts]).round('1010ns')
+
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index ae278ebfa2533..bbcdce922f58a 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -732,7 +732,7 @@ def test_round(self):
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: dti.round(freq))
- # GH 14440
+ # GH 14440 & 15578
result = pd.Timestamp('2016-10-17 12:00:00.0015').round('ms')
expected = pd.Timestamp('2016-10-17 12:00:00.002000')
self.assertEqual(result, expected)
@@ -741,6 +741,17 @@ def test_round(self):
expected = pd.Timestamp('2016-10-17 12:00:00.001000')
self.assertEqual(result, expected)
+ ts = pd.Timestamp('2016-10-17 12:00:00.0015')
+ for freq in ['us', 'ns']:
+ self.assertEqual(ts, ts.round(freq))
+
+ result = pd.Timestamp('2016-10-17 12:00:00.001501031').round('10ns')
+ expected = pd.Timestamp('2016-10-17 12:00:00.001501030')
+ self.assertEqual(result, expected)
+
+ with tm.assert_produces_warning():
+ pd.Timestamp('2016-10-17 12:00:00.001501031').round('1010ns')
+
def test_class_ops_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 5891481677ed2..2e22c35868cb3 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -1,6 +1,7 @@
"""
Base and utility classes for tseries type pandas objects.
"""
+import warnings
from datetime import datetime, timedelta
@@ -79,11 +80,20 @@ def _round(self, freq, rounder):
from pandas.tseries.frequencies import to_offset
unit = to_offset(freq).nanos
-
# round the local times
values = _ensure_datetimelike_to_i8(self)
-
- result = (unit * rounder(values / float(unit)).astype('i8'))
+ if unit < 1000 and unit % 1000 != 0:
+ # for nano rounding, work with the last 6 digits separately
+ # due to float precision
+ buff = 1000000
+ result = (buff * (values // buff) + unit *
+ (rounder((values % buff) / float(unit))).astype('i8'))
+ elif unit >= 1000 and unit % 1000 != 0:
+ msg = 'Precision will be lost using frequency: {}'
+ warnings.warn(msg.format(freq))
+ result = (unit * rounder(values / float(unit)).astype('i8'))
+ else:
+ result = (unit * rounder(values / float(unit)).astype('i8'))
result = self._maybe_mask_results(result, fill_value=tslib.NaT)
attribs = self._get_attributes_dict()
if 'freq' in attribs:
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index b96e9434e617a..8ee92e9fb900d 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -421,7 +421,18 @@ class Timestamp(_Timestamp):
value = self.tz_localize(None).value
else:
value = self.value
- result = (unit * rounder(value / float(unit)).astype('i8'))
+ if unit < 1000 and unit % 1000 != 0:
+ # for nano rounding, work with the last 6 digits separately
+ # due to float precision
+ buff = 1000000
+ result = (buff * (value // buff) + unit *
+ (rounder((value % buff) / float(unit))).astype('i8'))
+ elif unit >= 1000 and unit % 1000 != 0:
+ msg = 'Precision will be lost using frequency: {}'
+ warnings.warn(msg.format(freq))
+ result = (unit * rounder(value / float(unit)).astype('i8'))
+ else:
+ result = (unit * rounder(value / float(unit)).astype('i8'))
result = Timestamp(result, unit='ns')
if self.tz is not None:
result = result.tz_localize(self.tz)
| - [x] closes #15578
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
Nice trick @jreback! `round()` patched for `DatetimeIndex` and `Timestamp` when rounding by ns. | https://api.github.com/repos/pandas-dev/pandas/pulls/15588 | 2017-03-06T05:48:56Z | 2017-03-07T13:26:22Z | null | 2017-12-20T02:01:01Z |
ENH: standardize fill_value behavior across the API | diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 87cb088c2e91e..b76e52acb8be4 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -25,6 +25,7 @@
import pandas.core.algorithms as algos
import pandas.algos as _algos
+from pandas.core.missing import validate_fill_value
from pandas.core.index import MultiIndex, _get_na_value
@@ -405,6 +406,9 @@ def _slow_pivot(index, columns, values):
def unstack(obj, level, fill_value=None):
+ if fill_value:
+ validate_fill_value(fill_value, obj.values.dtype)
+
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
diff --git a/pandas/tests/types/test_missing.py b/pandas/tests/types/test_missing.py
index cab44f1122ae1..bdbebfe01985b 100644
--- a/pandas/tests/types/test_missing.py
+++ b/pandas/tests/types/test_missing.py
@@ -12,7 +12,8 @@
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
- na_value_for_dtype)
+ na_value_for_dtype,
+ validate_fill_value)
def test_notnull():
@@ -301,3 +302,11 @@ def test_na_value_for_dtype():
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
+
+
+class TestValidateFillValue(tm.TestCase):
+ # TODO: Fill out the test cases.
+ def test_validate_fill_value(self):
+ # validate_fill_value()
+ # import pdb; pdb.set_trace()
+ pass
diff --git a/pandas/types/missing.py b/pandas/types/missing.py
index e6791b79bf3bd..9d36a38b59c38 100644
--- a/pandas/types/missing.py
+++ b/pandas/types/missing.py
@@ -19,7 +19,10 @@
is_object_dtype,
is_integer,
_TD_DTYPE,
- _NS_DTYPE)
+ _NS_DTYPE,
+ is_datetime64_any_dtype, is_float,
+ is_numeric_dtype, is_complex)
+from datetime import datetime, timedelta
from .inference import is_list_like
@@ -391,3 +394,30 @@ def na_value_for_dtype(dtype):
elif is_bool_dtype(dtype):
return False
return np.nan
+
+
+def validate_fill_value(value, dtype):
+ """
+ Make sure the fill value is appropriate for the given dtype.
+ """
+ if not is_scalar(value):
+ raise TypeError('"fill_value" parameter must be '
+ 'a scalar, but you passed a '
+ '"{0}"'.format(type(value).__name__))
+ elif not isnull(value):
+ if is_numeric_dtype(dtype):
+ if not (is_float(value) or is_integer(value) or is_complex(value)):
+ raise TypeError('"fill_value" parameter must be '
+ 'numeric, but you passed a '
+ '"{0}"'.format(type(value).__name__))
+ elif is_datetime64_any_dtype(dtype):
+ if not isinstance(value, (np.datetime64, datetime)):
+ raise TypeError('"fill_value" parameter must be a '
+ 'datetime, but you passed a '
+ '"{0}"'.format(type(value).__name__))
+ elif is_timedelta64_dtype(dtype):
+ if not isinstance(value, (np.timedelta64, timedelta)):
+ raise TypeError('"value" parameter must be '
+ 'a timedelta, but you passed a '
+ '"{0}"'.format(type(value).__name__))
+ # if object dtype, do nothing.
| - [ ] closes #15533
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15587 | 2017-03-06T04:32:39Z | 2017-03-11T17:38:27Z | null | 2017-03-11T17:48:07Z |
Join result takes on the calling from index | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b3e43edc3eb55..e1ce159d1154c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4591,7 +4591,7 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
- return merge(self, other, left_on=on, how=how,
+ return merge(other, self, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
| https://github.com/pandas-dev/pandas/issues/15582
Fixes indexing referenced in 15582. Tests appear to work on my end, we'll see... | https://api.github.com/repos/pandas-dev/pandas/pulls/15586 | 2017-03-06T01:00:27Z | 2017-03-06T23:29:45Z | null | 2017-03-07T01:24:00Z |
BUG: Fix index order for Index.intersection() | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 37a70435ed6ff..a808ad4bd9dc6 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -738,6 +738,62 @@ New Behavior:
TypeError: Cannot compare 2014-01-01 00:00:00 of
type <class 'pandas.tslib.Timestamp'> to string column
+.. _whatsnew_0200.api_breaking.index_order:
+
+Index.intersection and inner join now preserve the order of the left Index
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``Index.intersection`` now preserves the order of the calling Index (left)
+instead of the other Index (right) (:issue:`15582`). This affects the inner
+joins (methods ``DataFrame.join`` and ``pd.merge``) and the .align methods.
+
+- ``Index.intersection``
+
+ .. ipython:: python
+
+ left = pd.Index([2, 1, 0])
+ left
+ right = pd.Index([1, 2, 3])
+ right
+
+ Previous Behavior:
+
+ .. code-block:: ipython
+
+ In [4]: left.intersection(right)
+ Out[4]: Int64Index([1, 2], dtype='int64')
+
+ New Behavior:
+
+ .. ipython:: python
+
+ left.intersection(right)
+
+- ``DataFrame.join`` and ``pd.merge``
+
+ .. ipython:: python
+
+ left = pd.DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
+ left
+ right = pd.DataFrame({'b': [100, 200, 300]}, index=[1, 2, 3])
+ right
+
+ Previous Behavior:
+
+ .. code-block:: ipython
+
+ In [4]: left.join(right, how='inner')
+ Out[4]:
+ a b
+ 1 10 100
+ 2 20 200
+
+ New Behavior:
+
+ .. ipython:: python
+
+ left.join(right, how='inner')
+
.. _whatsnew_0200.api:
@@ -960,6 +1016,7 @@ Bug Fixes
- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
+- Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`)
- Bug in ``pd.concat()`` in which concatting with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b5e8e0799421..0f558232786ec 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -124,10 +124,14 @@
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
- * left: use only keys from left frame (SQL: left outer join)
- * right: use only keys from right frame (SQL: right outer join)
- * outer: use union of keys from both frames (SQL: full outer join)
- * inner: use intersection of keys from both frames (SQL: inner join)
+ * left: use only keys from left frame, similar to a SQL left outer join;
+ preserve key order
+ * right: use only keys from right frame, similar to a SQL right outer join;
+ preserve key order
+ * outer: use union of keys from both frames, similar to a SQL full outer
+ join; sort keys lexicographically
+ * inner: use intersection of keys from both frames, similar to a SQL inner
+ join; preserve the order of the left keys
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
@@ -147,7 +151,8 @@
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
- Sort the join keys lexicographically in the result DataFrame
+ Sort the join keys lexicographically in the result DataFrame. If False,
+ the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
@@ -4463,16 +4468,18 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
- specified) with other frame's index
+ specified) with other frame's index, and sort it
+ lexicographically
* inner: form intersection of calling frame's index (or column if
- on is specified) with other frame's index
+ on is specified) with other frame's index, preserving the order
+ of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
- preserves the index order of the calling (left) DataFrame
+ the order of the join key depends on the join type (how keyword)
Notes
-----
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 54f73a2466286..7f0de963e5c56 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -2089,8 +2089,8 @@ def intersection(self, other):
"""
Form the intersection of two Index objects.
- This returns a new Index with elements common to the index and `other`.
- Sortedness of the result is not guaranteed.
+ This returns a new Index with elements common to the index and `other`,
+ preserving the order of the calling index.
Parameters
----------
@@ -2128,15 +2128,15 @@ def intersection(self, other):
pass
try:
- indexer = Index(self._values).get_indexer(other._values)
+ indexer = Index(other._values).get_indexer(self._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
- indexer = Index(self._values).get_indexer_non_unique(
- other._values)[0].unique()
+ indexer = Index(other._values).get_indexer_non_unique(
+ self._values)[0].unique()
indexer = indexer[indexer != -1]
- taken = self.take(indexer)
+ taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
@@ -2831,8 +2831,7 @@ def _reindex_non_unique(self, target):
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
- def join(self, other, how='left', level=None, return_indexers=False):
- """
+ _index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
@@ -2844,11 +2843,20 @@ def join(self, other, how='left', level=None, return_indexers=False):
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
+ sort : boolean, default False
+ Sort the join keys lexicographically in the result Index. If False,
+ the order of the join keys depends on the join type (how keyword)
+
+ .. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
+
+ @Appender(_index_shared_docs['join'])
+ def join(self, other, how='left', level=None, return_indexers=False,
+ sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
@@ -2929,6 +2937,9 @@ def join(self, other, how='left', level=None, return_indexers=False):
elif how == 'outer':
join_index = self.union(other)
+ if sort:
+ join_index = join_index.sort_values()
+
if return_indexers:
if join_index is self:
lindexer = None
diff --git a/pandas/indexes/range.py b/pandas/indexes/range.py
index 103a3ac2fd5f4..be68c97fb7890 100644
--- a/pandas/indexes/range.py
+++ b/pandas/indexes/range.py
@@ -431,29 +431,16 @@ def union(self, other):
return self._int64index.union(other)
- def join(self, other, how='left', level=None, return_indexers=False):
- """
- *this is an internal non-public method*
-
- Compute join_index and indexers to conform data
- structures to the new index.
-
- Parameters
- ----------
- other : Index
- how : {'left', 'right', 'inner', 'outer'}
- level : int or level name, default None
- return_indexers : boolean, default False
-
- Returns
- -------
- join_index, (left_indexer, right_indexer)
- """
+ @Appender(_index_shared_docs['join'])
+ def join(self, other, how='left', level=None, return_indexers=False,
+ sort=False):
if how == 'outer' and self is not other:
# note: could return RangeIndex in more circumstances
- return self._int64index.join(other, how, level, return_indexers)
+ return self._int64index.join(other, how, level, return_indexers,
+ sort)
- return super(RangeIndex, self).join(other, how, level, return_indexers)
+ return super(RangeIndex, self).join(other, how, level, return_indexers,
+ sort)
def __len__(self):
"""
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b3b253f151541..f75a4761e0948 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4321,7 +4321,7 @@ def _reindex_axis(obj, axis, labels, other=None):
labels = _ensure_index(labels.unique())
if other is not None:
- labels = labels & _ensure_index(other.unique())
+ labels = _ensure_index(other.unique()) & labels
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
new file mode 100644
index 0000000000000..f7a510023ca07
--- /dev/null
+++ b/pandas/tests/frame/test_join.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+import numpy as np
+
+from pandas import DataFrame, Index
+from pandas.tests.frame.common import TestData
+import pandas.util.testing as tm
+
+
+@pytest.fixture
+def frame():
+ return TestData().frame
+
+
+@pytest.fixture
+def left():
+ return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
+
+
+@pytest.fixture
+def right():
+ return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
+
+
+@pytest.mark.parametrize(
+ "how, sort, expected",
+ [('inner', False, DataFrame({'a': [20, 10],
+ 'b': [200, 100]},
+ index=[2, 1])),
+ ('inner', True, DataFrame({'a': [10, 20],
+ 'b': [100, 200]},
+ index=[1, 2])),
+ ('left', False, DataFrame({'a': [20, 10, 0],
+ 'b': [200, 100, np.nan]},
+ index=[2, 1, 0])),
+ ('left', True, DataFrame({'a': [0, 10, 20],
+ 'b': [np.nan, 100, 200]},
+ index=[0, 1, 2])),
+ ('right', False, DataFrame({'a': [np.nan, 10, 20],
+ 'b': [300, 100, 200]},
+ index=[3, 1, 2])),
+ ('right', True, DataFrame({'a': [10, 20, np.nan],
+ 'b': [100, 200, 300]},
+ index=[1, 2, 3])),
+ ('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
+ 'b': [np.nan, 100, 200, 300]},
+ index=[0, 1, 2, 3])),
+ ('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
+ 'b': [np.nan, 100, 200, 300]},
+ index=[0, 1, 2, 3]))])
+def test_join(left, right, how, sort, expected):
+
+ result = left.join(right, how=how, sort=sort)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_join_index(frame):
+ # left / right
+
+ f = frame.loc[frame.index[:10], ['A', 'B']]
+ f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
+
+ joined = f.join(f2)
+ tm.assert_index_equal(f.index, joined.index)
+ expected_columns = Index(['A', 'B', 'C', 'D'])
+ tm.assert_index_equal(joined.columns, expected_columns)
+
+ joined = f.join(f2, how='left')
+ tm.assert_index_equal(joined.index, f.index)
+ tm.assert_index_equal(joined.columns, expected_columns)
+
+ joined = f.join(f2, how='right')
+ tm.assert_index_equal(joined.index, f2.index)
+ tm.assert_index_equal(joined.columns, expected_columns)
+
+ # inner
+
+ joined = f.join(f2, how='inner')
+ tm.assert_index_equal(joined.index, f.index[5:10])
+ tm.assert_index_equal(joined.columns, expected_columns)
+
+ # outer
+
+ joined = f.join(f2, how='outer')
+ tm.assert_index_equal(joined.index, frame.index.sort_values())
+ tm.assert_index_equal(joined.columns, expected_columns)
+
+ tm.assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
+
+ # corner case - overlapping columns
+ for how in ('outer', 'left', 'inner'):
+ with tm.assertRaisesRegexp(ValueError, 'columns overlap but '
+ 'no suffix'):
+ frame.join(frame, how=how)
+
+
+def test_join_index_more(frame):
+ af = frame.loc[:, ['A', 'B']]
+ bf = frame.loc[::2, ['C', 'D']]
+
+ expected = af.copy()
+ expected['C'] = frame['C'][::2]
+ expected['D'] = frame['D'][::2]
+
+ result = af.join(bf)
+ tm.assert_frame_equal(result, expected)
+
+ result = af.join(bf, how='right')
+ tm.assert_frame_equal(result, expected[::2])
+
+ result = bf.join(af, how='right')
+ tm.assert_frame_equal(result, expected.loc[:, result.columns])
+
+
+def test_join_index_series(frame):
+ df = frame.copy()
+ s = df.pop(frame.columns[-1])
+ joined = df.join(s)
+
+ # TODO should this check_names ?
+ tm.assert_frame_equal(joined, frame, check_names=False)
+
+ s.name = None
+ tm.assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
+
+
+def test_join_overlap(frame):
+ df1 = frame.loc[:, ['A', 'B', 'C']]
+ df2 = frame.loc[:, ['B', 'C', 'D']]
+
+ joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
+ df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
+ df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
+
+ no_overlap = frame.loc[:, ['A', 'D']]
+ expected = df1_suf.join(df2_suf).join(no_overlap)
+
+ # column order not necessarily sorted
+ tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_misc_api.py
index 321d46739b24c..42427df90401d 100644
--- a/pandas/tests/frame/test_misc_api.py
+++ b/pandas/tests/frame/test_misc_api.py
@@ -57,92 +57,6 @@ def test_get_value(self):
expected = self.frame[col][idx]
tm.assert_almost_equal(result, expected)
- def test_join_index(self):
- # left / right
-
- f = self.frame.reindex(columns=['A', 'B'])[:10]
- f2 = self.frame.reindex(columns=['C', 'D'])
-
- joined = f.join(f2)
- self.assert_index_equal(f.index, joined.index)
- self.assertEqual(len(joined.columns), 4)
-
- joined = f.join(f2, how='left')
- self.assert_index_equal(joined.index, f.index)
- self.assertEqual(len(joined.columns), 4)
-
- joined = f.join(f2, how='right')
- self.assert_index_equal(joined.index, f2.index)
- self.assertEqual(len(joined.columns), 4)
-
- # inner
-
- f = self.frame.reindex(columns=['A', 'B'])[:10]
- f2 = self.frame.reindex(columns=['C', 'D'])
-
- joined = f.join(f2, how='inner')
- self.assert_index_equal(joined.index, f.index.intersection(f2.index))
- self.assertEqual(len(joined.columns), 4)
-
- # outer
-
- f = self.frame.reindex(columns=['A', 'B'])[:10]
- f2 = self.frame.reindex(columns=['C', 'D'])
-
- joined = f.join(f2, how='outer')
- self.assertTrue(tm.equalContents(self.frame.index, joined.index))
- self.assertEqual(len(joined.columns), 4)
-
- assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
-
- # corner case - overlapping columns
- for how in ('outer', 'left', 'inner'):
- with assertRaisesRegexp(ValueError, 'columns overlap but '
- 'no suffix'):
- self.frame.join(self.frame, how=how)
-
- def test_join_index_more(self):
- af = self.frame.loc[:, ['A', 'B']]
- bf = self.frame.loc[::2, ['C', 'D']]
-
- expected = af.copy()
- expected['C'] = self.frame['C'][::2]
- expected['D'] = self.frame['D'][::2]
-
- result = af.join(bf)
- assert_frame_equal(result, expected)
-
- result = af.join(bf, how='right')
- assert_frame_equal(result, expected[::2])
-
- result = bf.join(af, how='right')
- assert_frame_equal(result, expected.loc[:, result.columns])
-
- def test_join_index_series(self):
- df = self.frame.copy()
- s = df.pop(self.frame.columns[-1])
- joined = df.join(s)
-
- # TODO should this check_names ?
- assert_frame_equal(joined, self.frame, check_names=False)
-
- s.name = None
- assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
-
- def test_join_overlap(self):
- df1 = self.frame.loc[:, ['A', 'B', 'C']]
- df2 = self.frame.loc[:, ['B', 'C', 'D']]
-
- joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
- df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
- df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
-
- no_overlap = self.frame.loc[:, ['A', 'D']]
- expected = df1_suf.join(df2_suf).join(no_overlap)
-
- # column order not necessarily sorted
- assert_frame_equal(joined, expected.loc[:, joined.columns])
-
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = pd.Index(['foo#%s' % c for c in self.frame.columns])
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index c4dc10d8174cc..a8197b070b032 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -626,14 +626,14 @@ def test_intersection(self):
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
- result2 = idx1.intersection(idx2)
- self.assertTrue(tm.equalContents(result2, expected2))
- self.assertEqual(result2.name, expected2.name)
+ expected = Index([5, 3, 4], name='idx')
+ result = idx1.intersection(idx2)
+ self.assert_index_equal(result, expected)
- idx3 = Index([4, 7, 6, 5, 3], name='other')
- result3 = idx1.intersection(idx3)
- self.assertTrue(tm.equalContents(result3, expected3))
- self.assertEqual(result3.name, expected3.name)
+ idx2 = Index([4, 7, 6, 5, 3], name='other')
+ expected = Index([5, 3, 4], name=None)
+ result = idx1.intersection(idx2)
+ self.assert_index_equal(result, expected)
# non-monotonic non-unique
idx1 = Index(['A', 'B', 'A', 'C'])
@@ -642,6 +642,11 @@ def test_intersection(self):
result = idx1.intersection(idx2)
self.assert_index_equal(result, expected)
+ idx2 = Index(['B', 'D', 'A'])
+ expected = Index(['A', 'B', 'A'], dtype='object')
+ result = idx1.intersection(idx2)
+ self.assert_index_equal(result, expected)
+
# preserve names
first = self.strIndex[5:20]
second = self.strIndex[:10]
diff --git a/pandas/tests/tools/test_merge.py b/pandas/tests/tools/test_merge.py
index ff27500355998..8011bc4a1cfc2 100644
--- a/pandas/tests/tools/test_merge.py
+++ b/pandas/tests/tools/test_merge.py
@@ -1355,3 +1355,51 @@ def test_dtype_on_merged_different(self, change, how, left, right):
np.dtype('int64')],
index=['X', 'Y', 'Z'])
assert_series_equal(result, expected)
+
+
+@pytest.fixture
+def left_df():
+ return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
+
+
+@pytest.fixture
+def right_df():
+ return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
+
+
+class TestMergeOnIndexes(object):
+
+ @pytest.mark.parametrize(
+ "how, sort, expected",
+ [('inner', False, DataFrame({'a': [20, 10],
+ 'b': [200, 100]},
+ index=[2, 1])),
+ ('inner', True, DataFrame({'a': [10, 20],
+ 'b': [100, 200]},
+ index=[1, 2])),
+ ('left', False, DataFrame({'a': [20, 10, 0],
+ 'b': [200, 100, np.nan]},
+ index=[2, 1, 0])),
+ ('left', True, DataFrame({'a': [0, 10, 20],
+ 'b': [np.nan, 100, 200]},
+ index=[0, 1, 2])),
+ ('right', False, DataFrame({'a': [np.nan, 10, 20],
+ 'b': [300, 100, 200]},
+ index=[3, 1, 2])),
+ ('right', True, DataFrame({'a': [10, 20, np.nan],
+ 'b': [100, 200, 300]},
+ index=[1, 2, 3])),
+ ('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
+ 'b': [np.nan, 100, 200, 300]},
+ index=[0, 1, 2, 3])),
+ ('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
+ 'b': [np.nan, 100, 200, 300]},
+ index=[0, 1, 2, 3]))])
+ def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
+
+ result = pd.merge(left_df, right_df,
+ left_index=True,
+ right_index=True,
+ how=how,
+ sort=sort)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 60d523a8ea539..7de2549cadfc7 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -733,7 +733,8 @@ def _get_join_info(self):
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
- left_ax.join(right_ax, how=self.how, return_indexers=True)
+ left_ax.join(right_ax, how=self.how, return_indexers=True,
+ sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 11d2d29597fc0..5ca45bb63bd59 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1037,7 +1037,8 @@ def union_many(self, others):
this.offset = to_offset(this.inferred_freq)
return this
- def join(self, other, how='left', level=None, return_indexers=False):
+ def join(self, other, how='left', level=None, return_indexers=False,
+ sort=False):
"""
See Index.join
"""
@@ -1051,7 +1052,7 @@ def join(self, other, how='left', level=None, return_indexers=False):
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
- return_indexers=return_indexers)
+ return_indexers=return_indexers, sort=sort)
def _maybe_utc_convert(self, other):
this = self
@@ -1203,9 +1204,10 @@ def intersection(self, other):
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
- if isinstance(result, DatetimeIndex):
- if result.freq is None:
- result.offset = to_offset(result.inferred_freq)
+ result = self._shallow_copy(result._values, name=result.name,
+ tz=result.tz, freq=None)
+ if result.freq is None:
+ result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
@@ -1528,7 +1530,7 @@ def _get_freq(self):
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq,
- doc="get/set the frequncy of the Index")
+ doc="get/set the frequency of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
| - [x] closes #15582
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15583 | 2017-03-05T20:57:02Z | 2017-03-29T00:29:47Z | null | 2017-03-29T08:06:09Z |
DOC: Use nbsphinx | diff --git a/ci/requirements-3.5_DOC.run b/ci/requirements-3.5_DOC.run
index 644a16f51f4b6..9bdd7c652cc9d 100644
--- a/ci/requirements-3.5_DOC.run
+++ b/ci/requirements-3.5_DOC.run
@@ -5,6 +5,7 @@ nbconvert
nbformat
notebook
matplotlib
+seaborn
scipy
lxml
beautifulsoup4
diff --git a/ci/requirements-3.5_DOC.sh b/ci/requirements-3.5_DOC.sh
index 1a5d4643edcf2..e43e483d77a73 100644
--- a/ci/requirements-3.5_DOC.sh
+++ b/ci/requirements-3.5_DOC.sh
@@ -6,6 +6,6 @@ echo "[install DOC_BUILD deps]"
pip install pandas-gbq
-conda install -n pandas -c conda-forge feather-format
+conda install -n pandas -c conda-forge feather-format nbsphinx pandoc
conda install -n pandas -c r r rpy2 --yes
diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt
index 4ff80a478f247..e9f49ed879c86 100644
--- a/ci/requirements_all.txt
+++ b/ci/requirements_all.txt
@@ -3,6 +3,7 @@ pytest-cov
pytest-xdist
flake8
sphinx
+nbsphinx
ipython
python-dateutil
pytz
@@ -19,6 +20,7 @@ scipy
numexpr
pytables
matplotlib
+seaborn
lxml
sqlalchemy
bottleneck
diff --git a/doc/README.rst b/doc/README.rst
index a3733846d9ed1..0ea3234dec348 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -81,7 +81,9 @@ have ``sphinx`` and ``ipython`` installed. `numpydoc
<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that
follow the Numpy Docstring Standard (see above), but you don't need to install
this because a local copy of ``numpydoc`` is included in the pandas source
-code.
+code. `nbsphinx <https://nbsphinx.readthedocs.io/>`_ is used to convert
+Jupyter notebooks. You will need to install it if you intend to modify any of
+the notebooks included in the documentation.
Furthermore, it is recommended to have all `optional dependencies
<http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies>`_
diff --git a/doc/make.py b/doc/make.py
index 30cd2ad8b61c9..e70655c3e2f92 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -106,106 +106,42 @@ def clean():
@contextmanager
-def cleanup_nb(nb):
- try:
- yield
- finally:
- try:
- os.remove(nb + '.executed')
- except OSError:
- pass
-
-
-def get_kernel():
- """Find the kernel name for your python version"""
- return 'python%s' % sys.version_info.major
-
-
-def execute_nb(src, dst, allow_errors=False, timeout=1000, kernel_name=''):
- """
- Execute notebook in `src` and write the output to `dst`
-
- Parameters
- ----------
- src, dst: str
- path to notebook
- allow_errors: bool
- timeout: int
- kernel_name: str
- defualts to value set in notebook metadata
-
- Returns
- -------
- dst: str
- """
- import nbformat
- from nbconvert.preprocessors import ExecutePreprocessor
-
- with io.open(src, encoding='utf-8') as f:
- nb = nbformat.read(f, as_version=4)
-
- ep = ExecutePreprocessor(allow_errors=allow_errors,
- timeout=timeout,
- kernel_name=kernel_name)
- ep.preprocess(nb, resources={})
-
- with io.open(dst, 'wt', encoding='utf-8') as f:
- nbformat.write(nb, f)
- return dst
-
-
-def convert_nb(src, dst, to='html', template_file='basic'):
+def maybe_exclude_notebooks():
"""
- Convert a notebook `src`.
-
- Parameters
- ----------
- src, dst: str
- filepaths
- to: {'rst', 'html'}
- format to export to
- template_file: str
- name of template file to use. Default 'basic'
+ Skip building the notebooks if pandoc is not installed.
+ This assumes that nbsphinx is installed.
"""
- from nbconvert import HTMLExporter, RSTExporter
-
- dispatch = {'rst': RSTExporter, 'html': HTMLExporter}
- exporter = dispatch[to.lower()](template_file=template_file)
-
- (body, resources) = exporter.from_filename(src)
- with io.open(dst, 'wt', encoding='utf-8') as f:
- f.write(body)
- return dst
+ base = os.path.dirname(__file__)
+ notebooks = [os.path.join(base, 'source', nb)
+ for nb in ['style.ipynb']]
+ contents = {}
+ try:
+ import nbconvert
+ nbconvert.utils.pandoc.get_pandoc_version()
+ except (ImportError, nbconvert.utils.pandoc.PandocMissing):
+ print("Warning: Pandoc is not installed. Skipping Notebooks.")
+ for nb in notebooks:
+ with open(nb, 'rt') as f:
+ contents[nb] = f.read()
+ os.remove(nb)
+ yield
+ for nb, content in contents.items():
+ with open(nb, 'wt') as f:
+ f.write(content)
def html():
check_build()
- notebooks = [
- 'source/html-styling.ipynb',
- ]
-
- for nb in notebooks:
- with cleanup_nb(nb):
- try:
- print("Converting %s" % nb)
- kernel_name = get_kernel()
- executed = execute_nb(nb, nb + '.executed', allow_errors=True,
- kernel_name=kernel_name)
- convert_nb(executed, nb.rstrip('.ipynb') + '.html')
- except (ImportError, IndexError) as e:
- print(e)
- print("Failed to convert %s" % nb)
-
- if os.system('sphinx-build -P -b html -d build/doctrees '
- 'source build/html'):
- raise SystemExit("Building HTML failed.")
- try:
- # remove stale file
- os.remove('source/html-styling.html')
- os.remove('build/html/pandas.zip')
- except:
- pass
+ with maybe_exclude_notebooks():
+ if os.system('sphinx-build -P -b html -d build/doctrees '
+ 'source build/html'):
+ raise SystemExit("Building HTML failed.")
+ try:
+ # remove stale file
+ os.remove('build/html/pandas.zip')
+ except:
+ pass
def zip_html():
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 0b0de16411e9b..a2a6dca57c34c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -52,14 +52,16 @@
'numpydoc', # used to parse numpy-style docstrings for autodoc
'ipython_sphinxext.ipython_directive',
'ipython_sphinxext.ipython_console_highlighting',
+ 'IPython.sphinxext.ipython_console_highlighting', # lowercase didn't work
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
+ 'nbsphinx',
]
-
+exclude_patterns = ['**.ipynb_checkpoints']
with open("index.rst") as f:
index_rst_lines = f.readlines()
@@ -70,15 +72,16 @@
# JP: added from sphinxdocs
autosummary_generate = False
-if any([re.match("\s*api\s*",l) for l in index_rst_lines]):
+if any([re.match("\s*api\s*", l) for l in index_rst_lines]):
autosummary_generate = True
files_to_delete = []
for f in os.listdir(os.path.dirname(__file__)):
- if not f.endswith('.rst') or f.startswith('.') or os.path.basename(f) == 'index.rst':
+ if (not f.endswith(('.ipynb', '.rst')) or
+ f.startswith('.') or os.path.basename(f) == 'index.rst'):
continue
- _file_basename = f.split('.rst')[0]
+ _file_basename = os.path.splitext(f)[0]
_regex_to_match = "\s*{}\s*$".format(_file_basename)
if not any([re.match(_regex_to_match, line) for line in index_rst_lines]):
files_to_delete.append(f)
@@ -261,6 +264,9 @@
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'
+# -- Options for nbsphinx ------------------------------------------------
+
+nbsphinx_allow_errors = True
# -- Options for LaTeX output --------------------------------------------
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 8af7de688a2ae..aac1e4eade932 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -347,15 +347,14 @@ have ``sphinx`` and ``ipython`` installed. `numpydoc
<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that
follow the Numpy Docstring Standard (see above), but you don't need to install
this because a local copy of numpydoc is included in the *pandas* source
-code.
-`nbconvert <https://nbconvert.readthedocs.io/en/latest/>`_ and
-`nbformat <https://nbformat.readthedocs.io/en/latest/>`_ are required to build
+code. `nbsphinx <https://nbsphinx.readthedocs.io/>`_ is required to build
the Jupyter notebooks included in the documentation.
If you have a conda environment named ``pandas_dev``, you can install the extra
requirements with::
conda install -n pandas_dev sphinx ipython nbconvert nbformat
+ conda install -n pandas_dev -c conda-forge nbsphinx
Furthermore, it is recommended to have all :ref:`optional dependencies <install.optional_dependencies>`.
installed. This is not strictly necessary, but be aware that you will see some error
diff --git a/doc/source/html-styling.ipynb b/doc/source/style.ipynb
similarity index 95%
rename from doc/source/html-styling.ipynb
rename to doc/source/style.ipynb
index 1a97378fd30b1..7e408f96f6c28 100644
--- a/doc/source/html-styling.ipynb
+++ b/doc/source/style.ipynb
@@ -4,9 +4,11 @@
"cell_type": "markdown",
"metadata": {},
"source": [
+ "# HTML Styling\n",
+ "\n",
"*New in version 0.17.1*\n",
"\n",
- "<p style=\"color: red\">*Provisional: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your [feedback](https://github.com/pandas-dev/pandas/issues).*<p style=\"color: red\">\n",
+ "<p style=\"color: red\">*Provisional: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.*<p style=\"color: red\">\n",
"\n",
"This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](http://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/html-styling.ipynb).\n",
"\n",
@@ -17,25 +19,14 @@
"\n",
"The styling is accomplished using CSS.\n",
"You write \"style functions\" that take scalars, `DataFrame`s or `Series`, and return *like-indexed* DataFrames or Series with CSS `\"attribute: value\"` pairs for the values.\n",
- "These functions can be incrementally passed to the `Styler` which collects the styles before rendering.\n",
- "\n",
- "### Contents\n",
- "\n",
- "- [Building Styles](#Building-Styles)\n",
- "- [Finer Control: Slicing](#Finer-Control:-Slicing)\n",
- "- [Builtin Styles](#Builtin-Styles)\n",
- "- [Other options](#Other-options)\n",
- "- [Sharing Styles](#Sharing-Styles)\n",
- "- [Limitations](#Limitations)\n",
- "- [Terms](#Terms)\n",
- "- [Extensibility](#Extensibility)"
+ "These functions can be incrementally passed to the `Styler` which collects the styles before rendering."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Building Styles\n",
+ "## Building Styles\n",
"\n",
"Pass your style functions into one of the following methods:\n",
"\n",
@@ -58,7 +49,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -83,7 +74,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -103,7 +94,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -156,7 +147,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -204,7 +195,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -230,7 +221,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -286,7 +277,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -336,7 +327,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -354,7 +345,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -389,7 +380,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -407,7 +398,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -425,7 +416,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -450,7 +441,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -468,7 +459,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -491,7 +482,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -503,7 +494,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -525,7 +516,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -543,7 +534,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -554,7 +545,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -572,7 +563,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -599,7 +590,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -612,7 +603,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -653,7 +644,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Precision"
+ "### Precision"
]
},
{
@@ -667,7 +658,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -689,7 +680,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -724,7 +715,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -752,7 +743,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -792,7 +783,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# CSS Classes\n",
+ "### CSS Classes\n",
"\n",
"Certain CSS classes are attached to cells.\n",
"\n",
@@ -813,7 +804,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Limitations\n",
+ "### Limitations\n",
"\n",
"- DataFrame only `(use Series.to_frame().style)`\n",
"- The index and columns must be unique\n",
@@ -828,7 +819,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Terms\n",
+ "### Terms\n",
"\n",
"- Style function: a function that's passed into `Styler.apply` or `Styler.applymap` and returns values like `'css attribute: value'`\n",
"- Builtin style functions: style functions that are methods on `Styler`\n",
@@ -850,7 +841,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -867,7 +858,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -888,7 +879,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -907,7 +898,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Extensibility\n",
+ "## Extensibility\n",
"\n",
"The core of pandas is, and will remain, its \"high-performance, easy-to-use data structures\".\n",
"With that in mind, we hope that `DataFrame.style` accomplishes two goals\n",
@@ -917,7 +908,7 @@
"\n",
"If you build a great library on top of this, let us know and we'll [link](http://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it.\n",
"\n",
- "## Subclassing\n",
+ "### Subclassing\n",
"\n",
"This section contains a bit of information about the implementation of `Styler`.\n",
"Since the feature is so new all of this is subject to change, even more so than the end-use API.\n",
@@ -933,7 +924,7 @@
"The `.translate` method takes `self.ctx` and builds another dictionary ready to be passed into `Styler.template.render`, the Jinja template.\n",
"\n",
"\n",
- "## Alternate templates\n",
+ "### Alternate templates\n",
"\n",
"We've used [Jinja](http://jinja.pocoo.org/) templates to build up the HTML.\n",
"The template is stored as a class variable ``Styler.template.``. Subclasses can override that.\n",
@@ -961,9 +952,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1"
+ "version": "3.6.1"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
}
diff --git a/doc/source/style.rst b/doc/source/style.rst
deleted file mode 100644
index 506b38bf06e65..0000000000000
--- a/doc/source/style.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _style:
-
-.. currentmodule:: pandas
-
-*****
-Style
-*****
-
-.. raw:: html
- :file: html-styling.html
diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t
index 2948f0d68b402..2958678dc8221 100644
--- a/doc/source/themes/nature_with_gtoc/static/nature.css_t
+++ b/doc/source/themes/nature_with_gtoc/static/nature.css_t
@@ -299,18 +299,35 @@ td.field-body blockquote {
padding-left: 30px;
}
-.rendered_html table {
+// Adapted from the new Jupyter notebook style
+// https://github.com/jupyter/notebook/blob/c8841b68c4c0739bbee1291e0214771f24194079/notebook/static/notebook/less/renderedhtml.less#L59
+table {
margin-left: auto;
margin-right: auto;
- border-right: 1px solid #cbcbcb;
- border-bottom: 1px solid #cbcbcb;
-}
-
-.rendered_html td, th {
- border-left: 1px solid #cbcbcb;
- border-top: 1px solid #cbcbcb;
- margin: 0;
- padding: 0.5em .75em;
+ border: none;
+ border-collapse: collapse;
+ border-spacing: 0;
+ color: @rendered_html_border_color;
+ table-layout: fixed;
+}
+thead {
+ border-bottom: 1px solid @rendered_html_border_color;
+ vertical-align: bottom;
+}
+tr, th, td {
+ text-align: right;
+ vertical-align: middle;
+ padding: 0.5em 0.5em;
+ line-height: normal;
+ white-space: normal;
+ max-width: none;
+ border: none;
+}
+th {
+ font-weight: bold;
+}
+tbody tr:nth-child(odd) {
+ background: #f5f5f5;
}
/**
| Update header levels for nbsphinx
Link to nb, nicer default table
Closes #15539 | https://api.github.com/repos/pandas-dev/pandas/pulls/15581 | 2017-03-05T18:28:19Z | 2017-04-08T16:11:10Z | 2017-04-08T16:11:10Z | 2017-05-29T20:43:53Z |
Comment typo correction | diff --git a/pandas/types/dtypes.py b/pandas/types/dtypes.py
index 5b6d7905d4095..43135ba94ab46 100644
--- a/pandas/types/dtypes.py
+++ b/pandas/types/dtypes.py
@@ -73,7 +73,7 @@ def __ne__(self, other):
@classmethod
def is_dtype(cls, dtype):
- """ Return a boolean if we if the passed type is an actual dtype that
+ """ Return a boolean if the passed type is an actual dtype that
we can match (via string or type)
"""
if hasattr(dtype, 'dtype'):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15577 | 2017-03-05T14:45:38Z | 2017-03-05T16:21:15Z | 2017-03-05T16:21:15Z | 2017-03-05T16:21:17Z |
Raising for invalid dtype issue #15520 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 298fa75779420..ae5b1180c86f4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -172,6 +172,12 @@ def _validate_dtype(self, dtype):
raise NotImplementedError("compound dtypes are not implemented"
"in the {0} constructor"
.format(self.__class__.__name__))
+
+ # check if coerced dtype is of type object
+ if dtype.kind == 'O':
+ raise TypeError("argument 'dtype' must be a valid "
+ "pandas/numpy dtype")
+
return dtype
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
| closes #15520
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes git diff upstream/master | flake8 --diff
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/15576 | 2017-03-05T14:06:12Z | 2017-03-10T12:08:16Z | null | 2017-03-10T12:08:16Z |
Added exception for invalid dtypes per issue #15520 | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 626a4a81193cc..6465264df02e3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -156,7 +156,11 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
if data is None:
data = {}
if dtype is not None:
- dtype = self._validate_dtype(dtype)
+ if dtype is numpy.dtype or dtype is pandas.dtype:
+ dtype = self._validate_dtype(dtype)
+ else:
+ raise TypeError("Argument 'dtype' must be
+ numpy.dtype or pandas.dtype")
if isinstance(data, MultiIndex):
raise NotImplementedError("initializing a Series from a "
| close #15520
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15574 | 2017-03-05T04:15:30Z | 2017-03-10T12:08:46Z | null | 2017-03-10T12:08:46Z |
ENH: Allow parameters method and min_periods in DataFrame.corrwith() | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 57480a244f308..56bc9e0ee9613 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -157,6 +157,7 @@ objects.
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
df2.corrwith(df1, axis=1)
+ df2.corrwith(df1, axis=1, method='kendall')
.. _computation.ranking:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index f51ff4cd0c908..2299daa76ba7d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -227,6 +227,7 @@ Other enhancements
- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`)
- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
+- ``pd.DataFrame.corrwith()`` now accepts ``method`` and ``min_periods`` as optional arguments, as in pd.DataFrame.corr() and pd.Series.corr() (:issue:`9490`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b3e43edc3eb55..0c831ab83d703 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4819,7 +4819,8 @@ def cov(self, min_periods=None):
return self._constructor(baseCov, index=idx, columns=cols)
- def corrwith(self, other, axis=0, drop=False):
+ def corrwith(self, other, axis=0, drop=False, method='pearson',
+ min_periods=1):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
@@ -4831,6 +4832,17 @@ def corrwith(self, other, axis=0, drop=False):
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
+ method : {'pearson', 'kendall', 'spearman'}
+ * pearson : standard correlation coefficient
+ * kendall : Kendall Tau correlation coefficient
+ * spearman : Spearman rank correlation
+
+ .. versionadded:: 0.20.0
+
+ min_periods : int, optional
+ Minimum number of observations needed to have a valid result
+
+ .. versionadded:: 0.20.0
Returns
-------
@@ -4838,29 +4850,23 @@ def corrwith(self, other, axis=0, drop=False):
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
- return self.apply(other.corr, axis=axis)
+ return self.apply(other.corr, axis=axis,
+ method=method, min_periods=min_periods)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
- # mask missing values
- left = left + right * 0
- right = right + left * 0
-
if axis == 1:
left = left.T
right = right.T
- # demeaned data
- ldem = left - left.mean()
- rdem = right - right.mean()
-
- num = (ldem * rdem).sum()
- dom = (left.count() - 1) * left.std() * right.std()
-
- correl = num / dom
+ correl = Series({col: nanops.nancorr(left[col].values,
+ right[col].values,
+ method=method,
+ min_periods=min_periods)
+ for col in left.columns})
if not drop:
raxis = 1 if axis == 0 else 0
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 111195363beb2..f85b344aef17a 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -181,28 +181,33 @@ def test_corrwith(self):
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
- colcorr = a.corrwith(b, axis=0)
- tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
-
- rowcorr = a.corrwith(b, axis=1)
- tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
-
- dropped = a.corrwith(b, axis=0, drop=True)
- tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
- self.assertNotIn('B', dropped)
-
- dropped = a.corrwith(b, axis=1, drop=True)
- self.assertNotIn(a.index[-1], dropped.index)
-
- # non time-series data
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
- df1 = DataFrame(randn(5, 4), index=index, columns=columns)
- df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
- correls = df1.corrwith(df2, axis=1)
- for row in index[:4]:
- tm.assert_almost_equal(correls[row],
- df1.loc[row].corr(df2.loc[row]))
+ for meth in ['pearson', 'kendall', 'spearman']:
+ colcorr = a.corrwith(b, axis=0, method=meth)
+ tm.assert_almost_equal(colcorr['A'],
+ a['A'].corr(b['A'], method=meth))
+
+ rowcorr = a.corrwith(b, axis=1, method=meth)
+ tm.assert_series_equal(rowcorr,
+ a.T.corrwith(b.T, axis=0, method=meth))
+
+ dropped = a.corrwith(b, axis=0, drop=True, method=meth)
+ tm.assert_almost_equal(dropped['A'],
+ a['A'].corr(b['A'], method=meth))
+ self.assertNotIn('B', dropped)
+
+ dropped = a.corrwith(b, axis=1, drop=True, method=meth)
+ self.assertNotIn(a.index[-1], dropped.index)
+
+ # non time-series data
+ index = ['a', 'b', 'c', 'd', 'e']
+ columns = ['one', 'two', 'three', 'four']
+ df1 = DataFrame(randn(5, 4), index=index, columns=columns)
+ df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
+ correls = df1.corrwith(df2, axis=1, method=meth)
+ for row in index[:4]:
+ tm.assert_almost_equal(correls[row],
+ df1.loc[row].corr(df2.loc[row],
+ method=meth))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
@@ -212,19 +217,22 @@ def test_corrwith_with_objects(self):
df1['obj'] = 'foo'
df2['obj'] = 'bar'
- result = df1.corrwith(df2)
- expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
- tm.assert_series_equal(result, expected)
+ for meth in ['pearson', 'kendall', 'spearman']:
+ result = df1.corrwith(df2, method=meth)
+ expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], method=meth)
+ tm.assert_series_equal(result, expected)
- result = df1.corrwith(df2, axis=1)
- expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
- tm.assert_series_equal(result, expected)
+ result = df1.corrwith(df2, axis=1, method=meth)
+ expected = df1.loc[:, cols].corrwith(df2.loc[:, cols],
+ axis=1, method=meth)
+ tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
- result = self.tsframe.corrwith(self.tsframe['A'])
- expected = self.tsframe.apply(self.tsframe['A'].corr)
+ for meth in ['pearson', 'kendall', 'spearman']:
+ result = self.tsframe.corrwith(self.tsframe['A'], method=meth)
+ expected = self.tsframe.apply(self.tsframe['A'].corr, method=meth)
- tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
| Added new keyword parameters for `DataFrame.corrwith()`, which allows methods other than Pearson to be used. See #9490.
- [x] closes #9490
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15573 | 2017-03-04T23:18:17Z | 2017-08-17T10:29:06Z | null | 2017-08-17T10:29:06Z |
DOC: reset table_schema option after example | diff --git a/doc/source/options.rst b/doc/source/options.rst
index 1a0e5cf6b7235..1b219f640cc87 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -533,3 +533,9 @@ by default. False by default, this can be enabled globally with the
pd.set_option('display.html.table_schema', True)
Only ``'display.max_rows'`` are serialized and published.
+
+
+.. ipython:: python
+ :suppress:
+
+ pd.reset_option('display.html.table_schema')
\ No newline at end of file
| @jreback I think this should fix the errors you noticed here https://github.com/pandas-dev/pandas/issues/15559#issuecomment-284166380 (let's see if travis agrees with me)
The table schema repr was activated due to the example (the ipython environment lives on between different documents; it would actually be a good idea to completely reset them for each rst file)
cc @TomAugspurger in any case an example that it's not yet ready to be default on True :-) | https://api.github.com/repos/pandas-dev/pandas/pulls/15572 | 2017-03-04T22:10:55Z | 2017-03-05T10:28:57Z | 2017-03-05T10:28:57Z | 2017-03-05T10:28:58Z |
DOC: fix build_table_schema docs | diff --git a/doc/source/io.rst b/doc/source/io.rst
index c34cc1ec17512..c7a68a0fe9fbb 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2090,7 +2090,7 @@ A few notes on the generated table schema:
- All dates are converted to UTC when serializing. Even timezone naïve values,
which are treated as UTC with an offset of 0.
- .. ipython:: python:
+ .. ipython:: python
from pandas.io.json import build_table_schema
s = pd.Series(pd.date_range('2016', periods=4))
| Small follow-up on #14904 | https://api.github.com/repos/pandas-dev/pandas/pulls/15571 | 2017-03-04T21:56:54Z | 2017-03-04T21:58:33Z | 2017-03-04T21:58:33Z | 2017-03-04T21:58:33Z |
BUG: DataFrame.isin empty datetimelike | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 1ba327a4ea50c..e18bf8ba9912d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -676,7 +676,7 @@ Bug Fixes
- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
-
+- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
- Bug in ``Series.where()`` and ``DataFrame.where()`` where array-like conditionals were being rejected (:issue:`15414`)
- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 697a99f63f62f..6cc43cd9228f6 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1249,7 +1249,7 @@ def na_op(x, y):
result = op(x, y)
except TypeError:
xrav = x.ravel()
- result = np.empty(x.size, dtype=x.dtype)
+ result = np.empty(x.size, dtype=bool)
if isinstance(y, (np.ndarray, ABCSeries)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 111195363beb2..4758ee1323ca0 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1502,6 +1502,27 @@ def test_isin_multiIndex(self):
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
+ def test_isin_empty_datetimelike(self):
+ # GH 15473
+ df1_ts = DataFrame({'date':
+ pd.to_datetime(['2014-01-01', '2014-01-02'])})
+ df1_td = DataFrame({'date':
+ [pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
+ df2 = DataFrame({'date': []})
+ df3 = DataFrame()
+
+ expected = DataFrame({'date': [False, False]})
+
+ result = df1_ts.isin(df2)
+ tm.assert_frame_equal(result, expected)
+ result = df1_ts.isin(df3)
+ tm.assert_frame_equal(result, expected)
+
+ result = df1_td.isin(df2)
+ tm.assert_frame_equal(result, expected)
+ result = df1_td.isin(df3)
+ tm.assert_frame_equal(result, expected)
+
# ----------------------------------------------------------------------
# Row deduplication
| - [x] closes #15473
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15570 | 2017-03-04T21:07:05Z | 2017-03-05T01:45:21Z | 2017-03-05T01:45:21Z | 2017-03-05T01:45:25Z |
BUG: Groupby.cummin/max DataError on datetimes (#15561) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 1ba327a4ea50c..3ddab0717396e 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -635,7 +635,7 @@ Performance Improvements
- Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`)
- Improved performance of timeseries plotting with an irregular DatetimeIndex
(or with ``compat_x=True``) (:issue:`15073`).
-- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`, :issue:`15109`)
+- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`, :issue:`15109`, :issue:`15561`)
- Improved performance and reduced memory when indexing with a ``MultiIndex`` (:issue:`15245`)
- When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. (:issue:`14947`)
- Improved performance of `rank()` for categorical data (:issue:`15498`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 578c334781d15..43c57a88b4d19 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1442,7 +1442,7 @@ def cummin(self, axis=0, **kwargs):
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
- return self._cython_transform('cummin', **kwargs)
+ return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
@@ -1451,7 +1451,7 @@ def cummax(self, axis=0, **kwargs):
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
- return self._cython_transform('cummax', **kwargs)
+ return self._cython_transform('cummax', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 74e8c6c45946f..e846963732883 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1954,7 +1954,8 @@ def test_arg_passthru(self):
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
- tm.assert_index_equal(result.columns, expected_columns_numeric)
+ # GH 15561: numeric_only=False set by default like min/max
+ tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
@@ -4295,6 +4296,13 @@ def test_cummin_cummax(self):
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
+ # GH 15561
+ df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
+ expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
+ for method in ['cummax', 'cummin']:
+ result = getattr(df.groupby('a')['b'], method)()
+ tm.assert_series_equal(expected, result)
+
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
| - [x] closes #15561
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
`numeric_only=False` set by default for `groupby.cummin/cummax` like `groupby.max/min` allows datetimes to work. | https://api.github.com/repos/pandas-dev/pandas/pulls/15569 | 2017-03-04T20:20:15Z | 2017-03-05T02:02:41Z | 2017-03-05T02:02:41Z | 2017-12-20T02:01:10Z |
BUG:Floating point accuracy with DatetimeIndex.round (#14440) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 1ba327a4ea50c..8dc337aa24fb7 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -651,6 +651,7 @@ Bug Fixes
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
+- Bug in ``DatetimeIndex.round()`` and ``Timestamp.round()`` floating point accuracy when rounding by milliseconds (:issue: `14440`)
- Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`)
- Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`)
- Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 8eb9128d8d1c8..3a6402ae83ae2 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -175,6 +175,17 @@ def test_round(self):
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
+ # GH 14440
+ index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
+ result = index.round('ms')
+ expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
+ tm.assert_index_equal(result, expected)
+
+ index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
+ result = index.round('ms')
+ expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
+ tm.assert_index_equal(result, expected)
+
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 2abc83ca6109c..ae278ebfa2533 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -732,6 +732,15 @@ def test_round(self):
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: dti.round(freq))
+ # GH 14440
+ result = pd.Timestamp('2016-10-17 12:00:00.0015').round('ms')
+ expected = pd.Timestamp('2016-10-17 12:00:00.002000')
+ self.assertEqual(result, expected)
+
+ result = pd.Timestamp('2016-10-17 12:00:00.00149').round('ms')
+ expected = pd.Timestamp('2016-10-17 12:00:00.001000')
+ self.assertEqual(result, expected)
+
def test_class_ops_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index ee9234d6c8237..5891481677ed2 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -83,7 +83,7 @@ def _round(self, freq, rounder):
# round the local times
values = _ensure_datetimelike_to_i8(self)
- result = (unit * rounder(values / float(unit))).astype('i8')
+ result = (unit * rounder(values / float(unit)).astype('i8'))
result = self._maybe_mask_results(result, fill_value=tslib.NaT)
attribs = self._get_attributes_dict()
if 'freq' in attribs:
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index fc6e689a35d81..b96e9434e617a 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -421,7 +421,8 @@ class Timestamp(_Timestamp):
value = self.tz_localize(None).value
else:
value = self.value
- result = Timestamp(unit * rounder(value / float(unit)), unit='ns')
+ result = (unit * rounder(value / float(unit)).astype('i8'))
+ result = Timestamp(result, unit='ns')
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
| - [x] closes #14440
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
Employs @eoincondron's fix for float point inaccuracies when rounding by milliseconds for `DatetimeIndex.round` and `Timestamp.round` | https://api.github.com/repos/pandas-dev/pandas/pulls/15568 | 2017-03-04T18:26:52Z | 2017-03-05T16:26:59Z | null | 2017-12-20T02:01:17Z |
DEPR/CLN: remove SparseTimeSeries class (follow-up GH15098) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 782ae6082c1cf..381c90f6b84a5 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -585,6 +585,8 @@ Removal of prior version deprecations/changes
Similar functionality can be found in the `Google2Pandas <https://github.com/panalysis/Google2Pandas>`__ package.
- ``pd.to_datetime`` and ``pd.to_timedelta`` have dropped the ``coerce`` parameter in favor of ``errors`` (:issue:`13602`)
- ``pandas.stats.fama_macbeth``, ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var``, as well as the top-level ``pandas.fama_macbeth`` and ``pandas.ols`` routines are removed. Similar functionaility can be found in the `statsmodels <shttp://www.statsmodels.org/dev/>`__ package. (:issue:`11898`)
+- The ``TimeSeries`` and ``SparseTimeSeries`` classes, aliases of ``Series``
+ and ``SparseSeries``, are removed (:issue:`10890`, :issue:`15098`).
- ``Series.is_time_series`` is dropped in favor of ``Series.index.is_all_dates`` (:issue:``)
- The deprecated ``irow``, ``icol``, ``iget`` and ``iget_value`` methods are removed
in favor of ``iloc`` and ``iat`` as explained :ref:`here <whatsnew_0170.deprecations>` (:issue:`10711`).
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index b8ccd13c153d4..25a170c3eb121 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -61,7 +61,8 @@ def load_reduce(self):
('pandas.core.base', 'FrozenList'): ('pandas.indexes.frozen', 'FrozenList'),
# 10890
- ('pandas.core.series', 'TimeSeries'): ('pandas.core.series', 'Series')
+ ('pandas.core.series', 'TimeSeries'): ('pandas.core.series', 'Series'),
+ ('pandas.sparse.series', 'SparseTimeSeries'): ('pandas.sparse.series', 'SparseSeries')
}
diff --git a/pandas/sparse/api.py b/pandas/sparse/api.py
index 55841fbeffa2d..90be0a216535f 100644
--- a/pandas/sparse/api.py
+++ b/pandas/sparse/api.py
@@ -2,5 +2,5 @@
# flake8: noqa
from pandas.sparse.array import SparseArray
from pandas.sparse.list import SparseList
-from pandas.sparse.series import SparseSeries, SparseTimeSeries
+from pandas.sparse.series import SparseSeries
from pandas.sparse.frame import SparseDataFrame
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index dfdbb3c89814a..a3b701169ce91 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -844,14 +844,3 @@ def from_coo(cls, A, dense_index=False):
comp_method=_arith_method,
bool_method=None, use_numexpr=False,
force=True)
-
-
-# backwards compatiblity
-class SparseTimeSeries(SparseSeries):
-
- def __init__(self, *args, **kwargs):
- # deprecation TimeSeries, #10890
- warnings.warn("SparseTimeSeries is deprecated. Please use "
- "SparseSeries", FutureWarning, stacklevel=2)
-
- super(SparseTimeSeries, self).__init__(*args, **kwargs)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index f2f7a9c778e66..2f8ebc4cc1df4 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -57,8 +57,7 @@ class TestPDApi(Base, tm.TestCase):
'TimedeltaIndex', 'Timestamp']
# these are already deprecated; awaiting removal
- deprecated_classes = ['WidePanel',
- 'SparseTimeSeries', 'Panel4D',
+ deprecated_classes = ['WidePanel', 'Panel4D',
'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py
index d4543b97af4dd..de6636162ff05 100644
--- a/pandas/tests/sparse/test_series.py
+++ b/pandas/tests/sparse/test_series.py
@@ -112,12 +112,6 @@ def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
- def test_TimeSeries_deprecation(self):
-
- # deprecation TimeSeries, #10890
- with tm.assert_produces_warning(FutureWarning):
- pd.SparseTimeSeries(1, index=pd.date_range('20130101', periods=3))
-
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
| #15098 removed the TimeSeries alias, but we also deprecated SparseTimeSeries (#10890), so this makes the removal complete. | https://api.github.com/repos/pandas-dev/pandas/pulls/15567 | 2017-03-04T11:33:23Z | 2017-03-04T14:14:37Z | 2017-03-04T14:14:37Z | 2017-03-04T14:14:37Z |
Revert FrozenList changes (doc build slowdown, #15559) | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 2d406de7c0c9b..8484ccd69a983 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -126,16 +126,6 @@ We could naturally group by either the ``A`` or ``B`` columns or both:
grouped = df.groupby('A')
grouped = df.groupby(['A', 'B'])
-.. versionadded:: 0.20
-
-If we also have a MultiIndex on columns ``A`` and ``B``, we can group by all
-but the specified columns.
-
-.. ipython:: python
-
- df2 = df.set_index(['A', 'B'])
- grouped = df2.groupby(level=df2.index.names.difference(['B'])
-
These will split the DataFrame on its index (rows). We could also split by the
columns:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index df259f4a42b86..c172b28a79ec4 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -28,7 +28,6 @@ New features
- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
- ``.str.replace`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`)
-- ``FrozenList`` has gained the ``.difference()`` setop method (:issue:`15475`)
@@ -536,7 +535,6 @@ Deprecations
- ``Series.sortlevel`` and ``DataFrame.sortlevel`` have been deprecated in favor of ``Series.sort_index`` and ``DataFrame.sort_index`` (:issue:`15099`)
- importing ``concat`` from ``pandas.tools.merge`` has been deprecated in favor of imports from the ``pandas`` namespace. This should only affect explict imports (:issue:`15358`)
- ``Series/DataFrame/Panel.consolidate()`` been deprecated as a public method. (:issue:`15483`)
-- ``FrozenList`` addition (new object and inplace) have been deprecated in favor of the ``.union()`` method. (:issue: `15475`)
- The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`)
* ``pd.pnow()``, replaced by ``Period.now()``
* ``pd.Term``, is removed, as it is not applicable to user code. Instead use in-line string expressions in the where clause when searching in HDFStore
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index c5ea513223dce..4a6c6cf291316 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -940,9 +940,9 @@ def construct_index_parts(idx, major=True):
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
- levels = list(major_levels) + list(minor_levels)
- labels = list(major_labels) + list(minor_labels)
- names = list(major_names) + list(minor_names)
+ levels = major_levels + minor_levels
+ labels = major_labels + minor_labels
+ names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index faad6c500a21f..87cb088c2e91e 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -216,8 +216,8 @@ def get_new_columns(self):
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
- new_levels = self.value_columns.levels.union((self.removed_level,))
- new_names = self.value_columns.names.union((self.removed_name,))
+ new_levels = self.value_columns.levels + (self.removed_level,)
+ new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
@@ -806,7 +806,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None,
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
- mcolumns = list(id_vars) + list(var_name) + list([value_name])
+ mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 51016926d6909..ac8d1db6a0bf3 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -787,7 +787,7 @@ def str_extractall(arr, pat, flags=0):
if 0 < len(index_list):
from pandas import MultiIndex
index = MultiIndex.from_tuples(
- index_list, names=arr.index.names.union(["match"]))
+ index_list, names=arr.index.names + ["match"])
else:
index = None
result = arr._constructor_expanddim(match_list, index=index,
diff --git a/pandas/indexes/frozen.py b/pandas/indexes/frozen.py
index 47e2557333ec7..e043ba64bbad7 100644
--- a/pandas/indexes/frozen.py
+++ b/pandas/indexes/frozen.py
@@ -13,8 +13,6 @@
from pandas.types.cast import _coerce_indexer_dtype
from pandas.formats.printing import pprint_thing
-import warnings
-
class FrozenList(PandasObject, list):
@@ -27,14 +25,11 @@ class FrozenList(PandasObject, list):
# typechecks
def __add__(self, other):
- warnings.warn("__add__ is deprecated, use union(...)", FutureWarning)
- return self.union(other)
-
- def __iadd__(self, other):
- warnings.warn("__iadd__ is deprecated, use union(...)", FutureWarning)
if isinstance(other, tuple):
other = list(other)
- return super(FrozenList, self).__iadd__(other)
+ return self.__class__(super(FrozenList, self).__add__(other))
+
+ __iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
@@ -85,19 +80,6 @@ def __repr__(self):
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
- def union(self, other):
- """Returns a FrozenList with other concatenated to the end of self"""
- if isinstance(other, tuple):
- other = list(other)
- return self.__class__(super(FrozenList, self).__add__(other))
-
- def difference(self, other):
- """Returns a FrozenList with the same elements as self, but with elements
- that are also in other removed."""
- other = set(other)
- temp = [x for x in self if x not in other]
- return self.__class__(temp)
-
class FrozenNDArray(PandasObject, np.ndarray):
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index ff01df2693c7c..801d0da070112 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -28,7 +28,7 @@ def check_value_counts(df, keys, bins):
gr = df.groupby(keys, sort=isort)
right = gr['3rd'].apply(Series.value_counts, **kwargs)
- right.index.names = right.index.names[:-1].union(['3rd'])
+ right.index.names = right.index.names[:-1] + ['3rd']
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
index a5fbf066adc83..a82409fbf9513 100644
--- a/pandas/tests/indexes/test_frozen.py
+++ b/pandas/tests/indexes/test_frozen.py
@@ -15,35 +15,20 @@ def setUp(self):
self.klass = FrozenList
def test_add(self):
- q = FrozenList([1])
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- q = q + [2, 3]
- expected = FrozenList([1, 2, 3])
- self.check_result(q, expected)
-
- def test_iadd(self):
- q = FrozenList([1])
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- q += [2, 3]
- expected = FrozenList([1, 2, 3])
- self.check_result(q, expected)
-
- def test_union(self):
- result = self.container.union((1, 2, 3))
+ result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
- def test_difference(self):
- result = self.container.difference([2])
- expected = FrozenList([1, 3, 4, 5])
+ result = (1, 2, 3) + self.container
+ expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
- def test_difference_dupe(self):
- result = FrozenList([1, 2, 3, 2]).difference([2])
- expected = FrozenList([1, 3])
- self.check_result(result, expected)
+ def test_inplace(self):
+ q = r = self.container
+ q += [5]
+ self.check_result(q, self.lst + [5])
+ # other shouldn't be mutated
+ self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
diff --git a/pandas/tools/concat.py b/pandas/tools/concat.py
index ae9d7af9d98ff..6405106118472 100644
--- a/pandas/tools/concat.py
+++ b/pandas/tools/concat.py
@@ -574,7 +574,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
" not have the same number of levels")
# also copies
- names = list(names) + list(_get_consensus_names(indexes))
+ names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
diff --git a/test_fast.sh b/test_fast.sh
index f22ab73277e8b..30ac7f84cbe8b 100755
--- a/test_fast.sh
+++ b/test_fast.sh
@@ -5,4 +5,4 @@
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
-pytest pandas --skip-slow --skip-network -m "not single" -n 4 $@
+pytest pandas --skip-slow --skip-network -m "not single" -n 4
| See #15559. This temporarily reverts #15506, to see if this fixes the doc build slowdown. | https://api.github.com/repos/pandas-dev/pandas/pulls/15566 | 2017-03-04T11:33:00Z | 2017-03-04T15:38:43Z | null | 2017-03-04T15:38:43Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.